From dcffc7cab5c6a712d6c27af0a16d2a55b4e23a2e Mon Sep 17 00:00:00 2001 From: V3n3RiX Date: Sun, 10 Feb 2019 22:14:29 +0000 Subject: sys-kernel/linux-{image,sources}-redcore-lts : revision bump, drop and disable BFQ-SQ --- .../files/4.14-0001-BFQ-v8r12-20171108.patch | 25199 ------------------- .../files/4.14-0002-BFQ-v8r12-20180404.patch | 4611 ---- .../files/4.14-amd64.config | 9101 +++++++ .../files/4.14-redcore-lts-amd64.config | 9106 ------- .../files/4.19-amd64.config | 9343 +++++++ .../files/4.19-bfq-sq-mq-v9r1-2K190204-rc1.patch | 18511 -------------- .../files/4.19-redcore-lts-amd64.config | 9348 ------- 7 files changed, 18444 insertions(+), 66775 deletions(-) delete mode 100644 sys-kernel/linux-image-redcore-lts/files/4.14-0001-BFQ-v8r12-20171108.patch delete mode 100644 sys-kernel/linux-image-redcore-lts/files/4.14-0002-BFQ-v8r12-20180404.patch create mode 100644 sys-kernel/linux-image-redcore-lts/files/4.14-amd64.config delete mode 100644 sys-kernel/linux-image-redcore-lts/files/4.14-redcore-lts-amd64.config create mode 100644 sys-kernel/linux-image-redcore-lts/files/4.19-amd64.config delete mode 100644 sys-kernel/linux-image-redcore-lts/files/4.19-bfq-sq-mq-v9r1-2K190204-rc1.patch delete mode 100644 sys-kernel/linux-image-redcore-lts/files/4.19-redcore-lts-amd64.config (limited to 'sys-kernel/linux-image-redcore-lts/files') diff --git a/sys-kernel/linux-image-redcore-lts/files/4.14-0001-BFQ-v8r12-20171108.patch b/sys-kernel/linux-image-redcore-lts/files/4.14-0001-BFQ-v8r12-20171108.patch deleted file mode 100644 index db7d064b..00000000 --- a/sys-kernel/linux-image-redcore-lts/files/4.14-0001-BFQ-v8r12-20171108.patch +++ /dev/null @@ -1,25199 +0,0 @@ -From c21f53f17430230dab50df29b8ea1b71f99d09d6 Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Tue, 7 Apr 2015 13:39:12 +0200 -Subject: [PATCH 01/51] Add BFQ-v8r12 - -This commit is the result of the following operations. - -1. The squash of all the commits between "block: cgroups, kconfig, -build bits for BFQ-v7r11-4.5.0" and BFQ-v8r12 in the branch -bfq-mq-v8-v4.11 - -2. The renaming of two files (block/bfq-cgroup.c -> -block/bfq-cgroup-included.c and block/bfq-iosched.c -> -block/bfq-sq-iosched.c) and of one option (CONFIG_BFQ_GROUP_IOSCHED -> -CONFIG_BFQ_SQ_GROUP_IOSCHED), to avoid name clashes. These name -clashes are due to the presence of bfq in mainline from 4.12. - -3. The modification of block/Makefile and block/Kconfig.iosched to -comply with the above renaming. - -Signed-off-by: Mauro Andreolini -Signed-off-by: Arianna Avanzini -Signed-off-by: Linus Walleij -Signed-off-by: Paolo Valente ---- - Makefile | 2 +- - block/Kconfig.iosched | 31 + - block/bfq-cgroup-included.c | 1190 ++++++++++ - block/bfq-ioc.c | 36 + - block/bfq-sched.c | 2002 ++++++++++++++++ - block/bfq-sq-iosched.c | 5379 +++++++++++++++++++++++++++++++++++++++++++ - block/bfq.h | 948 ++++++++ - include/linux/blkdev.h | 2 +- - 9 files changed, 9589 insertions(+), 2 deletions(-) - create mode 100644 block/bfq-cgroup-included.c - create mode 100644 block/bfq-ioc.c - create mode 100644 block/bfq-sched.c - create mode 100644 block/bfq-sq-iosched.c - create mode 100644 block/bfq.h - -diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched -index a4a8914bf7a4..9e3f4c2f7390 100644 ---- a/block/Kconfig.iosched -+++ b/block/Kconfig.iosched -@@ -40,6 +40,26 @@ config CFQ_GROUP_IOSCHED - ---help--- - Enable group IO scheduling in CFQ. 
- -+config IOSCHED_BFQ_SQ -+ tristate "BFQ-SQ I/O scheduler" -+ default n -+ ---help--- -+ The BFQ-SQ I/O scheduler (for legacy blk: SQ stands for -+ SingleQueue) distributes bandwidth among all processes -+ according to their weights, regardless of the device -+ parameters and with any workload. It also guarantees a low -+ latency to interactive and soft real-time applications. -+ Details in Documentation/block/bfq-iosched.txt -+ -+config BFQ_SQ_GROUP_IOSCHED -+ bool "BFQ-SQ hierarchical scheduling support" -+ depends on IOSCHED_BFQ_SQ && BLK_CGROUP -+ default n -+ ---help--- -+ -+ Enable hierarchical scheduling in BFQ-SQ, using the blkio -+ (cgroups-v1) or io (cgroups-v2) controller. -+ - choice - - prompt "Default I/O scheduler" -@@ -54,6 +74,16 @@ choice - config DEFAULT_CFQ - bool "CFQ" if IOSCHED_CFQ=y - -+ config DEFAULT_BFQ_SQ -+ bool "BFQ-SQ" if IOSCHED_BFQ_SQ=y -+ help -+ Selects BFQ-SQ as the default I/O scheduler which will be -+ used by default for all block devices. -+ The BFQ-SQ I/O scheduler aims at distributing the bandwidth -+ as desired, independently of the disk parameters and with -+ any workload. It also tries to guarantee low latency to -+ interactive and soft real-time applications. -+ - config DEFAULT_NOOP - bool "No-op" - -@@ -63,6 +93,7 @@ config DEFAULT_IOSCHED - string - default "deadline" if DEFAULT_DEADLINE - default "cfq" if DEFAULT_CFQ -+ default "bfq-sq" if DEFAULT_BFQ_SQ - default "noop" if DEFAULT_NOOP - - config MQ_IOSCHED_DEADLINE -diff --git a/block/Makefile b/block/Makefile -index 6a56303b9925..59026b425791 100644 ---- a/block/Makefile -+++ b/block/Makefile -@@ -24,6 +24,7 @@ obj-$(CONFIG_MQ_IOSCHED_DEADLINE) += mq-deadline.o - obj-$(CONFIG_MQ_IOSCHED_KYBER) += kyber-iosched.o - bfq-y := bfq-iosched.o bfq-wf2q.o bfq-cgroup.o - obj-$(CONFIG_IOSCHED_BFQ) += bfq.o -+obj-$(CONFIG_IOSCHED_BFQ_SQ) += bfq-sq-iosched.o - - obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o - obj-$(CONFIG_BLK_CMDLINE_PARSER) += cmdline-parser.o -diff --git a/block/bfq-cgroup-included.c b/block/bfq-cgroup-included.c -new file mode 100644 -index 000000000000..af7c216a3540 ---- /dev/null -+++ b/block/bfq-cgroup-included.c -@@ -0,0 +1,1190 @@ -+/* -+ * BFQ: CGROUPS support. -+ * -+ * Based on ideas and code from CFQ: -+ * Copyright (C) 2003 Jens Axboe -+ * -+ * Copyright (C) 2008 Fabio Checconi -+ * Paolo Valente -+ * -+ * Copyright (C) 2015 Paolo Valente -+ * -+ * Copyright (C) 2016 Paolo Valente -+ * -+ * Licensed under the GPL-2 as detailed in the accompanying COPYING.BFQ -+ * file. -+ */ -+ -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+ -+/* bfqg stats flags */ -+enum bfqg_stats_flags { -+ BFQG_stats_waiting = 0, -+ BFQG_stats_idling, -+ BFQG_stats_empty, -+}; -+ -+#define BFQG_FLAG_FNS(name) \ -+static void bfqg_stats_mark_##name(struct bfqg_stats *stats) \ -+{ \ -+ stats->flags |= (1 << BFQG_stats_##name); \ -+} \ -+static void bfqg_stats_clear_##name(struct bfqg_stats *stats) \ -+{ \ -+ stats->flags &= ~(1 << BFQG_stats_##name); \ -+} \ -+static int bfqg_stats_##name(struct bfqg_stats *stats) \ -+{ \ -+ return (stats->flags & (1 << BFQG_stats_##name)) != 0; \ -+} \ -+ -+BFQG_FLAG_FNS(waiting) -+BFQG_FLAG_FNS(idling) -+BFQG_FLAG_FNS(empty) -+#undef BFQG_FLAG_FNS -+ -+/* This should be called with the queue_lock held. 
*/ -+static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats) -+{ -+ unsigned long long now; -+ -+ if (!bfqg_stats_waiting(stats)) -+ return; -+ -+ now = sched_clock(); -+ if (time_after64(now, stats->start_group_wait_time)) -+ blkg_stat_add(&stats->group_wait_time, -+ now - stats->start_group_wait_time); -+ bfqg_stats_clear_waiting(stats); -+} -+ -+/* This should be called with the queue_lock held. */ -+static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg, -+ struct bfq_group *curr_bfqg) -+{ -+ struct bfqg_stats *stats = &bfqg->stats; -+ -+ if (bfqg_stats_waiting(stats)) -+ return; -+ if (bfqg == curr_bfqg) -+ return; -+ stats->start_group_wait_time = sched_clock(); -+ bfqg_stats_mark_waiting(stats); -+} -+ -+/* This should be called with the queue_lock held. */ -+static void bfqg_stats_end_empty_time(struct bfqg_stats *stats) -+{ -+ unsigned long long now; -+ -+ if (!bfqg_stats_empty(stats)) -+ return; -+ -+ now = sched_clock(); -+ if (time_after64(now, stats->start_empty_time)) -+ blkg_stat_add(&stats->empty_time, -+ now - stats->start_empty_time); -+ bfqg_stats_clear_empty(stats); -+} -+ -+static void bfqg_stats_update_dequeue(struct bfq_group *bfqg) -+{ -+ blkg_stat_add(&bfqg->stats.dequeue, 1); -+} -+ -+static void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) -+{ -+ struct bfqg_stats *stats = &bfqg->stats; -+ -+ if (blkg_rwstat_total(&stats->queued)) -+ return; -+ -+ /* -+ * group is already marked empty. This can happen if bfqq got new -+ * request in parent group and moved to this group while being added -+ * to service tree. Just ignore the event and move on. -+ */ -+ if (bfqg_stats_empty(stats)) -+ return; -+ -+ stats->start_empty_time = sched_clock(); -+ bfqg_stats_mark_empty(stats); -+} -+ -+static void bfqg_stats_update_idle_time(struct bfq_group *bfqg) -+{ -+ struct bfqg_stats *stats = &bfqg->stats; -+ -+ if (bfqg_stats_idling(stats)) { -+ unsigned long long now = sched_clock(); -+ -+ if (time_after64(now, stats->start_idle_time)) -+ blkg_stat_add(&stats->idle_time, -+ now - stats->start_idle_time); -+ bfqg_stats_clear_idling(stats); -+ } -+} -+ -+static void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) -+{ -+ struct bfqg_stats *stats = &bfqg->stats; -+ -+ stats->start_idle_time = sched_clock(); -+ bfqg_stats_mark_idling(stats); -+} -+ -+static void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg) -+{ -+ struct bfqg_stats *stats = &bfqg->stats; -+ -+ blkg_stat_add(&stats->avg_queue_size_sum, -+ blkg_rwstat_total(&stats->queued)); -+ blkg_stat_add(&stats->avg_queue_size_samples, 1); -+ bfqg_stats_update_group_wait_time(stats); -+} -+ -+static struct blkcg_policy blkcg_policy_bfq; -+ -+/* -+ * blk-cgroup policy-related handlers -+ * The following functions help in converting between blk-cgroup -+ * internal structures and BFQ-specific structures. -+ */ -+ -+static struct bfq_group *pd_to_bfqg(struct blkg_policy_data *pd) -+{ -+ return pd ? container_of(pd, struct bfq_group, pd) : NULL; -+} -+ -+static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg) -+{ -+ return pd_to_blkg(&bfqg->pd); -+} -+ -+static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg) -+{ -+ struct blkg_policy_data *pd = blkg_to_pd(blkg, &blkcg_policy_bfq); -+ -+ return pd_to_bfqg(pd); -+} -+ -+/* -+ * bfq_group handlers -+ * The following functions help in navigating the bfq_group hierarchy -+ * by allowing to find the parent of a bfq_group or the bfq_group -+ * associated to a bfq_queue. 
-+ */ -+ -+static struct bfq_group *bfqg_parent(struct bfq_group *bfqg) -+{ -+ struct blkcg_gq *pblkg = bfqg_to_blkg(bfqg)->parent; -+ -+ return pblkg ? blkg_to_bfqg(pblkg) : NULL; -+} -+ -+static struct bfq_group *bfqq_group(struct bfq_queue *bfqq) -+{ -+ struct bfq_entity *group_entity = bfqq->entity.parent; -+ -+ return group_entity ? container_of(group_entity, struct bfq_group, -+ entity) : -+ bfqq->bfqd->root_group; -+} -+ -+/* -+ * The following two functions handle get and put of a bfq_group by -+ * wrapping the related blk-cgroup hooks. -+ */ -+ -+static void bfqg_get(struct bfq_group *bfqg) -+{ -+ return blkg_get(bfqg_to_blkg(bfqg)); -+} -+ -+static void bfqg_put(struct bfq_group *bfqg) -+{ -+ return blkg_put(bfqg_to_blkg(bfqg)); -+} -+ -+static void bfqg_stats_update_io_add(struct bfq_group *bfqg, -+ struct bfq_queue *bfqq, -+ unsigned int op) -+{ -+ blkg_rwstat_add(&bfqg->stats.queued, op, 1); -+ bfqg_stats_end_empty_time(&bfqg->stats); -+ if (!(bfqq == ((struct bfq_data *)bfqg->bfqd)->in_service_queue)) -+ bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq)); -+} -+ -+static void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) -+{ -+ blkg_rwstat_add(&bfqg->stats.queued, op, -1); -+} -+ -+static void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op) -+{ -+ blkg_rwstat_add(&bfqg->stats.merged, op, 1); -+} -+ -+static void bfqg_stats_update_completion(struct bfq_group *bfqg, -+ uint64_t start_time, uint64_t io_start_time, -+ unsigned int op) -+{ -+ struct bfqg_stats *stats = &bfqg->stats; -+ unsigned long long now = sched_clock(); -+ -+ if (time_after64(now, io_start_time)) -+ blkg_rwstat_add(&stats->service_time, op, -+ now - io_start_time); -+ if (time_after64(io_start_time, start_time)) -+ blkg_rwstat_add(&stats->wait_time, op, -+ io_start_time - start_time); -+} -+ -+/* @stats = 0 */ -+static void bfqg_stats_reset(struct bfqg_stats *stats) -+{ -+ /* queued stats shouldn't be cleared */ -+ blkg_rwstat_reset(&stats->merged); -+ blkg_rwstat_reset(&stats->service_time); -+ blkg_rwstat_reset(&stats->wait_time); -+ blkg_stat_reset(&stats->time); -+ blkg_stat_reset(&stats->avg_queue_size_sum); -+ blkg_stat_reset(&stats->avg_queue_size_samples); -+ blkg_stat_reset(&stats->dequeue); -+ blkg_stat_reset(&stats->group_wait_time); -+ blkg_stat_reset(&stats->idle_time); -+ blkg_stat_reset(&stats->empty_time); -+} -+ -+/* @to += @from */ -+static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from) -+{ -+ if (!to || !from) -+ return; -+ -+ /* queued stats shouldn't be cleared */ -+ blkg_rwstat_add_aux(&to->merged, &from->merged); -+ blkg_rwstat_add_aux(&to->service_time, &from->service_time); -+ blkg_rwstat_add_aux(&to->wait_time, &from->wait_time); -+ blkg_stat_add_aux(&from->time, &from->time); -+ blkg_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum); -+ blkg_stat_add_aux(&to->avg_queue_size_samples, -+ &from->avg_queue_size_samples); -+ blkg_stat_add_aux(&to->dequeue, &from->dequeue); -+ blkg_stat_add_aux(&to->group_wait_time, &from->group_wait_time); -+ blkg_stat_add_aux(&to->idle_time, &from->idle_time); -+ blkg_stat_add_aux(&to->empty_time, &from->empty_time); -+} -+ -+/* -+ * Transfer @bfqg's stats to its parent's dead_stats so that the ancestors' -+ * recursive stats can still account for the amount used by this bfqg after -+ * it's gone. 
-+ */ -+static void bfqg_stats_xfer_dead(struct bfq_group *bfqg) -+{ -+ struct bfq_group *parent; -+ -+ if (!bfqg) /* root_group */ -+ return; -+ -+ parent = bfqg_parent(bfqg); -+ -+ lockdep_assert_held(bfqg_to_blkg(bfqg)->q->queue_lock); -+ -+ if (unlikely(!parent)) -+ return; -+ -+ bfqg_stats_add_aux(&parent->stats, &bfqg->stats); -+ bfqg_stats_reset(&bfqg->stats); -+} -+ -+static void bfq_init_entity(struct bfq_entity *entity, -+ struct bfq_group *bfqg) -+{ -+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); -+ -+ entity->weight = entity->new_weight; -+ entity->orig_weight = entity->new_weight; -+ if (bfqq) { -+ bfqq->ioprio = bfqq->new_ioprio; -+ bfqq->ioprio_class = bfqq->new_ioprio_class; -+ bfqg_get(bfqg); -+ } -+ entity->parent = bfqg->my_entity; /* NULL for root group */ -+ entity->sched_data = &bfqg->sched_data; -+} -+ -+static void bfqg_stats_exit(struct bfqg_stats *stats) -+{ -+ blkg_rwstat_exit(&stats->merged); -+ blkg_rwstat_exit(&stats->service_time); -+ blkg_rwstat_exit(&stats->wait_time); -+ blkg_rwstat_exit(&stats->queued); -+ blkg_stat_exit(&stats->time); -+ blkg_stat_exit(&stats->avg_queue_size_sum); -+ blkg_stat_exit(&stats->avg_queue_size_samples); -+ blkg_stat_exit(&stats->dequeue); -+ blkg_stat_exit(&stats->group_wait_time); -+ blkg_stat_exit(&stats->idle_time); -+ blkg_stat_exit(&stats->empty_time); -+} -+ -+static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp) -+{ -+ if (blkg_rwstat_init(&stats->merged, gfp) || -+ blkg_rwstat_init(&stats->service_time, gfp) || -+ blkg_rwstat_init(&stats->wait_time, gfp) || -+ blkg_rwstat_init(&stats->queued, gfp) || -+ blkg_stat_init(&stats->time, gfp) || -+ blkg_stat_init(&stats->avg_queue_size_sum, gfp) || -+ blkg_stat_init(&stats->avg_queue_size_samples, gfp) || -+ blkg_stat_init(&stats->dequeue, gfp) || -+ blkg_stat_init(&stats->group_wait_time, gfp) || -+ blkg_stat_init(&stats->idle_time, gfp) || -+ blkg_stat_init(&stats->empty_time, gfp)) { -+ bfqg_stats_exit(stats); -+ return -ENOMEM; -+ } -+ -+ return 0; -+} -+ -+static struct bfq_group_data *cpd_to_bfqgd(struct blkcg_policy_data *cpd) -+{ -+ return cpd ? container_of(cpd, struct bfq_group_data, pd) : NULL; -+} -+ -+static struct bfq_group_data *blkcg_to_bfqgd(struct blkcg *blkcg) -+{ -+ return cpd_to_bfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_bfq)); -+} -+ -+static struct blkcg_policy_data *bfq_cpd_alloc(gfp_t gfp) -+{ -+ struct bfq_group_data *bgd; -+ -+ bgd = kzalloc(sizeof(*bgd), gfp); -+ if (!bgd) -+ return NULL; -+ return &bgd->pd; -+} -+ -+static void bfq_cpd_init(struct blkcg_policy_data *cpd) -+{ -+ struct bfq_group_data *d = cpd_to_bfqgd(cpd); -+ -+ d->weight = cgroup_subsys_on_dfl(io_cgrp_subsys) ? 
-+ CGROUP_WEIGHT_DFL : BFQ_WEIGHT_LEGACY_DFL; -+} -+ -+static void bfq_cpd_free(struct blkcg_policy_data *cpd) -+{ -+ kfree(cpd_to_bfqgd(cpd)); -+} -+ -+static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node) -+{ -+ struct bfq_group *bfqg; -+ -+ bfqg = kzalloc_node(sizeof(*bfqg), gfp, node); -+ if (!bfqg) -+ return NULL; -+ -+ if (bfqg_stats_init(&bfqg->stats, gfp)) { -+ kfree(bfqg); -+ return NULL; -+ } -+ -+ return &bfqg->pd; -+} -+ -+static void bfq_pd_init(struct blkg_policy_data *pd) -+{ -+ struct blkcg_gq *blkg; -+ struct bfq_group *bfqg; -+ struct bfq_data *bfqd; -+ struct bfq_entity *entity; -+ struct bfq_group_data *d; -+ -+ blkg = pd_to_blkg(pd); -+ BUG_ON(!blkg); -+ bfqg = blkg_to_bfqg(blkg); -+ bfqd = blkg->q->elevator->elevator_data; -+ entity = &bfqg->entity; -+ d = blkcg_to_bfqgd(blkg->blkcg); -+ -+ entity->orig_weight = entity->weight = entity->new_weight = d->weight; -+ entity->my_sched_data = &bfqg->sched_data; -+ bfqg->my_entity = entity; /* -+ * the root_group's will be set to NULL -+ * in bfq_init_queue() -+ */ -+ bfqg->bfqd = bfqd; -+ bfqg->active_entities = 0; -+ bfqg->rq_pos_tree = RB_ROOT; -+} -+ -+static void bfq_pd_free(struct blkg_policy_data *pd) -+{ -+ struct bfq_group *bfqg = pd_to_bfqg(pd); -+ -+ bfqg_stats_exit(&bfqg->stats); -+ return kfree(bfqg); -+} -+ -+static void bfq_pd_reset_stats(struct blkg_policy_data *pd) -+{ -+ struct bfq_group *bfqg = pd_to_bfqg(pd); -+ -+ bfqg_stats_reset(&bfqg->stats); -+} -+ -+static void bfq_group_set_parent(struct bfq_group *bfqg, -+ struct bfq_group *parent) -+{ -+ struct bfq_entity *entity; -+ -+ BUG_ON(!parent); -+ BUG_ON(!bfqg); -+ BUG_ON(bfqg == parent); -+ -+ entity = &bfqg->entity; -+ entity->parent = parent->my_entity; -+ entity->sched_data = &parent->sched_data; -+} -+ -+static struct bfq_group *bfq_lookup_bfqg(struct bfq_data *bfqd, -+ struct blkcg *blkcg) -+{ -+ struct blkcg_gq *blkg; -+ -+ blkg = blkg_lookup(blkcg, bfqd->queue); -+ if (likely(blkg)) -+ return blkg_to_bfqg(blkg); -+ return NULL; -+} -+ -+static struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd, -+ struct blkcg *blkcg) -+{ -+ struct bfq_group *bfqg, *parent; -+ struct bfq_entity *entity; -+ -+ assert_spin_locked(bfqd->queue->queue_lock); -+ -+ bfqg = bfq_lookup_bfqg(bfqd, blkcg); -+ -+ if (unlikely(!bfqg)) -+ return NULL; -+ -+ /* -+ * Update chain of bfq_groups as we might be handling a leaf group -+ * which, along with some of its relatives, has not been hooked yet -+ * to the private hierarchy of BFQ. -+ */ -+ entity = &bfqg->entity; -+ for_each_entity(entity) { -+ bfqg = container_of(entity, struct bfq_group, entity); -+ BUG_ON(!bfqg); -+ if (bfqg != bfqd->root_group) { -+ parent = bfqg_parent(bfqg); -+ if (!parent) -+ parent = bfqd->root_group; -+ BUG_ON(!parent); -+ bfq_group_set_parent(bfqg, parent); -+ } -+ } -+ -+ return bfqg; -+} -+ -+static void bfq_pos_tree_add_move(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq); -+ -+static void bfq_bfqq_expire(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq, -+ bool compensate, -+ enum bfqq_expiration reason); -+ -+/** -+ * bfq_bfqq_move - migrate @bfqq to @bfqg. -+ * @bfqd: queue descriptor. -+ * @bfqq: the queue to move. -+ * @bfqg: the group to move to. -+ * -+ * Move @bfqq to @bfqg, deactivating it from its old group and reactivating -+ * it on the new one. Avoid putting the entity on the old group idle tree. -+ * -+ * Must be called under the queue lock; the cgroup owning @bfqg must -+ * not disappear (by now this just means that we are called under -+ * rcu_read_lock()). 
-+ */ -+static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, -+ struct bfq_group *bfqg) -+{ -+ struct bfq_entity *entity = &bfqq->entity; -+ -+ BUG_ON(!bfq_bfqq_busy(bfqq) && !RB_EMPTY_ROOT(&bfqq->sort_list)); -+ BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list) && !entity->on_st); -+ BUG_ON(bfq_bfqq_busy(bfqq) && RB_EMPTY_ROOT(&bfqq->sort_list) -+ && entity->on_st && -+ bfqq != bfqd->in_service_queue); -+ BUG_ON(!bfq_bfqq_busy(bfqq) && bfqq == bfqd->in_service_queue); -+ -+ /* If bfqq is empty, then bfq_bfqq_expire also invokes -+ * bfq_del_bfqq_busy, thereby removing bfqq and its entity -+ * from data structures related to current group. Otherwise we -+ * need to remove bfqq explicitly with bfq_deactivate_bfqq, as -+ * we do below. -+ */ -+ if (bfqq == bfqd->in_service_queue) -+ bfq_bfqq_expire(bfqd, bfqd->in_service_queue, -+ false, BFQ_BFQQ_PREEMPTED); -+ -+ BUG_ON(entity->on_st && !bfq_bfqq_busy(bfqq) -+ && &bfq_entity_service_tree(entity)->idle != -+ entity->tree); -+ -+ BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_bfqq_busy(bfqq)); -+ -+ if (bfq_bfqq_busy(bfqq)) -+ bfq_deactivate_bfqq(bfqd, bfqq, false, false); -+ else if (entity->on_st) { -+ BUG_ON(&bfq_entity_service_tree(entity)->idle != -+ entity->tree); -+ bfq_put_idle_entity(bfq_entity_service_tree(entity), entity); -+ } -+ bfqg_put(bfqq_group(bfqq)); -+ -+ /* -+ * Here we use a reference to bfqg. We don't need a refcounter -+ * as the cgroup reference will not be dropped, so that its -+ * destroy() callback will not be invoked. -+ */ -+ entity->parent = bfqg->my_entity; -+ entity->sched_data = &bfqg->sched_data; -+ bfqg_get(bfqg); -+ -+ BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_bfqq_busy(bfqq)); -+ if (bfq_bfqq_busy(bfqq)) { -+ bfq_pos_tree_add_move(bfqd, bfqq); -+ bfq_activate_bfqq(bfqd, bfqq); -+ } -+ -+ if (!bfqd->in_service_queue && !bfqd->rq_in_driver) -+ bfq_schedule_dispatch(bfqd); -+ BUG_ON(entity->on_st && !bfq_bfqq_busy(bfqq) -+ && &bfq_entity_service_tree(entity)->idle != -+ entity->tree); -+} -+ -+/** -+ * __bfq_bic_change_cgroup - move @bic to @cgroup. -+ * @bfqd: the queue descriptor. -+ * @bic: the bic to move. -+ * @blkcg: the blk-cgroup to move to. -+ * -+ * Move bic to blkcg, assuming that bfqd->queue is locked; the caller -+ * has to make sure that the reference to cgroup is valid across the call. -+ * -+ * NOTE: an alternative approach might have been to store the current -+ * cgroup in bfqq and getting a reference to it, reducing the lookup -+ * time here, at the price of slightly more complex code. 
-+ */ -+static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd, -+ struct bfq_io_cq *bic, -+ struct blkcg *blkcg) -+{ -+ struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0); -+ struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1); -+ struct bfq_group *bfqg; -+ struct bfq_entity *entity; -+ -+ lockdep_assert_held(bfqd->queue->queue_lock); -+ -+ bfqg = bfq_find_set_group(bfqd, blkcg); -+ -+ if (unlikely(!bfqg)) -+ bfqg = bfqd->root_group; -+ -+ if (async_bfqq) { -+ entity = &async_bfqq->entity; -+ -+ if (entity->sched_data != &bfqg->sched_data) { -+ bic_set_bfqq(bic, NULL, 0); -+ bfq_log_bfqq(bfqd, async_bfqq, -+ "bic_change_group: %p %d", -+ async_bfqq, -+ async_bfqq->ref); -+ bfq_put_queue(async_bfqq); -+ } -+ } -+ -+ if (sync_bfqq) { -+ entity = &sync_bfqq->entity; -+ if (entity->sched_data != &bfqg->sched_data) -+ bfq_bfqq_move(bfqd, sync_bfqq, bfqg); -+ } -+ -+ return bfqg; -+} -+ -+static void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) -+{ -+ struct bfq_data *bfqd = bic_to_bfqd(bic); -+ struct bfq_group *bfqg = NULL; -+ uint64_t serial_nr; -+ -+ rcu_read_lock(); -+ serial_nr = bio_blkcg(bio)->css.serial_nr; -+ -+ /* -+ * Check whether blkcg has changed. The condition may trigger -+ * spuriously on a newly created cic but there's no harm. -+ */ -+ if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr)) -+ goto out; -+ -+ bfqg = __bfq_bic_change_cgroup(bfqd, bic, bio_blkcg(bio)); -+ bic->blkcg_serial_nr = serial_nr; -+out: -+ rcu_read_unlock(); -+} -+ -+/** -+ * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st. -+ * @st: the service tree being flushed. -+ */ -+static void bfq_flush_idle_tree(struct bfq_service_tree *st) -+{ -+ struct bfq_entity *entity = st->first_idle; -+ -+ for (; entity ; entity = st->first_idle) -+ __bfq_deactivate_entity(entity, false); -+} -+ -+/** -+ * bfq_reparent_leaf_entity - move leaf entity to the root_group. -+ * @bfqd: the device data structure with the root group. -+ * @entity: the entity to move. -+ */ -+static void bfq_reparent_leaf_entity(struct bfq_data *bfqd, -+ struct bfq_entity *entity) -+{ -+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); -+ -+ BUG_ON(!bfqq); -+ bfq_bfqq_move(bfqd, bfqq, bfqd->root_group); -+} -+ -+/** -+ * bfq_reparent_active_entities - move to the root group all active -+ * entities. -+ * @bfqd: the device data structure with the root group. -+ * @bfqg: the group to move from. -+ * @st: the service tree with the entities. -+ * -+ * Needs queue_lock to be taken and reference to be valid over the call. -+ */ -+static void bfq_reparent_active_entities(struct bfq_data *bfqd, -+ struct bfq_group *bfqg, -+ struct bfq_service_tree *st) -+{ -+ struct rb_root *active = &st->active; -+ struct bfq_entity *entity = NULL; -+ -+ if (!RB_EMPTY_ROOT(&st->active)) -+ entity = bfq_entity_of(rb_first(active)); -+ -+ for (; entity ; entity = bfq_entity_of(rb_first(active))) -+ bfq_reparent_leaf_entity(bfqd, entity); -+ -+ if (bfqg->sched_data.in_service_entity) -+ bfq_reparent_leaf_entity(bfqd, -+ bfqg->sched_data.in_service_entity); -+} -+ -+/** -+ * bfq_pd_offline - deactivate the entity associated with @pd, -+ * and reparent its children entities. -+ * @pd: descriptor of the policy going offline. 
-+ * -+ * blkio already grabs the queue_lock for us, so no need to use -+ * RCU-based magic -+ */ -+static void bfq_pd_offline(struct blkg_policy_data *pd) -+{ -+ struct bfq_service_tree *st; -+ struct bfq_group *bfqg; -+ struct bfq_data *bfqd; -+ struct bfq_entity *entity; -+ int i; -+ -+ BUG_ON(!pd); -+ bfqg = pd_to_bfqg(pd); -+ BUG_ON(!bfqg); -+ bfqd = bfqg->bfqd; -+ BUG_ON(bfqd && !bfqd->root_group); -+ -+ entity = bfqg->my_entity; -+ -+ if (!entity) /* root group */ -+ return; -+ -+ /* -+ * Empty all service_trees belonging to this group before -+ * deactivating the group itself. -+ */ -+ for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) { -+ BUG_ON(!bfqg->sched_data.service_tree); -+ st = bfqg->sched_data.service_tree + i; -+ /* -+ * The idle tree may still contain bfq_queues belonging -+ * to exited task because they never migrated to a different -+ * cgroup from the one being destroyed now. No one else -+ * can access them so it's safe to act without any lock. -+ */ -+ bfq_flush_idle_tree(st); -+ -+ /* -+ * It may happen that some queues are still active -+ * (busy) upon group destruction (if the corresponding -+ * processes have been forced to terminate). We move -+ * all the leaf entities corresponding to these queues -+ * to the root_group. -+ * Also, it may happen that the group has an entity -+ * in service, which is disconnected from the active -+ * tree: it must be moved, too. -+ * There is no need to put the sync queues, as the -+ * scheduler has taken no reference. -+ */ -+ bfq_reparent_active_entities(bfqd, bfqg, st); -+ BUG_ON(!RB_EMPTY_ROOT(&st->active)); -+ BUG_ON(!RB_EMPTY_ROOT(&st->idle)); -+ } -+ BUG_ON(bfqg->sched_data.next_in_service); -+ BUG_ON(bfqg->sched_data.in_service_entity); -+ -+ __bfq_deactivate_entity(entity, false); -+ bfq_put_async_queues(bfqd, bfqg); -+ -+ /* -+ * @blkg is going offline and will be ignored by -+ * blkg_[rw]stat_recursive_sum(). Transfer stats to the parent so -+ * that they don't get lost. If IOs complete after this point, the -+ * stats for them will be lost. Oh well... -+ */ -+ bfqg_stats_xfer_dead(bfqg); -+} -+ -+static void bfq_end_wr_async(struct bfq_data *bfqd) -+{ -+ struct blkcg_gq *blkg; -+ -+ list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) { -+ struct bfq_group *bfqg = blkg_to_bfqg(blkg); -+ BUG_ON(!bfqg); -+ -+ bfq_end_wr_async_queues(bfqd, bfqg); -+ } -+ bfq_end_wr_async_queues(bfqd, bfqd->root_group); -+} -+ -+static int bfq_io_show_weight(struct seq_file *sf, void *v) -+{ -+ struct blkcg *blkcg = css_to_blkcg(seq_css(sf)); -+ struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg); -+ unsigned int val = 0; -+ -+ if (bfqgd) -+ val = bfqgd->weight; -+ -+ seq_printf(sf, "%u\n", val); -+ -+ return 0; -+} -+ -+static int bfq_io_set_weight_legacy(struct cgroup_subsys_state *css, -+ struct cftype *cftype, -+ u64 val) -+{ -+ struct blkcg *blkcg = css_to_blkcg(css); -+ struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg); -+ struct blkcg_gq *blkg; -+ int ret = -ERANGE; -+ -+ if (val < BFQ_MIN_WEIGHT || val > BFQ_MAX_WEIGHT) -+ return ret; -+ -+ ret = 0; -+ spin_lock_irq(&blkcg->lock); -+ bfqgd->weight = (unsigned short)val; -+ hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) { -+ struct bfq_group *bfqg = blkg_to_bfqg(blkg); -+ -+ if (!bfqg) -+ continue; -+ /* -+ * Setting the prio_changed flag of the entity -+ * to 1 with new_weight == weight would re-set -+ * the value of the weight to its ioprio mapping. -+ * Set the flag only if necessary. 
-+ */ -+ if ((unsigned short)val != bfqg->entity.new_weight) { -+ bfqg->entity.new_weight = (unsigned short)val; -+ /* -+ * Make sure that the above new value has been -+ * stored in bfqg->entity.new_weight before -+ * setting the prio_changed flag. In fact, -+ * this flag may be read asynchronously (in -+ * critical sections protected by a different -+ * lock than that held here), and finding this -+ * flag set may cause the execution of the code -+ * for updating parameters whose value may -+ * depend also on bfqg->entity.new_weight (in -+ * __bfq_entity_update_weight_prio). -+ * This barrier makes sure that the new value -+ * of bfqg->entity.new_weight is correctly -+ * seen in that code. -+ */ -+ smp_wmb(); -+ bfqg->entity.prio_changed = 1; -+ } -+ } -+ spin_unlock_irq(&blkcg->lock); -+ -+ return ret; -+} -+ -+static ssize_t bfq_io_set_weight(struct kernfs_open_file *of, -+ char *buf, size_t nbytes, -+ loff_t off) -+{ -+ u64 weight; -+ /* First unsigned long found in the file is used */ -+ int ret = kstrtoull(strim(buf), 0, &weight); -+ -+ if (ret) -+ return ret; -+ -+ return bfq_io_set_weight_legacy(of_css(of), NULL, weight); -+} -+ -+static int bfqg_print_stat(struct seq_file *sf, void *v) -+{ -+ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat, -+ &blkcg_policy_bfq, seq_cft(sf)->private, false); -+ return 0; -+} -+ -+static int bfqg_print_rwstat(struct seq_file *sf, void *v) -+{ -+ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat, -+ &blkcg_policy_bfq, seq_cft(sf)->private, true); -+ return 0; -+} -+ -+static u64 bfqg_prfill_stat_recursive(struct seq_file *sf, -+ struct blkg_policy_data *pd, int off) -+{ -+ u64 sum = blkg_stat_recursive_sum(pd_to_blkg(pd), -+ &blkcg_policy_bfq, off); -+ return __blkg_prfill_u64(sf, pd, sum); -+} -+ -+static u64 bfqg_prfill_rwstat_recursive(struct seq_file *sf, -+ struct blkg_policy_data *pd, int off) -+{ -+ struct blkg_rwstat sum = blkg_rwstat_recursive_sum(pd_to_blkg(pd), -+ &blkcg_policy_bfq, -+ off); -+ return __blkg_prfill_rwstat(sf, pd, &sum); -+} -+ -+static int bfqg_print_stat_recursive(struct seq_file *sf, void *v) -+{ -+ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), -+ bfqg_prfill_stat_recursive, &blkcg_policy_bfq, -+ seq_cft(sf)->private, false); -+ return 0; -+} -+ -+static int bfqg_print_rwstat_recursive(struct seq_file *sf, void *v) -+{ -+ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), -+ bfqg_prfill_rwstat_recursive, &blkcg_policy_bfq, -+ seq_cft(sf)->private, true); -+ return 0; -+} -+ -+static u64 bfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd, -+ int off) -+{ -+ u64 sum = blkg_rwstat_total(&pd->blkg->stat_bytes); -+ -+ return __blkg_prfill_u64(sf, pd, sum >> 9); -+} -+ -+static int bfqg_print_stat_sectors(struct seq_file *sf, void *v) -+{ -+ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), -+ bfqg_prfill_sectors, &blkcg_policy_bfq, 0, false); -+ return 0; -+} -+ -+static u64 bfqg_prfill_sectors_recursive(struct seq_file *sf, -+ struct blkg_policy_data *pd, int off) -+{ -+ struct blkg_rwstat tmp = blkg_rwstat_recursive_sum(pd->blkg, NULL, -+ offsetof(struct blkcg_gq, stat_bytes)); -+ u64 sum = atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) + -+ atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]); -+ -+ return __blkg_prfill_u64(sf, pd, sum >> 9); -+} -+ -+static int bfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v) -+{ -+ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), -+ bfqg_prfill_sectors_recursive, &blkcg_policy_bfq, 0, -+ false); -+ return 0; -+} -+ -+ 
-+static u64 bfqg_prfill_avg_queue_size(struct seq_file *sf, -+ struct blkg_policy_data *pd, int off) -+{ -+ struct bfq_group *bfqg = pd_to_bfqg(pd); -+ u64 samples = blkg_stat_read(&bfqg->stats.avg_queue_size_samples); -+ u64 v = 0; -+ -+ if (samples) { -+ v = blkg_stat_read(&bfqg->stats.avg_queue_size_sum); -+ v = div64_u64(v, samples); -+ } -+ __blkg_prfill_u64(sf, pd, v); -+ return 0; -+} -+ -+/* print avg_queue_size */ -+static int bfqg_print_avg_queue_size(struct seq_file *sf, void *v) -+{ -+ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), -+ bfqg_prfill_avg_queue_size, &blkcg_policy_bfq, -+ 0, false); -+ return 0; -+} -+ -+static struct bfq_group * -+bfq_create_group_hierarchy(struct bfq_data *bfqd, int node) -+{ -+ int ret; -+ -+ ret = blkcg_activate_policy(bfqd->queue, &blkcg_policy_bfq); -+ if (ret) -+ return NULL; -+ -+ return blkg_to_bfqg(bfqd->queue->root_blkg); -+} -+ -+static struct cftype bfq_blkcg_legacy_files[] = { -+ { -+ .name = "bfq.weight", -+ .flags = CFTYPE_NOT_ON_ROOT, -+ .seq_show = bfq_io_show_weight, -+ .write_u64 = bfq_io_set_weight_legacy, -+ }, -+ -+ /* statistics, covers only the tasks in the bfqg */ -+ { -+ .name = "bfq.time", -+ .private = offsetof(struct bfq_group, stats.time), -+ .seq_show = bfqg_print_stat, -+ }, -+ { -+ .name = "bfq.sectors", -+ .seq_show = bfqg_print_stat_sectors, -+ }, -+ { -+ .name = "bfq.io_service_bytes", -+ .private = (unsigned long)&blkcg_policy_bfq, -+ .seq_show = blkg_print_stat_bytes, -+ }, -+ { -+ .name = "bfq.io_serviced", -+ .private = (unsigned long)&blkcg_policy_bfq, -+ .seq_show = blkg_print_stat_ios, -+ }, -+ { -+ .name = "bfq.io_service_time", -+ .private = offsetof(struct bfq_group, stats.service_time), -+ .seq_show = bfqg_print_rwstat, -+ }, -+ { -+ .name = "bfq.io_wait_time", -+ .private = offsetof(struct bfq_group, stats.wait_time), -+ .seq_show = bfqg_print_rwstat, -+ }, -+ { -+ .name = "bfq.io_merged", -+ .private = offsetof(struct bfq_group, stats.merged), -+ .seq_show = bfqg_print_rwstat, -+ }, -+ { -+ .name = "bfq.io_queued", -+ .private = offsetof(struct bfq_group, stats.queued), -+ .seq_show = bfqg_print_rwstat, -+ }, -+ -+ /* the same statictics which cover the bfqg and its descendants */ -+ { -+ .name = "bfq.time_recursive", -+ .private = offsetof(struct bfq_group, stats.time), -+ .seq_show = bfqg_print_stat_recursive, -+ }, -+ { -+ .name = "bfq.sectors_recursive", -+ .seq_show = bfqg_print_stat_sectors_recursive, -+ }, -+ { -+ .name = "bfq.io_service_bytes_recursive", -+ .private = (unsigned long)&blkcg_policy_bfq, -+ .seq_show = blkg_print_stat_bytes_recursive, -+ }, -+ { -+ .name = "bfq.io_serviced_recursive", -+ .private = (unsigned long)&blkcg_policy_bfq, -+ .seq_show = blkg_print_stat_ios_recursive, -+ }, -+ { -+ .name = "bfq.io_service_time_recursive", -+ .private = offsetof(struct bfq_group, stats.service_time), -+ .seq_show = bfqg_print_rwstat_recursive, -+ }, -+ { -+ .name = "bfq.io_wait_time_recursive", -+ .private = offsetof(struct bfq_group, stats.wait_time), -+ .seq_show = bfqg_print_rwstat_recursive, -+ }, -+ { -+ .name = "bfq.io_merged_recursive", -+ .private = offsetof(struct bfq_group, stats.merged), -+ .seq_show = bfqg_print_rwstat_recursive, -+ }, -+ { -+ .name = "bfq.io_queued_recursive", -+ .private = offsetof(struct bfq_group, stats.queued), -+ .seq_show = bfqg_print_rwstat_recursive, -+ }, -+ { -+ .name = "bfq.avg_queue_size", -+ .seq_show = bfqg_print_avg_queue_size, -+ }, -+ { -+ .name = "bfq.group_wait_time", -+ .private = offsetof(struct bfq_group, stats.group_wait_time), -+ 
.seq_show = bfqg_print_stat, -+ }, -+ { -+ .name = "bfq.idle_time", -+ .private = offsetof(struct bfq_group, stats.idle_time), -+ .seq_show = bfqg_print_stat, -+ }, -+ { -+ .name = "bfq.empty_time", -+ .private = offsetof(struct bfq_group, stats.empty_time), -+ .seq_show = bfqg_print_stat, -+ }, -+ { -+ .name = "bfq.dequeue", -+ .private = offsetof(struct bfq_group, stats.dequeue), -+ .seq_show = bfqg_print_stat, -+ }, -+ { } /* terminate */ -+}; -+ -+static struct cftype bfq_blkg_files[] = { -+ { -+ .name = "bfq.weight", -+ .flags = CFTYPE_NOT_ON_ROOT, -+ .seq_show = bfq_io_show_weight, -+ .write = bfq_io_set_weight, -+ }, -+ {} /* terminate */ -+}; -+ -+#else /* CONFIG_BFQ_SQ_GROUP_IOSCHED */ -+ -+static inline void bfqg_stats_update_io_add(struct bfq_group *bfqg, -+ struct bfq_queue *bfqq, unsigned int op) { } -+static inline void -+bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) { } -+static inline void -+bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op) { } -+static inline void bfqg_stats_update_completion(struct bfq_group *bfqg, -+ uint64_t start_time, uint64_t io_start_time, -+ unsigned int op) { } -+static inline void -+bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg, -+ struct bfq_group *curr_bfqg) { } -+static inline void bfqg_stats_end_empty_time(struct bfqg_stats *stats) { } -+static inline void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { } -+static inline void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) { } -+static inline void bfqg_stats_update_idle_time(struct bfq_group *bfqg) { } -+static inline void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { } -+static inline void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg) { } -+ -+static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, -+ struct bfq_group *bfqg) {} -+ -+static void bfq_init_entity(struct bfq_entity *entity, -+ struct bfq_group *bfqg) -+{ -+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); -+ -+ entity->weight = entity->new_weight; -+ entity->orig_weight = entity->new_weight; -+ if (bfqq) { -+ bfqq->ioprio = bfqq->new_ioprio; -+ bfqq->ioprio_class = bfqq->new_ioprio_class; -+ } -+ entity->sched_data = &bfqg->sched_data; -+} -+ -+static void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) {} -+ -+static void bfq_end_wr_async(struct bfq_data *bfqd) -+{ -+ bfq_end_wr_async_queues(bfqd, bfqd->root_group); -+} -+ -+static struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd, -+ struct blkcg *blkcg) -+{ -+ return bfqd->root_group; -+} -+ -+static struct bfq_group *bfqq_group(struct bfq_queue *bfqq) -+{ -+ return bfqq->bfqd->root_group; -+} -+ -+static struct bfq_group * -+bfq_create_group_hierarchy(struct bfq_data *bfqd, int node) -+{ -+ struct bfq_group *bfqg; -+ int i; -+ -+ bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node); -+ if (!bfqg) -+ return NULL; -+ -+ for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) -+ bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT; -+ -+ return bfqg; -+} -+#endif -diff --git a/block/bfq-ioc.c b/block/bfq-ioc.c -new file mode 100644 -index 000000000000..fb7bb8f08b75 ---- /dev/null -+++ b/block/bfq-ioc.c -@@ -0,0 +1,36 @@ -+/* -+ * BFQ: I/O context handling. -+ * -+ * Based on ideas and code from CFQ: -+ * Copyright (C) 2003 Jens Axboe -+ * -+ * Copyright (C) 2008 Fabio Checconi -+ * Paolo Valente -+ * -+ * Copyright (C) 2010 Paolo Valente -+ */ -+ -+/** -+ * icq_to_bic - convert iocontext queue structure to bfq_io_cq. 
-+ * @icq: the iocontext queue. -+ */ -+static struct bfq_io_cq *icq_to_bic(struct io_cq *icq) -+{ -+ /* bic->icq is the first member, %NULL will convert to %NULL */ -+ return container_of(icq, struct bfq_io_cq, icq); -+} -+ -+/** -+ * bfq_bic_lookup - search into @ioc a bic associated to @bfqd. -+ * @bfqd: the lookup key. -+ * @ioc: the io_context of the process doing I/O. -+ * -+ * Queue lock must be held. -+ */ -+static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd, -+ struct io_context *ioc) -+{ -+ if (ioc) -+ return icq_to_bic(ioc_lookup_icq(ioc, bfqd->queue)); -+ return NULL; -+} -diff --git a/block/bfq-sched.c b/block/bfq-sched.c -new file mode 100644 -index 000000000000..ac8991bca9fa ---- /dev/null -+++ b/block/bfq-sched.c -@@ -0,0 +1,2002 @@ -+/* -+ * BFQ: Hierarchical B-WF2Q+ scheduler. -+ * -+ * Based on ideas and code from CFQ: -+ * Copyright (C) 2003 Jens Axboe -+ * -+ * Copyright (C) 2008 Fabio Checconi -+ * Paolo Valente -+ * -+ * Copyright (C) 2015 Paolo Valente -+ * -+ * Copyright (C) 2016 Paolo Valente -+ */ -+ -+static struct bfq_group *bfqq_group(struct bfq_queue *bfqq); -+ -+/** -+ * bfq_gt - compare two timestamps. -+ * @a: first ts. -+ * @b: second ts. -+ * -+ * Return @a > @b, dealing with wrapping correctly. -+ */ -+static int bfq_gt(u64 a, u64 b) -+{ -+ return (s64)(a - b) > 0; -+} -+ -+static struct bfq_entity *bfq_root_active_entity(struct rb_root *tree) -+{ -+ struct rb_node *node = tree->rb_node; -+ -+ return rb_entry(node, struct bfq_entity, rb_node); -+} -+ -+static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd); -+ -+static bool bfq_update_parent_budget(struct bfq_entity *next_in_service); -+ -+/** -+ * bfq_update_next_in_service - update sd->next_in_service -+ * @sd: sched_data for which to perform the update. -+ * @new_entity: if not NULL, pointer to the entity whose activation, -+ * requeueing or repositionig triggered the invocation of -+ * this function. -+ * -+ * This function is called to update sd->next_in_service, which, in -+ * its turn, may change as a consequence of the insertion or -+ * extraction of an entity into/from one of the active trees of -+ * sd. These insertions/extractions occur as a consequence of -+ * activations/deactivations of entities, with some activations being -+ * 'true' activations, and other activations being requeueings (i.e., -+ * implementing the second, requeueing phase of the mechanism used to -+ * reposition an entity in its active tree; see comments on -+ * __bfq_activate_entity and __bfq_requeue_entity for details). In -+ * both the last two activation sub-cases, new_entity points to the -+ * just activated or requeued entity. -+ * -+ * Returns true if sd->next_in_service changes in such a way that -+ * entity->parent may become the next_in_service for its parent -+ * entity. -+ */ -+static bool bfq_update_next_in_service(struct bfq_sched_data *sd, -+ struct bfq_entity *new_entity) -+{ -+ struct bfq_entity *next_in_service = sd->next_in_service; -+ struct bfq_queue *bfqq; -+ bool parent_sched_may_change = false; -+ -+ /* -+ * If this update is triggered by the activation, requeueing -+ * or repositiong of an entity that does not coincide with -+ * sd->next_in_service, then a full lookup in the active tree -+ * can be avoided. 
In fact, it is enough to check whether the -+ * just-modified entity has a higher priority than -+ * sd->next_in_service, or, even if it has the same priority -+ * as sd->next_in_service, is eligible and has a lower virtual -+ * finish time than sd->next_in_service. If this compound -+ * condition holds, then the new entity becomes the new -+ * next_in_service. Otherwise no change is needed. -+ */ -+ if (new_entity && new_entity != sd->next_in_service) { -+ /* -+ * Flag used to decide whether to replace -+ * sd->next_in_service with new_entity. Tentatively -+ * set to true, and left as true if -+ * sd->next_in_service is NULL. -+ */ -+ bool replace_next = true; -+ -+ /* -+ * If there is already a next_in_service candidate -+ * entity, then compare class priorities or timestamps -+ * to decide whether to replace sd->service_tree with -+ * new_entity. -+ */ -+ if (next_in_service) { -+ unsigned int new_entity_class_idx = -+ bfq_class_idx(new_entity); -+ struct bfq_service_tree *st = -+ sd->service_tree + new_entity_class_idx; -+ -+ /* -+ * For efficiency, evaluate the most likely -+ * sub-condition first. -+ */ -+ replace_next = -+ (new_entity_class_idx == -+ bfq_class_idx(next_in_service) -+ && -+ !bfq_gt(new_entity->start, st->vtime) -+ && -+ bfq_gt(next_in_service->finish, -+ new_entity->finish)) -+ || -+ new_entity_class_idx < -+ bfq_class_idx(next_in_service); -+ } -+ -+ if (replace_next) -+ next_in_service = new_entity; -+ } else /* invoked because of a deactivation: lookup needed */ -+ next_in_service = bfq_lookup_next_entity(sd); -+ -+ if (next_in_service) { -+ parent_sched_may_change = !sd->next_in_service || -+ bfq_update_parent_budget(next_in_service); -+ } -+ -+ sd->next_in_service = next_in_service; -+ -+ if (!next_in_service) -+ return parent_sched_may_change; -+ -+ bfqq = bfq_entity_to_bfqq(next_in_service); -+ if (bfqq) -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "update_next_in_service: chosen this queue"); -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+ else { -+ struct bfq_group *bfqg = -+ container_of(next_in_service, -+ struct bfq_group, entity); -+ -+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg, -+ "update_next_in_service: chosen this entity"); -+ } -+#endif -+ return parent_sched_may_change; -+} -+ -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+/* both next loops stop at one of the child entities of the root group */ -+#define for_each_entity(entity) \ -+ for (; entity ; entity = entity->parent) -+ -+/* -+ * For each iteration, compute parent in advance, so as to be safe if -+ * entity is deallocated during the iteration. Such a deallocation may -+ * happen as a consequence of a bfq_put_queue that frees the bfq_queue -+ * containing entity. -+ */ -+#define for_each_entity_safe(entity, parent) \ -+ for (; entity && ({ parent = entity->parent; 1; }); entity = parent) -+ -+/* -+ * Returns true if this budget changes may let next_in_service->parent -+ * become the next_in_service entity for its parent entity. -+ */ -+static bool bfq_update_parent_budget(struct bfq_entity *next_in_service) -+{ -+ struct bfq_entity *bfqg_entity; -+ struct bfq_group *bfqg; -+ struct bfq_sched_data *group_sd; -+ bool ret = false; -+ -+ BUG_ON(!next_in_service); -+ -+ group_sd = next_in_service->sched_data; -+ -+ bfqg = container_of(group_sd, struct bfq_group, sched_data); -+ /* -+ * bfq_group's my_entity field is not NULL only if the group -+ * is not the root group. We must not touch the root entity -+ * as it must never become an in-service entity. 
-+ */ -+ bfqg_entity = bfqg->my_entity; -+ if (bfqg_entity) { -+ if (bfqg_entity->budget > next_in_service->budget) -+ ret = true; -+ bfqg_entity->budget = next_in_service->budget; -+ } -+ -+ return ret; -+} -+ -+/* -+ * This function tells whether entity stops being a candidate for next -+ * service, according to the following logic. -+ * -+ * This function is invoked for an entity that is about to be set in -+ * service. If such an entity is a queue, then the entity is no longer -+ * a candidate for next service (i.e, a candidate entity to serve -+ * after the in-service entity is expired). The function then returns -+ * true. -+ * -+ * In contrast, the entity could stil be a candidate for next service -+ * if it is not a queue, and has more than one child. In fact, even if -+ * one of its children is about to be set in service, other children -+ * may still be the next to serve. As a consequence, a non-queue -+ * entity is not a candidate for next-service only if it has only one -+ * child. And only if this condition holds, then the function returns -+ * true for a non-queue entity. -+ */ -+static bool bfq_no_longer_next_in_service(struct bfq_entity *entity) -+{ -+ struct bfq_group *bfqg; -+ -+ if (bfq_entity_to_bfqq(entity)) -+ return true; -+ -+ bfqg = container_of(entity, struct bfq_group, entity); -+ -+ BUG_ON(bfqg == ((struct bfq_data *)(bfqg->bfqd))->root_group); -+ BUG_ON(bfqg->active_entities == 0); -+ if (bfqg->active_entities == 1) -+ return true; -+ -+ return false; -+} -+ -+#else /* CONFIG_BFQ_SQ_GROUP_IOSCHED */ -+#define for_each_entity(entity) \ -+ for (; entity ; entity = NULL) -+ -+#define for_each_entity_safe(entity, parent) \ -+ for (parent = NULL; entity ; entity = parent) -+ -+static bool bfq_update_parent_budget(struct bfq_entity *next_in_service) -+{ -+ return false; -+} -+ -+static bool bfq_no_longer_next_in_service(struct bfq_entity *entity) -+{ -+ return true; -+} -+ -+#endif /* CONFIG_BFQ_SQ_GROUP_IOSCHED */ -+ -+/* -+ * Shift for timestamp calculations. This actually limits the maximum -+ * service allowed in one timestamp delta (small shift values increase it), -+ * the maximum total weight that can be used for the queues in the system -+ * (big shift values increase it), and the period of virtual time -+ * wraparounds. -+ */ -+#define WFQ_SERVICE_SHIFT 22 -+ -+static struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity) -+{ -+ struct bfq_queue *bfqq = NULL; -+ -+ BUG_ON(!entity); -+ -+ if (!entity->my_sched_data) -+ bfqq = container_of(entity, struct bfq_queue, entity); -+ -+ return bfqq; -+} -+ -+ -+/** -+ * bfq_delta - map service into the virtual time domain. -+ * @service: amount of service. -+ * @weight: scale factor (weight of an entity or weight sum). -+ */ -+static u64 bfq_delta(unsigned long service, unsigned long weight) -+{ -+ u64 d = (u64)service << WFQ_SERVICE_SHIFT; -+ -+ do_div(d, weight); -+ return d; -+} -+ -+/** -+ * bfq_calc_finish - assign the finish time to an entity. -+ * @entity: the entity to act upon. -+ * @service: the service to be charged to the entity. 
-+ */ -+static void bfq_calc_finish(struct bfq_entity *entity, unsigned long service) -+{ -+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); -+ unsigned long long start, finish, delta; -+ -+ BUG_ON(entity->weight == 0); -+ -+ entity->finish = entity->start + -+ bfq_delta(service, entity->weight); -+ -+ start = ((entity->start>>10)*1000)>>12; -+ finish = ((entity->finish>>10)*1000)>>12; -+ delta = ((bfq_delta(service, entity->weight)>>10)*1000)>>12; -+ -+ if (bfqq) { -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "calc_finish: serv %lu, w %d", -+ service, entity->weight); -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "calc_finish: start %llu, finish %llu, delta %llu", -+ start, finish, delta); -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+ } else { -+ struct bfq_group *bfqg = -+ container_of(entity, struct bfq_group, entity); -+ -+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg, -+ "calc_finish group: serv %lu, w %d", -+ service, entity->weight); -+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg, -+ "calc_finish group: start %llu, finish %llu, delta %llu", -+ start, finish, delta); -+#endif -+ } -+} -+ -+/** -+ * bfq_entity_of - get an entity from a node. -+ * @node: the node field of the entity. -+ * -+ * Convert a node pointer to the relative entity. This is used only -+ * to simplify the logic of some functions and not as the generic -+ * conversion mechanism because, e.g., in the tree walking functions, -+ * the check for a %NULL value would be redundant. -+ */ -+static struct bfq_entity *bfq_entity_of(struct rb_node *node) -+{ -+ struct bfq_entity *entity = NULL; -+ -+ if (node) -+ entity = rb_entry(node, struct bfq_entity, rb_node); -+ -+ return entity; -+} -+ -+/** -+ * bfq_extract - remove an entity from a tree. -+ * @root: the tree root. -+ * @entity: the entity to remove. -+ */ -+static void bfq_extract(struct rb_root *root, struct bfq_entity *entity) -+{ -+ BUG_ON(entity->tree != root); -+ -+ entity->tree = NULL; -+ rb_erase(&entity->rb_node, root); -+} -+ -+/** -+ * bfq_idle_extract - extract an entity from the idle tree. -+ * @st: the service tree of the owning @entity. -+ * @entity: the entity being removed. -+ */ -+static void bfq_idle_extract(struct bfq_service_tree *st, -+ struct bfq_entity *entity) -+{ -+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); -+ struct rb_node *next; -+ -+ BUG_ON(entity->tree != &st->idle); -+ -+ if (entity == st->first_idle) { -+ next = rb_next(&entity->rb_node); -+ st->first_idle = bfq_entity_of(next); -+ } -+ -+ if (entity == st->last_idle) { -+ next = rb_prev(&entity->rb_node); -+ st->last_idle = bfq_entity_of(next); -+ } -+ -+ bfq_extract(&st->idle, entity); -+ -+ if (bfqq) -+ list_del(&bfqq->bfqq_list); -+} -+ -+/** -+ * bfq_insert - generic tree insertion. -+ * @root: tree root. -+ * @entity: entity to insert. -+ * -+ * This is used for the idle and the active tree, since they are both -+ * ordered by finish time. -+ */ -+static void bfq_insert(struct rb_root *root, struct bfq_entity *entity) -+{ -+ struct bfq_entity *entry; -+ struct rb_node **node = &root->rb_node; -+ struct rb_node *parent = NULL; -+ -+ BUG_ON(entity->tree); -+ -+ while (*node) { -+ parent = *node; -+ entry = rb_entry(parent, struct bfq_entity, rb_node); -+ -+ if (bfq_gt(entry->finish, entity->finish)) -+ node = &parent->rb_left; -+ else -+ node = &parent->rb_right; -+ } -+ -+ rb_link_node(&entity->rb_node, parent, node); -+ rb_insert_color(&entity->rb_node, root); -+ -+ entity->tree = root; -+} -+ -+/** -+ * bfq_update_min - update the min_start field of a entity. 
-+ * @entity: the entity to update. -+ * @node: one of its children. -+ * -+ * This function is called when @entity may store an invalid value for -+ * min_start due to updates to the active tree. The function assumes -+ * that the subtree rooted at @node (which may be its left or its right -+ * child) has a valid min_start value. -+ */ -+static void bfq_update_min(struct bfq_entity *entity, struct rb_node *node) -+{ -+ struct bfq_entity *child; -+ -+ if (node) { -+ child = rb_entry(node, struct bfq_entity, rb_node); -+ if (bfq_gt(entity->min_start, child->min_start)) -+ entity->min_start = child->min_start; -+ } -+} -+ -+/** -+ * bfq_update_active_node - recalculate min_start. -+ * @node: the node to update. -+ * -+ * @node may have changed position or one of its children may have moved, -+ * this function updates its min_start value. The left and right subtrees -+ * are assumed to hold a correct min_start value. -+ */ -+static void bfq_update_active_node(struct rb_node *node) -+{ -+ struct bfq_entity *entity = rb_entry(node, struct bfq_entity, rb_node); -+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); -+ -+ entity->min_start = entity->start; -+ bfq_update_min(entity, node->rb_right); -+ bfq_update_min(entity, node->rb_left); -+ -+ if (bfqq) { -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "update_active_node: new min_start %llu", -+ ((entity->min_start>>10)*1000)>>12); -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+ } else { -+ struct bfq_group *bfqg = -+ container_of(entity, struct bfq_group, entity); -+ -+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg, -+ "update_active_node: new min_start %llu", -+ ((entity->min_start>>10)*1000)>>12); -+#endif -+ } -+} -+ -+/** -+ * bfq_update_active_tree - update min_start for the whole active tree. -+ * @node: the starting node. -+ * -+ * @node must be the deepest modified node after an update. This function -+ * updates its min_start using the values held by its children, assuming -+ * that they did not change, and then updates all the nodes that may have -+ * changed in the path to the root. The only nodes that may have changed -+ * are the ones in the path or their siblings. -+ */ -+static void bfq_update_active_tree(struct rb_node *node) -+{ -+ struct rb_node *parent; -+ -+up: -+ bfq_update_active_node(node); -+ -+ parent = rb_parent(node); -+ if (!parent) -+ return; -+ -+ if (node == parent->rb_left && parent->rb_right) -+ bfq_update_active_node(parent->rb_right); -+ else if (parent->rb_left) -+ bfq_update_active_node(parent->rb_left); -+ -+ node = parent; -+ goto up; -+} -+ -+static void bfq_weights_tree_add(struct bfq_data *bfqd, -+ struct bfq_entity *entity, -+ struct rb_root *root); -+ -+static void bfq_weights_tree_remove(struct bfq_data *bfqd, -+ struct bfq_entity *entity, -+ struct rb_root *root); -+ -+ -+/** -+ * bfq_active_insert - insert an entity in the active tree of its -+ * group/device. -+ * @st: the service tree of the entity. -+ * @entity: the entity being inserted. -+ * -+ * The active tree is ordered by finish time, but an extra key is kept -+ * per each node, containing the minimum value for the start times of -+ * its children (and the node itself), so it's possible to search for -+ * the eligible node with the lowest finish time in logarithmic time. 
-+ */ -+static void bfq_active_insert(struct bfq_service_tree *st, -+ struct bfq_entity *entity) -+{ -+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); -+ struct rb_node *node = &entity->rb_node; -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+ struct bfq_sched_data *sd = NULL; -+ struct bfq_group *bfqg = NULL; -+ struct bfq_data *bfqd = NULL; -+#endif -+ -+ bfq_insert(&st->active, entity); -+ -+ if (node->rb_left) -+ node = node->rb_left; -+ else if (node->rb_right) -+ node = node->rb_right; -+ -+ bfq_update_active_tree(node); -+ -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+ sd = entity->sched_data; -+ bfqg = container_of(sd, struct bfq_group, sched_data); -+ BUG_ON(!bfqg); -+ bfqd = (struct bfq_data *)bfqg->bfqd; -+#endif -+ if (bfqq) -+ list_add(&bfqq->bfqq_list, &bfqq->bfqd->active_list); -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+ else { /* bfq_group */ -+ BUG_ON(!bfqd); -+ bfq_weights_tree_add(bfqd, entity, &bfqd->group_weights_tree); -+ } -+ if (bfqg != bfqd->root_group) { -+ BUG_ON(!bfqg); -+ BUG_ON(!bfqd); -+ bfqg->active_entities++; -+ } -+#endif -+} -+ -+/** -+ * bfq_ioprio_to_weight - calc a weight from an ioprio. -+ * @ioprio: the ioprio value to convert. -+ */ -+static unsigned short bfq_ioprio_to_weight(int ioprio) -+{ -+ BUG_ON(ioprio < 0 || ioprio >= IOPRIO_BE_NR); -+ return (IOPRIO_BE_NR - ioprio) * BFQ_WEIGHT_CONVERSION_COEFF; -+} -+ -+/** -+ * bfq_weight_to_ioprio - calc an ioprio from a weight. -+ * @weight: the weight value to convert. -+ * -+ * To preserve as much as possible the old only-ioprio user interface, -+ * 0 is used as an escape ioprio value for weights (numerically) equal or -+ * larger than IOPRIO_BE_NR * BFQ_WEIGHT_CONVERSION_COEFF. -+ */ -+static unsigned short bfq_weight_to_ioprio(int weight) -+{ -+ BUG_ON(weight < BFQ_MIN_WEIGHT || weight > BFQ_MAX_WEIGHT); -+ return IOPRIO_BE_NR * BFQ_WEIGHT_CONVERSION_COEFF - weight < 0 ? -+ 0 : IOPRIO_BE_NR * BFQ_WEIGHT_CONVERSION_COEFF - weight; -+} -+ -+static void bfq_get_entity(struct bfq_entity *entity) -+{ -+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); -+ -+ if (bfqq) { -+ bfqq->ref++; -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "get_entity: %p %d", -+ bfqq, bfqq->ref); -+ } -+} -+ -+/** -+ * bfq_find_deepest - find the deepest node that an extraction can modify. -+ * @node: the node being removed. -+ * -+ * Do the first step of an extraction in an rb tree, looking for the -+ * node that will replace @node, and returning the deepest node that -+ * the following modifications to the tree can touch. If @node is the -+ * last node in the tree return %NULL. -+ */ -+static struct rb_node *bfq_find_deepest(struct rb_node *node) -+{ -+ struct rb_node *deepest; -+ -+ if (!node->rb_right && !node->rb_left) -+ deepest = rb_parent(node); -+ else if (!node->rb_right) -+ deepest = node->rb_left; -+ else if (!node->rb_left) -+ deepest = node->rb_right; -+ else { -+ deepest = rb_next(node); -+ if (deepest->rb_right) -+ deepest = deepest->rb_right; -+ else if (rb_parent(deepest) != node) -+ deepest = rb_parent(deepest); -+ } -+ -+ return deepest; -+} -+ -+/** -+ * bfq_active_extract - remove an entity from the active tree. -+ * @st: the service_tree containing the tree. -+ * @entity: the entity being removed. 
-+ */ -+static void bfq_active_extract(struct bfq_service_tree *st, -+ struct bfq_entity *entity) -+{ -+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); -+ struct rb_node *node; -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+ struct bfq_sched_data *sd = NULL; -+ struct bfq_group *bfqg = NULL; -+ struct bfq_data *bfqd = NULL; -+#endif -+ -+ node = bfq_find_deepest(&entity->rb_node); -+ bfq_extract(&st->active, entity); -+ -+ if (node) -+ bfq_update_active_tree(node); -+ -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+ sd = entity->sched_data; -+ bfqg = container_of(sd, struct bfq_group, sched_data); -+ BUG_ON(!bfqg); -+ bfqd = (struct bfq_data *)bfqg->bfqd; -+#endif -+ if (bfqq) -+ list_del(&bfqq->bfqq_list); -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+ else { /* bfq_group */ -+ BUG_ON(!bfqd); -+ bfq_weights_tree_remove(bfqd, entity, -+ &bfqd->group_weights_tree); -+ } -+ if (bfqg != bfqd->root_group) { -+ BUG_ON(!bfqg); -+ BUG_ON(!bfqd); -+ BUG_ON(!bfqg->active_entities); -+ bfqg->active_entities--; -+ } -+#endif -+} -+ -+/** -+ * bfq_idle_insert - insert an entity into the idle tree. -+ * @st: the service tree containing the tree. -+ * @entity: the entity to insert. -+ */ -+static void bfq_idle_insert(struct bfq_service_tree *st, -+ struct bfq_entity *entity) -+{ -+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); -+ struct bfq_entity *first_idle = st->first_idle; -+ struct bfq_entity *last_idle = st->last_idle; -+ -+ if (!first_idle || bfq_gt(first_idle->finish, entity->finish)) -+ st->first_idle = entity; -+ if (!last_idle || bfq_gt(entity->finish, last_idle->finish)) -+ st->last_idle = entity; -+ -+ bfq_insert(&st->idle, entity); -+ -+ if (bfqq) -+ list_add(&bfqq->bfqq_list, &bfqq->bfqd->idle_list); -+} -+ -+/** -+ * bfq_forget_entity - do not consider entity any longer for scheduling -+ * @st: the service tree. -+ * @entity: the entity being removed. -+ * @is_in_service: true if entity is currently the in-service entity. -+ * -+ * Forget everything about @entity. In addition, if entity represents -+ * a queue, and the latter is not in service, then release the service -+ * reference to the queue (the one taken through bfq_get_entity). In -+ * fact, in this case, there is really no more service reference to -+ * the queue, as the latter is also outside any service tree. If, -+ * instead, the queue is in service, then __bfq_bfqd_reset_in_service -+ * will take care of putting the reference when the queue finally -+ * stops being served. -+ */ -+static void bfq_forget_entity(struct bfq_service_tree *st, -+ struct bfq_entity *entity, -+ bool is_in_service) -+{ -+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); -+ BUG_ON(!entity->on_st); -+ -+ entity->on_st = false; -+ st->wsum -= entity->weight; -+ if (bfqq && !is_in_service) { -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "forget_entity (before): %p %d", -+ bfqq, bfqq->ref); -+ bfq_put_queue(bfqq); -+ } -+} -+ -+/** -+ * bfq_put_idle_entity - release the idle tree ref of an entity. -+ * @st: service tree for the entity. -+ * @entity: the entity being released. -+ */ -+static void bfq_put_idle_entity(struct bfq_service_tree *st, -+ struct bfq_entity *entity) -+{ -+ bfq_idle_extract(st, entity); -+ bfq_forget_entity(st, entity, -+ entity == entity->sched_data->in_service_entity); -+} -+ -+/** -+ * bfq_forget_idle - update the idle tree if necessary. -+ * @st: the service tree to act upon. -+ * -+ * To preserve the global O(log N) complexity we only remove one entry here; -+ * as the idle tree will not grow indefinitely this can be done safely. 
-+ */ -+static void bfq_forget_idle(struct bfq_service_tree *st) -+{ -+ struct bfq_entity *first_idle = st->first_idle; -+ struct bfq_entity *last_idle = st->last_idle; -+ -+ if (RB_EMPTY_ROOT(&st->active) && last_idle && -+ !bfq_gt(last_idle->finish, st->vtime)) { -+ /* -+ * Forget the whole idle tree, increasing the vtime past -+ * the last finish time of idle entities. -+ */ -+ st->vtime = last_idle->finish; -+ } -+ -+ if (first_idle && !bfq_gt(first_idle->finish, st->vtime)) -+ bfq_put_idle_entity(st, first_idle); -+} -+ -+/* -+ * Update weight and priority of entity. If update_class_too is true, -+ * then update the ioprio_class of entity too. -+ * -+ * The reason why the update of ioprio_class is controlled through the -+ * last parameter is as follows. Changing the ioprio class of an -+ * entity implies changing the destination service trees for that -+ * entity. If such a change occurred when the entity is already on one -+ * of the service trees for its previous class, then the state of the -+ * entity would become more complex: none of the new possible service -+ * trees for the entity, according to bfq_entity_service_tree(), would -+ * match any of the possible service trees on which the entity -+ * is. Complex operations involving these trees, such as entity -+ * activations and deactivations, should take into account this -+ * additional complexity. To avoid this issue, this function is -+ * invoked with update_class_too unset in the points in the code where -+ * entity may happen to be on some tree. -+ */ -+static struct bfq_service_tree * -+__bfq_entity_update_weight_prio(struct bfq_service_tree *old_st, -+ struct bfq_entity *entity, -+ bool update_class_too) -+{ -+ struct bfq_service_tree *new_st = old_st; -+ -+ if (entity->prio_changed) { -+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); -+ unsigned int prev_weight, new_weight; -+ struct bfq_data *bfqd = NULL; -+ struct rb_root *root; -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+ struct bfq_sched_data *sd; -+ struct bfq_group *bfqg; -+#endif -+ -+ if (bfqq) -+ bfqd = bfqq->bfqd; -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+ else { -+ sd = entity->my_sched_data; -+ bfqg = container_of(sd, struct bfq_group, sched_data); -+ BUG_ON(!bfqg); -+ bfqd = (struct bfq_data *)bfqg->bfqd; -+ BUG_ON(!bfqd); -+ } -+#endif -+ -+ BUG_ON(old_st->wsum < entity->weight); -+ old_st->wsum -= entity->weight; -+ -+ if (entity->new_weight != entity->orig_weight) { -+ if (entity->new_weight < BFQ_MIN_WEIGHT || -+ entity->new_weight > BFQ_MAX_WEIGHT) { -+ pr_crit("update_weight_prio: new_weight %d\n", -+ entity->new_weight); -+ if (entity->new_weight < BFQ_MIN_WEIGHT) -+ entity->new_weight = BFQ_MIN_WEIGHT; -+ else -+ entity->new_weight = BFQ_MAX_WEIGHT; -+ } -+ entity->orig_weight = entity->new_weight; -+ if (bfqq) -+ bfqq->ioprio = -+ bfq_weight_to_ioprio(entity->orig_weight); -+ } -+ -+ if (bfqq && update_class_too) -+ bfqq->ioprio_class = bfqq->new_ioprio_class; -+ -+ /* -+ * Reset prio_changed only if the ioprio_class change -+ * is not pending any longer. -+ */ -+ if (!bfqq || bfqq->ioprio_class == bfqq->new_ioprio_class) -+ entity->prio_changed = 0; -+ -+ /* -+ * NOTE: here we may be changing the weight too early, -+ * this will cause unfairness. The correct approach -+ * would have required additional complexity to defer -+ * weight changes to the proper time instants (i.e., -+ * when entity->finish <= old_st->vtime). 
-+ */ -+ new_st = bfq_entity_service_tree(entity); -+ -+ prev_weight = entity->weight; -+ new_weight = entity->orig_weight * -+ (bfqq ? bfqq->wr_coeff : 1); -+ /* -+ * If the weight of the entity changes, remove the entity -+ * from its old weight counter (if there is a counter -+ * associated with the entity), and add it to the counter -+ * associated with its new weight. -+ */ -+ if (prev_weight != new_weight) { -+ if (bfqq) -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "weight changed %d %d(%d %d)", -+ prev_weight, new_weight, -+ entity->orig_weight, -+ bfqq->wr_coeff); -+ -+ root = bfqq ? &bfqd->queue_weights_tree : -+ &bfqd->group_weights_tree; -+ bfq_weights_tree_remove(bfqd, entity, root); -+ } -+ entity->weight = new_weight; -+ /* -+ * Add the entity to its weights tree only if it is -+ * not associated with a weight-raised queue. -+ */ -+ if (prev_weight != new_weight && -+ (bfqq ? bfqq->wr_coeff == 1 : 1)) -+ /* If we get here, root has been initialized. */ -+ bfq_weights_tree_add(bfqd, entity, root); -+ -+ new_st->wsum += entity->weight; -+ -+ if (new_st != old_st) -+ entity->start = new_st->vtime; -+ } -+ -+ return new_st; -+} -+ -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+static void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg); -+#endif -+ -+/** -+ * bfq_bfqq_served - update the scheduler status after selection for -+ * service. -+ * @bfqq: the queue being served. -+ * @served: bytes to transfer. -+ * -+ * NOTE: this can be optimized, as the timestamps of upper level entities -+ * are synchronized every time a new bfqq is selected for service. By now, -+ * we keep it to better check consistency. -+ */ -+static void bfq_bfqq_served(struct bfq_queue *bfqq, int served) -+{ -+ struct bfq_entity *entity = &bfqq->entity; -+ struct bfq_service_tree *st; -+ -+ for_each_entity(entity) { -+ st = bfq_entity_service_tree(entity); -+ -+ entity->service += served; -+ -+ BUG_ON(st->wsum == 0); -+ -+ st->vtime += bfq_delta(served, st->wsum); -+ bfq_forget_idle(st); -+ } -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+ bfqg_stats_set_start_empty_time(bfqq_group(bfqq)); -+#endif -+ st = bfq_entity_service_tree(&bfqq->entity); -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "bfqq_served %d secs, vtime %llu on %p", -+ served, ((st->vtime>>10)*1000)>>12, st); -+} -+ -+/** -+ * bfq_bfqq_charge_time - charge an amount of service equivalent to the length -+ * of the time interval during which bfqq has been in -+ * service. -+ * @bfqd: the device -+ * @bfqq: the queue that needs a service update. -+ * @time_ms: the amount of time during which the queue has received service -+ * -+ * If a queue does not consume its budget fast enough, then providing -+ * the queue with service fairness may impair throughput, more or less -+ * severely. For this reason, queues that consume their budget slowly -+ * are provided with time fairness instead of service fairness. This -+ * goal is achieved through the BFQ scheduling engine, even if such an -+ * engine works in the service, and not in the time domain. The trick -+ * is charging these queues with an inflated amount of service, equal -+ * to the amount of service that they would have received during their -+ * service slot if they had been fast, i.e., if their requests had -+ * been dispatched at a rate equal to the estimated peak rate. -+ * -+ * It is worth noting that time fairness can cause important -+ * distortions in terms of bandwidth distribution, on devices with -+ * internal queueing. 
The reason is that I/O requests dispatched -+ * during the service slot of a queue may be served after that service -+ * slot is finished, and may have a total processing time loosely -+ * correlated with the duration of the service slot. This is -+ * especially true for short service slots. -+ */ -+static void bfq_bfqq_charge_time(struct bfq_data *bfqd, struct bfq_queue *bfqq, -+ unsigned long time_ms) -+{ -+ struct bfq_entity *entity = &bfqq->entity; -+ int tot_serv_to_charge = entity->service; -+ unsigned int timeout_ms = jiffies_to_msecs(bfq_timeout); -+ -+ if (time_ms > 0 && time_ms < timeout_ms) -+ tot_serv_to_charge = -+ (bfqd->bfq_max_budget * time_ms) / timeout_ms; -+ -+ if (tot_serv_to_charge < entity->service) -+ tot_serv_to_charge = entity->service; -+ -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "charge_time: %lu/%u ms, %d/%d/%d sectors", -+ time_ms, timeout_ms, entity->service, -+ tot_serv_to_charge, entity->budget); -+ -+ /* Increase budget to avoid inconsistencies */ -+ if (tot_serv_to_charge > entity->budget) -+ entity->budget = tot_serv_to_charge; -+ -+ bfq_bfqq_served(bfqq, -+ max_t(int, 0, tot_serv_to_charge - entity->service)); -+} -+ -+static void bfq_update_fin_time_enqueue(struct bfq_entity *entity, -+ struct bfq_service_tree *st, -+ bool backshifted) -+{ -+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); -+ struct bfq_sched_data *sd = entity->sched_data; -+ -+ /* -+ * When this function is invoked, entity is not in any service -+ * tree, then it is safe to invoke next function with the last -+ * parameter set (see the comments on the function). -+ */ -+ st = __bfq_entity_update_weight_prio(st, entity, true); -+ bfq_calc_finish(entity, entity->budget); -+ -+ /* -+ * If some queues enjoy backshifting for a while, then their -+ * (virtual) finish timestamps may happen to become lower and -+ * lower than the system virtual time. In particular, if -+ * these queues often happen to be idle for short time -+ * periods, and during such time periods other queues with -+ * higher timestamps happen to be busy, then the backshifted -+ * timestamps of the former queues can become much lower than -+ * the system virtual time. In fact, to serve the queues with -+ * higher timestamps while the ones with lower timestamps are -+ * idle, the system virtual time may be pushed-up to much -+ * higher values than the finish timestamps of the idle -+ * queues. As a consequence, the finish timestamps of all new -+ * or newly activated queues may end up being much larger than -+ * those of lucky queues with backshifted timestamps. The -+ * latter queues may then monopolize the device for a lot of -+ * time. This would simply break service guarantees. -+ * -+ * To reduce this problem, push up a little bit the -+ * backshifted timestamps of the queue associated with this -+ * entity (only a queue can happen to have the backshifted -+ * flag set): just enough to let the finish timestamp of the -+ * queue be equal to the current value of the system virtual -+ * time. This may introduce a little unfairness among queues -+ * with backshifted timestamps, but it does not break -+ * worst-case fairness guarantees. -+ * -+ * As a special case, if bfqq is weight-raised, push up -+ * timestamps much less, to keep very low the probability that -+ * this push up causes the backshifted finish timestamps of -+ * weight-raised queues to become higher than the backshifted -+ * finish timestamps of non weight-raised queues. 
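/*
 * Minimal sketch (not from the patch) of the timestamp push-up just
 * described: when a backshifted queue's finish time lags behind the
 * service tree's virtual time, both timestamps are shifted forward, and
 * the shift is scaled down by wr_coeff for weight-raised queues. Types
 * are simplified and the wraparound-safe bfq_gt() comparison is omitted.
 */
#include <stdio.h>

struct toy_entity {
        unsigned long long start;
        unsigned long long finish;
        unsigned int wr_coeff;  /* 1 if the queue is not weight-raised */
};

void toy_push_up(struct toy_entity *e, unsigned long long vtime)
{
        unsigned long long delta;

        if (e->finish >= vtime)
                return;                 /* finish not behind vtime: nothing to do */

        delta = vtime - e->finish;
        delta /= e->wr_coeff;           /* weight-raised queues are pushed up less */

        e->start += delta;
        e->finish += delta;
}

int main(void)
{
        struct toy_entity e = { .start = 100, .finish = 150, .wr_coeff = 1 };

        toy_push_up(&e, 1000);
        printf("start %llu finish %llu\n", e.start, e.finish);  /* 950 1000 */
        return 0;
}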
-+ */ -+ if (backshifted && bfq_gt(st->vtime, entity->finish)) { -+ unsigned long delta = st->vtime - entity->finish; -+ -+ if (bfqq) -+ delta /= bfqq->wr_coeff; -+ -+ entity->start += delta; -+ entity->finish += delta; -+ -+ if (bfqq) { -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "__activate_entity: new queue finish %llu", -+ ((entity->finish>>10)*1000)>>12); -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+ } else { -+ struct bfq_group *bfqg = -+ container_of(entity, struct bfq_group, entity); -+ -+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg, -+ "__activate_entity: new group finish %llu", -+ ((entity->finish>>10)*1000)>>12); -+#endif -+ } -+ } -+ -+ bfq_active_insert(st, entity); -+ -+ if (bfqq) { -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "__activate_entity: queue %seligible in st %p", -+ entity->start <= st->vtime ? "" : "non ", st); -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+ } else { -+ struct bfq_group *bfqg = -+ container_of(entity, struct bfq_group, entity); -+ -+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg, -+ "__activate_entity: group %seligible in st %p", -+ entity->start <= st->vtime ? "" : "non ", st); -+#endif -+ } -+ BUG_ON(RB_EMPTY_ROOT(&st->active)); -+ BUG_ON(&st->active != &sd->service_tree->active && -+ &st->active != &(sd->service_tree+1)->active && -+ &st->active != &(sd->service_tree+2)->active); -+} -+ -+/** -+ * __bfq_activate_entity - handle activation of entity. -+ * @entity: the entity being activated. -+ * @non_blocking_wait_rq: true if entity was waiting for a request -+ * -+ * Called for a 'true' activation, i.e., if entity is not active and -+ * one of its children receives a new request. -+ * -+ * Basically, this function updates the timestamps of entity and -+ * inserts entity into its active tree, ater possible extracting it -+ * from its idle tree. -+ */ -+static void __bfq_activate_entity(struct bfq_entity *entity, -+ bool non_blocking_wait_rq) -+{ -+ struct bfq_sched_data *sd = entity->sched_data; -+ struct bfq_service_tree *st = bfq_entity_service_tree(entity); -+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); -+ bool backshifted = false; -+ unsigned long long min_vstart; -+ -+ BUG_ON(!sd); -+ BUG_ON(!st); -+ -+ /* See comments on bfq_fqq_update_budg_for_activation */ -+ if (non_blocking_wait_rq && bfq_gt(st->vtime, entity->finish)) { -+ backshifted = true; -+ min_vstart = entity->finish; -+ } else -+ min_vstart = st->vtime; -+ -+ if (entity->tree == &st->idle) { -+ /* -+ * Must be on the idle tree, bfq_idle_extract() will -+ * check for that. -+ */ -+ bfq_idle_extract(st, entity); -+ entity->start = bfq_gt(min_vstart, entity->finish) ? -+ min_vstart : entity->finish; -+ } else { -+ /* -+ * The finish time of the entity may be invalid, and -+ * it is in the past for sure, otherwise the queue -+ * would have been on the idle tree. -+ */ -+ entity->start = min_vstart; -+ st->wsum += entity->weight; -+ /* -+ * entity is about to be inserted into a service tree, -+ * and then set in service: get a reference to make -+ * sure entity does not disappear until it is no -+ * longer in service or scheduled for service. 
-+ */ -+ bfq_get_entity(entity); -+ -+ BUG_ON(entity->on_st && bfqq); -+ -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+ if (entity->on_st && !bfqq) { -+ struct bfq_group *bfqg = -+ container_of(entity, struct bfq_group, -+ entity); -+ -+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, -+ bfqg, -+ "activate bug, class %d in_service %p", -+ bfq_class_idx(entity), sd->in_service_entity); -+ } -+#endif -+ BUG_ON(entity->on_st && !bfqq); -+ entity->on_st = true; -+ } -+ -+ bfq_update_fin_time_enqueue(entity, st, backshifted); -+} -+ -+/** -+ * __bfq_requeue_entity - handle requeueing or repositioning of an entity. -+ * @entity: the entity being requeued or repositioned. -+ * -+ * Requeueing is needed if this entity stops being served, which -+ * happens if a leaf descendant entity has expired. On the other hand, -+ * repositioning is needed if the next_inservice_entity for the child -+ * entity has changed. See the comments inside the function for -+ * details. -+ * -+ * Basically, this function: 1) removes entity from its active tree if -+ * present there, 2) updates the timestamps of entity and 3) inserts -+ * entity back into its active tree (in the new, right position for -+ * the new values of the timestamps). -+ */ -+static void __bfq_requeue_entity(struct bfq_entity *entity) -+{ -+ struct bfq_sched_data *sd = entity->sched_data; -+ struct bfq_service_tree *st = bfq_entity_service_tree(entity); -+ -+ BUG_ON(!sd); -+ BUG_ON(!st); -+ -+ BUG_ON(entity != sd->in_service_entity && -+ entity->tree != &st->active); -+ -+ if (entity == sd->in_service_entity) { -+ /* -+ * We are requeueing the current in-service entity, -+ * which may have to be done for one of the following -+ * reasons: -+ * - entity represents the in-service queue, and the -+ * in-service queue is being requeued after an -+ * expiration; -+ * - entity represents a group, and its budget has -+ * changed because one of its child entities has -+ * just been either activated or requeued for some -+ * reason; the timestamps of the entity need then to -+ * be updated, and the entity needs to be enqueued -+ * or repositioned accordingly. -+ * -+ * In particular, before requeueing, the start time of -+ * the entity must be moved forward to account for the -+ * service that the entity has received while in -+ * service. This is done by the next instructions. The -+ * finish time will then be updated according to this -+ * new value of the start time, and to the budget of -+ * the entity. -+ */ -+ bfq_calc_finish(entity, entity->service); -+ entity->start = entity->finish; -+ BUG_ON(entity->tree && entity->tree != &st->active); -+ /* -+ * In addition, if the entity had more than one child -+ * when set in service, then was not extracted from -+ * the active tree. This implies that the position of -+ * the entity in the active tree may need to be -+ * changed now, because we have just updated the start -+ * time of the entity, and we will update its finish -+ * time in a moment (the requeueing is then, more -+ * precisely, a repositioning in this case). To -+ * implement this repositioning, we: 1) dequeue the -+ * entity here, 2) update the finish time and -+ * requeue the entity according to the new -+ * timestamps below. 
-+ */ -+ if (entity->tree) -+ bfq_active_extract(st, entity); -+ } else { /* The entity is already active, and not in service */ -+ /* -+ * In this case, this function gets called only if the -+ * next_in_service entity below this entity has -+ * changed, and this change has caused the budget of -+ * this entity to change, which, finally implies that -+ * the finish time of this entity must be -+ * updated. Such an update may cause the scheduling, -+ * i.e., the position in the active tree, of this -+ * entity to change. We handle this change by: 1) -+ * dequeueing the entity here, 2) updating the finish -+ * time and requeueing the entity according to the new -+ * timestamps below. This is the same approach as the -+ * non-extracted-entity sub-case above. -+ */ -+ bfq_active_extract(st, entity); -+ } -+ -+ bfq_update_fin_time_enqueue(entity, st, false); -+} -+ -+static void __bfq_activate_requeue_entity(struct bfq_entity *entity, -+ struct bfq_sched_data *sd, -+ bool non_blocking_wait_rq) -+{ -+ struct bfq_service_tree *st = bfq_entity_service_tree(entity); -+ -+ if (sd->in_service_entity == entity || entity->tree == &st->active) -+ /* -+ * in service or already queued on the active tree, -+ * requeue or reposition -+ */ -+ __bfq_requeue_entity(entity); -+ else -+ /* -+ * Not in service and not queued on its active tree: -+ * the activity is idle and this is a true activation. -+ */ -+ __bfq_activate_entity(entity, non_blocking_wait_rq); -+} -+ -+ -+/** -+ * bfq_activate_entity - activate or requeue an entity representing a bfq_queue, -+ * and activate, requeue or reposition all ancestors -+ * for which such an update becomes necessary. -+ * @entity: the entity to activate. -+ * @non_blocking_wait_rq: true if this entity was waiting for a request -+ * @requeue: true if this is a requeue, which implies that bfqq is -+ * being expired; thus ALL its ancestors stop being served and must -+ * therefore be requeued -+ */ -+static void bfq_activate_requeue_entity(struct bfq_entity *entity, -+ bool non_blocking_wait_rq, -+ bool requeue) -+{ -+ struct bfq_sched_data *sd; -+ -+ for_each_entity(entity) { -+ BUG_ON(!entity); -+ sd = entity->sched_data; -+ __bfq_activate_requeue_entity(entity, sd, non_blocking_wait_rq); -+ -+ BUG_ON(RB_EMPTY_ROOT(&sd->service_tree->active) && -+ RB_EMPTY_ROOT(&(sd->service_tree+1)->active) && -+ RB_EMPTY_ROOT(&(sd->service_tree+2)->active)); -+ -+ if (!bfq_update_next_in_service(sd, entity) && !requeue) { -+ BUG_ON(!sd->next_in_service); -+ break; -+ } -+ BUG_ON(!sd->next_in_service); -+ } -+} -+ -+/** -+ * __bfq_deactivate_entity - deactivate an entity from its service tree. -+ * @entity: the entity to deactivate. -+ * @ins_into_idle_tree: if false, the entity will not be put into the -+ * idle tree. -+ * -+ * Deactivates an entity, independently from its previous state. Must -+ * be invoked only if entity is on a service tree. Extracts the entity -+ * from that tree, and if necessary and allowed, puts it on the idle -+ * tree. -+ */ -+static bool __bfq_deactivate_entity(struct bfq_entity *entity, -+ bool ins_into_idle_tree) -+{ -+ struct bfq_sched_data *sd = entity->sched_data; -+ struct bfq_service_tree *st; -+ bool is_in_service; -+ -+ if (!entity->on_st) { /* entity never activated, or already inactive */ -+ BUG_ON(sd && entity == sd->in_service_entity); -+ return false; -+ } -+ -+ /* -+ * If we get here, then entity is active, which implies that -+ * bfq_group_set_parent has already been invoked for the group -+ * represented by entity. 
Therefore, the field -+ * entity->sched_data has been set, and we can safely use it. -+ */ -+ st = bfq_entity_service_tree(entity); -+ is_in_service = entity == sd->in_service_entity; -+ -+ BUG_ON(is_in_service && entity->tree && entity->tree != &st->active); -+ -+ if (is_in_service) -+ bfq_calc_finish(entity, entity->service); -+ -+ if (entity->tree == &st->active) -+ bfq_active_extract(st, entity); -+ else if (!is_in_service && entity->tree == &st->idle) -+ bfq_idle_extract(st, entity); -+ else if (entity->tree) -+ BUG(); -+ -+ if (!ins_into_idle_tree || !bfq_gt(entity->finish, st->vtime)) -+ bfq_forget_entity(st, entity, is_in_service); -+ else -+ bfq_idle_insert(st, entity); -+ -+ return true; -+} -+ -+/** -+ * bfq_deactivate_entity - deactivate an entity representing a bfq_queue. -+ * @entity: the entity to deactivate. -+ * @ins_into_idle_tree: true if the entity can be put on the idle tree -+ */ -+static void bfq_deactivate_entity(struct bfq_entity *entity, -+ bool ins_into_idle_tree, -+ bool expiration) -+{ -+ struct bfq_sched_data *sd; -+ struct bfq_entity *parent = NULL; -+ -+ for_each_entity_safe(entity, parent) { -+ sd = entity->sched_data; -+ -+ BUG_ON(sd == NULL); /* -+ * It would mean that this is the -+ * root group. -+ */ -+ -+ BUG_ON(expiration && entity != sd->in_service_entity); -+ -+ BUG_ON(entity != sd->in_service_entity && -+ entity->tree == -+ &bfq_entity_service_tree(entity)->active && -+ !sd->next_in_service); -+ -+ if (!__bfq_deactivate_entity(entity, ins_into_idle_tree)) { -+ /* -+ * entity is not in any tree any more, so -+ * this deactivation is a no-op, and there is -+ * nothing to change for upper-level entities -+ * (in case of expiration, this can never -+ * happen). -+ */ -+ BUG_ON(expiration); /* -+ * entity cannot be already out of -+ * any tree -+ */ -+ return; -+ } -+ -+ if (sd->next_in_service == entity) -+ /* -+ * entity was the next_in_service entity, -+ * then, since entity has just been -+ * deactivated, a new one must be found. -+ */ -+ bfq_update_next_in_service(sd, NULL); -+ -+ if (sd->next_in_service) { -+ /* -+ * The parent entity is still backlogged, -+ * because next_in_service is not NULL. So, no -+ * further upwards deactivation must be -+ * performed. Yet, next_in_service has -+ * changed. Then the schedule does need to be -+ * updated upwards. -+ */ -+ BUG_ON(sd->next_in_service == entity); -+ break; -+ } -+ -+ /* -+ * If we get here, then the parent is no more -+ * backlogged and we need to propagate the -+ * deactivation upwards. Thus let the loop go on. -+ */ -+ -+ /* -+ * Also let parent be queued into the idle tree on -+ * deactivation, to preserve service guarantees, and -+ * assuming that who invoked this function does not -+ * need parent entities too to be removed completely. -+ */ -+ ins_into_idle_tree = true; -+ } -+ -+ /* -+ * If the deactivation loop is fully executed, then there are -+ * no more entities to touch and next loop is not executed at -+ * all. Otherwise, requeue remaining entities if they are -+ * about to stop receiving service, or reposition them if this -+ * is not the case. 
-+ */ -+ entity = parent; -+ for_each_entity(entity) { -+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); -+ -+ /* -+ * Invoke __bfq_requeue_entity on entity, even if -+ * already active, to requeue/reposition it in the -+ * active tree (because sd->next_in_service has -+ * changed) -+ */ -+ __bfq_requeue_entity(entity); -+ -+ sd = entity->sched_data; -+ BUG_ON(expiration && sd->in_service_entity != entity); -+ -+ if (bfqq) -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "invoking udpdate_next for this queue"); -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+ else { -+ struct bfq_group *bfqg = -+ container_of(entity, -+ struct bfq_group, entity); -+ -+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg, -+ "invoking udpdate_next for this entity"); -+ } -+#endif -+ if (!bfq_update_next_in_service(sd, entity) && -+ !expiration) -+ /* -+ * next_in_service unchanged or not causing -+ * any change in entity->parent->sd, and no -+ * requeueing needed for expiration: stop -+ * here. -+ */ -+ break; -+ } -+} -+ -+/** -+ * bfq_calc_vtime_jump - compute the value to which the vtime should jump, -+ * if needed, to have at least one entity eligible. -+ * @st: the service tree to act upon. -+ * -+ * Assumes that st is not empty. -+ */ -+static u64 bfq_calc_vtime_jump(struct bfq_service_tree *st) -+{ -+ struct bfq_entity *root_entity = bfq_root_active_entity(&st->active); -+ -+ if (bfq_gt(root_entity->min_start, st->vtime)) { -+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(root_entity); -+ -+ if (bfqq) -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "calc_vtime_jump: new value %llu", -+ root_entity->min_start); -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+ else { -+ struct bfq_group *bfqg = -+ container_of(root_entity, struct bfq_group, -+ entity); -+ -+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg, -+ "calc_vtime_jump: new value %llu", -+ root_entity->min_start); -+ } -+#endif -+ return root_entity->min_start; -+ } -+ return st->vtime; -+} -+ -+static void bfq_update_vtime(struct bfq_service_tree *st, u64 new_value) -+{ -+ if (new_value > st->vtime) { -+ st->vtime = new_value; -+ bfq_forget_idle(st); -+ } -+} -+ -+/** -+ * bfq_first_active_entity - find the eligible entity with -+ * the smallest finish time -+ * @st: the service tree to select from. -+ * @vtime: the system virtual to use as a reference for eligibility -+ * -+ * This function searches the first schedulable entity, starting from the -+ * root of the tree and going on the left every time on this side there is -+ * a subtree with at least one eligible (start >= vtime) entity. The path on -+ * the right is followed only if a) the left subtree contains no eligible -+ * entities and b) no eligible entity has been found yet. -+ */ -+static struct bfq_entity *bfq_first_active_entity(struct bfq_service_tree *st, -+ u64 vtime) -+{ -+ struct bfq_entity *entry, *first = NULL; -+ struct rb_node *node = st->active.rb_node; -+ -+ while (node) { -+ entry = rb_entry(node, struct bfq_entity, rb_node); -+left: -+ if (!bfq_gt(entry->start, vtime)) -+ first = entry; -+ -+ BUG_ON(bfq_gt(entry->min_start, vtime)); -+ -+ if (node->rb_left) { -+ entry = rb_entry(node->rb_left, -+ struct bfq_entity, rb_node); -+ if (!bfq_gt(entry->min_start, vtime)) { -+ node = node->rb_left; -+ goto left; -+ } -+ } -+ if (first) -+ break; -+ node = node->rb_right; -+ } -+ -+ BUG_ON(!first && !RB_EMPTY_ROOT(&st->active)); -+ return first; -+} -+ -+/** -+ * __bfq_lookup_next_entity - return the first eligible entity in @st. -+ * @st: the service tree. 
-+ * -+ * If there is no in-service entity for the sched_data st belongs to, -+ * then return the entity that will be set in service if: -+ * 1) the parent entity this st belongs to is set in service; -+ * 2) no entity belonging to such parent entity undergoes a state change -+ * that would influence the timestamps of the entity (e.g., becomes idle, -+ * becomes backlogged, changes its budget, ...). -+ * -+ * In this first case, update the virtual time in @st too (see the -+ * comments on this update inside the function). -+ * -+ * In constrast, if there is an in-service entity, then return the -+ * entity that would be set in service if not only the above -+ * conditions, but also the next one held true: the currently -+ * in-service entity, on expiration, -+ * 1) gets a finish time equal to the current one, or -+ * 2) is not eligible any more, or -+ * 3) is idle. -+ */ -+static struct bfq_entity * -+__bfq_lookup_next_entity(struct bfq_service_tree *st, bool in_service -+#if 0 -+ , bool force -+#endif -+ ) -+{ -+ struct bfq_entity *entity -+#if 0 -+ , *new_next_in_service = NULL -+#endif -+ ; -+ u64 new_vtime; -+ struct bfq_queue *bfqq; -+ -+ if (RB_EMPTY_ROOT(&st->active)) -+ return NULL; -+ -+ /* -+ * Get the value of the system virtual time for which at -+ * least one entity is eligible. -+ */ -+ new_vtime = bfq_calc_vtime_jump(st); -+ -+ /* -+ * If there is no in-service entity for the sched_data this -+ * active tree belongs to, then push the system virtual time -+ * up to the value that guarantees that at least one entity is -+ * eligible. If, instead, there is an in-service entity, then -+ * do not make any such update, because there is already an -+ * eligible entity, namely the in-service one (even if the -+ * entity is not on st, because it was extracted when set in -+ * service). -+ */ -+ if (!in_service) -+ bfq_update_vtime(st, new_vtime); -+ -+ entity = bfq_first_active_entity(st, new_vtime); -+ BUG_ON(bfq_gt(entity->start, new_vtime)); -+ -+ /* Log some information */ -+ bfqq = bfq_entity_to_bfqq(entity); -+ if (bfqq) -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "__lookup_next: start %llu vtime %llu st %p", -+ ((entity->start>>10)*1000)>>12, -+ ((new_vtime>>10)*1000)>>12, st); -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+ else { -+ struct bfq_group *bfqg = -+ container_of(entity, struct bfq_group, entity); -+ -+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg, -+ "__lookup_next: start %llu vtime %llu st %p", -+ ((entity->start>>10)*1000)>>12, -+ ((new_vtime>>10)*1000)>>12, st); -+ } -+#endif -+ -+ BUG_ON(!entity); -+ -+ return entity; -+} -+ -+/** -+ * bfq_lookup_next_entity - return the first eligible entity in @sd. -+ * @sd: the sched_data. -+ * -+ * This function is invoked when there has been a change in the trees -+ * for sd, and we need know what is the new next entity after this -+ * change. -+ */ -+static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd) -+{ -+ struct bfq_service_tree *st = sd->service_tree; -+ struct bfq_service_tree *idle_class_st = st + (BFQ_IOPRIO_CLASSES - 1); -+ struct bfq_entity *entity = NULL; -+ struct bfq_queue *bfqq; -+ int class_idx = 0; -+ -+ BUG_ON(!sd); -+ BUG_ON(!st); -+ /* -+ * Choose from idle class, if needed to guarantee a minimum -+ * bandwidth to this class (and if there is some active entity -+ * in idle class). This should also mitigate -+ * priority-inversion problems in case a low priority task is -+ * holding file system resources. 
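/*
 * Toy rendition (not from the patch) of the class-selection policy just
 * described: scan the per-class service trees in priority order, unless
 * the idle class has been starved past its timeout, in which case it is
 * served first. The toy_tree type and the starvation flag are
 * placeholders for the real service trees and the jiffies-based check.
 */
#include <stdbool.h>
#include <stddef.h>

#define TOY_IOPRIO_CLASSES 3    /* RT, BE, IDLE */

struct toy_tree {
        bool has_active;        /* any active entity in this class? */
        void *first_eligible;   /* what __bfq_lookup_next_entity() would return */
};

void *toy_lookup_next(struct toy_tree st[TOY_IOPRIO_CLASSES],
                      bool idle_class_starved)
{
        void *entity = NULL;
        int class_idx = 0;

        /* Guarantee a minimum share to the idle class if it was starved. */
        if (idle_class_starved && st[TOY_IOPRIO_CLASSES - 1].has_active)
                class_idx = TOY_IOPRIO_CLASSES - 1;

        for (; class_idx < TOY_IOPRIO_CLASSES; class_idx++) {
                entity = st[class_idx].first_eligible;
                if (entity)
                        break;
        }
        return entity;          /* NULL if no class has an eligible entity */
}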
-+ */ -+ if (time_is_before_jiffies(sd->bfq_class_idle_last_service + -+ BFQ_CL_IDLE_TIMEOUT)) { -+ if (!RB_EMPTY_ROOT(&idle_class_st->active)) -+ class_idx = BFQ_IOPRIO_CLASSES - 1; -+ /* About to be served if backlogged, or not yet backlogged */ -+ sd->bfq_class_idle_last_service = jiffies; -+ } -+ -+ /* -+ * Find the next entity to serve for the highest-priority -+ * class, unless the idle class needs to be served. -+ */ -+ for (; class_idx < BFQ_IOPRIO_CLASSES; class_idx++) { -+ entity = __bfq_lookup_next_entity(st + class_idx, -+ sd->in_service_entity); -+ -+ if (entity) -+ break; -+ } -+ -+ BUG_ON(!entity && -+ (!RB_EMPTY_ROOT(&st->active) || !RB_EMPTY_ROOT(&(st+1)->active) || -+ !RB_EMPTY_ROOT(&(st+2)->active))); -+ -+ if (!entity) -+ return NULL; -+ -+ /* Log some information */ -+ bfqq = bfq_entity_to_bfqq(entity); -+ if (bfqq) -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "chosen from st %p %d", -+ st + class_idx, class_idx); -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+ else { -+ struct bfq_group *bfqg = -+ container_of(entity, struct bfq_group, entity); -+ -+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg, -+ "chosen from st %p %d", -+ st + class_idx, class_idx); -+ } -+#endif -+ -+ return entity; -+} -+ -+static bool next_queue_may_preempt(struct bfq_data *bfqd) -+{ -+ struct bfq_sched_data *sd = &bfqd->root_group->sched_data; -+ -+ return sd->next_in_service != sd->in_service_entity; -+} -+ -+/* -+ * Get next queue for service. -+ */ -+static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd) -+{ -+ struct bfq_entity *entity = NULL; -+ struct bfq_sched_data *sd; -+ struct bfq_queue *bfqq; -+ -+ BUG_ON(bfqd->in_service_queue); -+ -+ if (bfqd->busy_queues == 0) -+ return NULL; -+ -+ /* -+ * Traverse the path from the root to the leaf entity to -+ * serve. Set in service all the entities visited along the -+ * way. -+ */ -+ sd = &bfqd->root_group->sched_data; -+ for (; sd ; sd = entity->my_sched_data) { -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+ if (entity) { -+ struct bfq_group *bfqg = -+ container_of(entity, struct bfq_group, entity); -+ -+ bfq_log_bfqg(bfqd, bfqg, -+ "get_next_queue: lookup in this group"); -+ if (!sd->next_in_service) -+ pr_crit("get_next_queue: lookup in this group"); -+ } else { -+ bfq_log_bfqg(bfqd, bfqd->root_group, -+ "get_next_queue: lookup in root group"); -+ if (!sd->next_in_service) -+ pr_crit("get_next_queue: lookup in root group"); -+ } -+#endif -+ -+ BUG_ON(!sd->next_in_service); -+ -+ /* -+ * WARNING. We are about to set the in-service entity -+ * to sd->next_in_service, i.e., to the (cached) value -+ * returned by bfq_lookup_next_entity(sd) the last -+ * time it was invoked, i.e., the last time when the -+ * service order in sd changed as a consequence of the -+ * activation or deactivation of an entity. In this -+ * respect, if we execute bfq_lookup_next_entity(sd) -+ * in this very moment, it may, although with low -+ * probability, yield a different entity than that -+ * pointed to by sd->next_in_service. This rare event -+ * happens in case there was no CLASS_IDLE entity to -+ * serve for sd when bfq_lookup_next_entity(sd) was -+ * invoked for the last time, while there is now one -+ * such entity. -+ * -+ * If the above event happens, then the scheduling of -+ * such entity in CLASS_IDLE is postponed until the -+ * service of the sd->next_in_service entity -+ * finishes. In fact, when the latter is expired, -+ * bfq_lookup_next_entity(sd) gets called again, -+ * exactly to update sd->next_in_service. 
-+ */ -+ -+ /* Make next_in_service entity become in_service_entity */ -+ entity = sd->next_in_service; -+ sd->in_service_entity = entity; -+ -+ /* -+ * Reset the accumulator of the amount of service that -+ * the entity is about to receive. -+ */ -+ entity->service = 0; -+ -+ /* -+ * If entity is no longer a candidate for next -+ * service, then we extract it from its active tree, -+ * for the following reason. To further boost the -+ * throughput in some special case, BFQ needs to know -+ * which is the next candidate entity to serve, while -+ * there is already an entity in service. In this -+ * respect, to make it easy to compute/update the next -+ * candidate entity to serve after the current -+ * candidate has been set in service, there is a case -+ * where it is necessary to extract the current -+ * candidate from its service tree. Such a case is -+ * when the entity just set in service cannot be also -+ * a candidate for next service. Details about when -+ * this conditions holds are reported in the comments -+ * on the function bfq_no_longer_next_in_service() -+ * invoked below. -+ */ -+ if (bfq_no_longer_next_in_service(entity)) -+ bfq_active_extract(bfq_entity_service_tree(entity), -+ entity); -+ -+ /* -+ * For the same reason why we may have just extracted -+ * entity from its active tree, we may need to update -+ * next_in_service for the sched_data of entity too, -+ * regardless of whether entity has been extracted. -+ * In fact, even if entity has not been extracted, a -+ * descendant entity may get extracted. Such an event -+ * would cause a change in next_in_service for the -+ * level of the descendant entity, and thus possibly -+ * back to upper levels. -+ * -+ * We cannot perform the resulting needed update -+ * before the end of this loop, because, to know which -+ * is the correct next-to-serve candidate entity for -+ * each level, we need first to find the leaf entity -+ * to set in service. In fact, only after we know -+ * which is the next-to-serve leaf entity, we can -+ * discover whether the parent entity of the leaf -+ * entity becomes the next-to-serve, and so on. -+ */ -+ -+ /* Log some information */ -+ bfqq = bfq_entity_to_bfqq(entity); -+ if (bfqq) -+ bfq_log_bfqq(bfqd, bfqq, -+ "get_next_queue: this queue, finish %llu", -+ (((entity->finish>>10)*1000)>>10)>>2); -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+ else { -+ struct bfq_group *bfqg = -+ container_of(entity, struct bfq_group, entity); -+ -+ bfq_log_bfqg(bfqd, bfqg, -+ "get_next_queue: this entity, finish %llu", -+ (((entity->finish>>10)*1000)>>10)>>2); -+ } -+#endif -+ -+ } -+ -+ BUG_ON(!entity); -+ bfqq = bfq_entity_to_bfqq(entity); -+ BUG_ON(!bfqq); -+ -+ /* -+ * We can finally update all next-to-serve entities along the -+ * path from the leaf entity just set in service to the root. 
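/*
 * Schematic version (not from the patch) of the selection performed by
 * bfq_get_next_queue() above: walk down from the root, turning each
 * level's cached next_in_service into the in-service entity, until a
 * leaf (a queue) is reached; the for_each_entity() loop that follows
 * then refreshes next_in_service bottom-up. Types are simplified
 * stand-ins, and every level is assumed to have a non-NULL
 * next_in_service, as the real code asserts.
 */
#include <stddef.h>

struct toy_sched_data;

struct toy_entity {
        struct toy_sched_data *my_sched_data;   /* NULL for a leaf (queue) */
        int service;
};

struct toy_sched_data {
        struct toy_entity *in_service_entity;
        struct toy_entity *next_in_service;
};

struct toy_entity *toy_get_next_leaf(struct toy_sched_data *root)
{
        struct toy_sched_data *sd;
        struct toy_entity *entity = NULL;

        for (sd = root; sd; sd = entity->my_sched_data) {
                entity = sd->next_in_service;   /* cached choice at this level */
                sd->in_service_entity = entity;
                entity->service = 0;            /* reset the service accumulator */
        }
        return entity;  /* the leaf entity, i.e. the queue to serve */
}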
-+ */ -+ for_each_entity(entity) { -+ struct bfq_sched_data *sd = entity->sched_data; -+ -+ if(!bfq_update_next_in_service(sd, NULL)) -+ break; -+ } -+ -+ return bfqq; -+} -+ -+static void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd) -+{ -+ struct bfq_queue *in_serv_bfqq = bfqd->in_service_queue; -+ struct bfq_entity *in_serv_entity = &in_serv_bfqq->entity; -+ struct bfq_entity *entity = in_serv_entity; -+ -+ if (bfqd->in_service_bic) { -+ put_io_context(bfqd->in_service_bic->icq.ioc); -+ bfqd->in_service_bic = NULL; -+ } -+ -+ bfq_clear_bfqq_wait_request(in_serv_bfqq); -+ hrtimer_try_to_cancel(&bfqd->idle_slice_timer); -+ bfqd->in_service_queue = NULL; -+ -+ /* -+ * When this function is called, all in-service entities have -+ * been properly deactivated or requeued, so we can safely -+ * execute the final step: reset in_service_entity along the -+ * path from entity to the root. -+ */ -+ for_each_entity(entity) -+ entity->sched_data->in_service_entity = NULL; -+ -+ /* -+ * in_serv_entity is no longer in service, so, if it is in no -+ * service tree either, then release the service reference to -+ * the queue it represents (taken with bfq_get_entity). -+ */ -+ if (!in_serv_entity->on_st) -+ bfq_put_queue(in_serv_bfqq); -+} -+ -+static void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, -+ bool ins_into_idle_tree, bool expiration) -+{ -+ struct bfq_entity *entity = &bfqq->entity; -+ -+ bfq_deactivate_entity(entity, ins_into_idle_tree, expiration); -+} -+ -+static void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) -+{ -+ struct bfq_entity *entity = &bfqq->entity; -+ struct bfq_service_tree *st = bfq_entity_service_tree(entity); -+ -+ BUG_ON(bfqq == bfqd->in_service_queue); -+ BUG_ON(entity->tree != &st->active && entity->tree != &st->idle && -+ entity->on_st); -+ -+ bfq_activate_requeue_entity(entity, bfq_bfqq_non_blocking_wait_rq(bfqq), -+ false); -+ bfq_clear_bfqq_non_blocking_wait_rq(bfqq); -+} -+ -+static void bfq_requeue_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) -+{ -+ struct bfq_entity *entity = &bfqq->entity; -+ -+ bfq_activate_requeue_entity(entity, false, -+ bfqq == bfqd->in_service_queue); -+} -+ -+static void bfqg_stats_update_dequeue(struct bfq_group *bfqg); -+ -+/* -+ * Called when the bfqq no longer has requests pending, remove it from -+ * the service tree. As a special case, it can be invoked during an -+ * expiration. -+ */ -+static void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq, -+ bool expiration) -+{ -+ BUG_ON(!bfq_bfqq_busy(bfqq)); -+ BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list)); -+ -+ bfq_log_bfqq(bfqd, bfqq, "del from busy"); -+ -+ bfq_clear_bfqq_busy(bfqq); -+ -+ BUG_ON(bfqd->busy_queues == 0); -+ bfqd->busy_queues--; -+ -+ if (!bfqq->dispatched) -+ bfq_weights_tree_remove(bfqd, &bfqq->entity, -+ &bfqd->queue_weights_tree); -+ -+ if (bfqq->wr_coeff > 1) { -+ bfqd->wr_busy_queues--; -+ BUG_ON(bfqd->wr_busy_queues < 0); -+ } -+ -+ bfqg_stats_update_dequeue(bfqq_group(bfqq)); -+ -+ BUG_ON(bfqq->entity.budget < 0); -+ -+ bfq_deactivate_bfqq(bfqd, bfqq, true, expiration); -+} -+ -+/* -+ * Called when an inactive queue receives a new request. 
-+ */ -+static void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq) -+{ -+ BUG_ON(bfq_bfqq_busy(bfqq)); -+ BUG_ON(bfqq == bfqd->in_service_queue); -+ -+ bfq_log_bfqq(bfqd, bfqq, "add to busy"); -+ -+ bfq_activate_bfqq(bfqd, bfqq); -+ -+ bfq_mark_bfqq_busy(bfqq); -+ bfqd->busy_queues++; -+ -+ if (!bfqq->dispatched) -+ if (bfqq->wr_coeff == 1) -+ bfq_weights_tree_add(bfqd, &bfqq->entity, -+ &bfqd->queue_weights_tree); -+ -+ if (bfqq->wr_coeff > 1) { -+ bfqd->wr_busy_queues++; -+ BUG_ON(bfqd->wr_busy_queues > bfqd->busy_queues); -+ } -+ -+} -diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c -new file mode 100644 -index 000000000000..65e7c7e77f3c ---- /dev/null -+++ b/block/bfq-sq-iosched.c -@@ -0,0 +1,5379 @@ -+/* -+ * Budget Fair Queueing (BFQ) I/O scheduler. -+ * -+ * Based on ideas and code from CFQ: -+ * Copyright (C) 2003 Jens Axboe -+ * -+ * Copyright (C) 2008 Fabio Checconi -+ * Paolo Valente -+ * -+ * Copyright (C) 2015 Paolo Valente -+ * -+ * Copyright (C) 2017 Paolo Valente -+ * -+ * Licensed under the GPL-2 as detailed in the accompanying COPYING.BFQ -+ * file. -+ * -+ * BFQ is a proportional-share I/O scheduler, with some extra -+ * low-latency capabilities. BFQ also supports full hierarchical -+ * scheduling through cgroups. Next paragraphs provide an introduction -+ * on BFQ inner workings. Details on BFQ benefits and usage can be -+ * found in Documentation/block/bfq-iosched.txt. -+ * -+ * BFQ is a proportional-share storage-I/O scheduling algorithm based -+ * on the slice-by-slice service scheme of CFQ. But BFQ assigns -+ * budgets, measured in number of sectors, to processes instead of -+ * time slices. The device is not granted to the in-service process -+ * for a given time slice, but until it has exhausted its assigned -+ * budget. This change from the time to the service domain enables BFQ -+ * to distribute the device throughput among processes as desired, -+ * without any distortion due to throughput fluctuations, or to device -+ * internal queueing. BFQ uses an ad hoc internal scheduler, called -+ * B-WF2Q+, to schedule processes according to their budgets. More -+ * precisely, BFQ schedules queues associated with processes. Thanks to -+ * the accurate policy of B-WF2Q+, BFQ can afford to assign high -+ * budgets to I/O-bound processes issuing sequential requests (to -+ * boost the throughput), and yet guarantee a low latency to -+ * interactive and soft real-time applications. -+ * -+ * NOTE: if the main or only goal, with a given device, is to achieve -+ * the maximum-possible throughput at all times, then do switch off -+ * all low-latency heuristics for that device, by setting low_latency -+ * to 0. -+ * -+ * BFQ is described in [1], where also a reference to the initial, more -+ * theoretical paper on BFQ can be found. The interested reader can find -+ * in the latter paper full details on the main algorithm, as well as -+ * formulas of the guarantees and formal proofs of all the properties. -+ * With respect to the version of BFQ presented in these papers, this -+ * implementation adds a few more heuristics, such as the one that -+ * guarantees a low latency to soft real-time applications, and a -+ * hierarchical extension based on H-WF2Q+. -+ * -+ * B-WF2Q+ is based on WF2Q+, that is described in [2], together with -+ * H-WF2Q+, while the augmented tree used to implement B-WF2Q+ with O(log N) -+ * complexity derives from the one introduced with EEVDF in [3]. -+ * -+ * [1] P. Valente, A. 
Avanzini, "Evolution of the BFQ Storage I/O -+ * Scheduler", Proceedings of the First Workshop on Mobile System -+ * Technologies (MST-2015), May 2015. -+ * http://algogroup.unimore.it/people/paolo/disk_sched/mst-2015.pdf -+ * -+ * http://algogroup.unimo.it/people/paolo/disk_sched/bf1-v1-suite-results.pdf -+ * -+ * [2] Jon C.R. Bennett and H. Zhang, ``Hierarchical Packet Fair Queueing -+ * Algorithms,'' IEEE/ACM Transactions on Networking, 5(5):675-689, -+ * Oct 1997. -+ * -+ * http://www.cs.cmu.edu/~hzhang/papers/TON-97-Oct.ps.gz -+ * -+ * [3] I. Stoica and H. Abdel-Wahab, ``Earliest Eligible Virtual Deadline -+ * First: A Flexible and Accurate Mechanism for Proportional Share -+ * Resource Allocation,'' technical report. -+ * -+ * http://www.cs.berkeley.edu/~istoica/papers/eevdf-tr-95.pdf -+ */ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include "blk.h" -+#include "bfq.h" -+ -+/* Expiration time of sync (0) and async (1) requests, in ns. */ -+static const u64 bfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 }; -+ -+/* Maximum backwards seek, in KiB. */ -+static const int bfq_back_max = (16 * 1024); -+ -+/* Penalty of a backwards seek, in number of sectors. */ -+static const int bfq_back_penalty = 2; -+ -+/* Idling period duration, in ns. */ -+static u32 bfq_slice_idle = (NSEC_PER_SEC / 125); -+ -+/* Minimum number of assigned budgets for which stats are safe to compute. */ -+static const int bfq_stats_min_budgets = 194; -+ -+/* Default maximum budget values, in sectors and number of requests. */ -+static const int bfq_default_max_budget = (16 * 1024); -+ -+/* -+ * Async to sync throughput distribution is controlled as follows: -+ * when an async request is served, the entity is charged the number -+ * of sectors of the request, multiplied by the factor below -+ */ -+static const int bfq_async_charge_factor = 10; -+ -+/* Default timeout values, in jiffies, approximating CFQ defaults. */ -+static const int bfq_timeout = (HZ / 8); -+ -+static struct kmem_cache *bfq_pool; -+ -+/* Below this threshold (in ns), we consider thinktime immediate. */ -+#define BFQ_MIN_TT (2 * NSEC_PER_MSEC) -+ -+/* hw_tag detection: parallel requests threshold and min samples needed. */ -+#define BFQ_HW_QUEUE_THRESHOLD 4 -+#define BFQ_HW_QUEUE_SAMPLES 32 -+ -+#define BFQQ_SEEK_THR (sector_t)(8 * 100) -+#define BFQQ_SECT_THR_NONROT (sector_t)(2 * 32) -+#define BFQQ_CLOSE_THR (sector_t)(8 * 1024) -+#define BFQQ_SEEKY(bfqq) (hweight32(bfqq->seek_history) > 32/8) -+ -+/* Min number of samples required to perform peak-rate update */ -+#define BFQ_RATE_MIN_SAMPLES 32 -+/* Min observation time interval required to perform a peak-rate update (ns) */ -+#define BFQ_RATE_MIN_INTERVAL (300*NSEC_PER_MSEC) -+/* Target observation time interval for a peak-rate update (ns) */ -+#define BFQ_RATE_REF_INTERVAL NSEC_PER_SEC -+ -+/* Shift used for peak rate fixed precision calculations. */ -+#define BFQ_RATE_SHIFT 16 -+ -+/* -+ * By default, BFQ computes the duration of the weight raising for -+ * interactive applications automatically, using the following formula: -+ * duration = (R / r) * T, where r is the peak rate of the device, and -+ * R and T are two reference parameters. 
-+ * In particular, R is the peak rate of the reference device (see below), -+ * and T is a reference time: given the systems that are likely to be -+ * installed on the reference device according to its speed class, T is -+ * about the maximum time needed, under BFQ and while reading two files in -+ * parallel, to load typical large applications on these systems. -+ * In practice, the slower/faster the device at hand is, the more/less it -+ * takes to load applications with respect to the reference device. -+ * Accordingly, the longer/shorter BFQ grants weight raising to interactive -+ * applications. -+ * -+ * BFQ uses four different reference pairs (R, T), depending on: -+ * . whether the device is rotational or non-rotational; -+ * . whether the device is slow, such as old or portable HDDs, as well as -+ * SD cards, or fast, such as newer HDDs and SSDs. -+ * -+ * The device's speed class is dynamically (re)detected in -+ * bfq_update_peak_rate() every time the estimated peak rate is updated. -+ * -+ * In the following definitions, R_slow[0]/R_fast[0] and -+ * T_slow[0]/T_fast[0] are the reference values for a slow/fast -+ * rotational device, whereas R_slow[1]/R_fast[1] and -+ * T_slow[1]/T_fast[1] are the reference values for a slow/fast -+ * non-rotational device. Finally, device_speed_thresh are the -+ * thresholds used to switch between speed classes. The reference -+ * rates are not the actual peak rates of the devices used as a -+ * reference, but slightly lower values. The reason for using these -+ * slightly lower values is that the peak-rate estimator tends to -+ * yield slightly lower values than the actual peak rate (it can yield -+ * the actual peak rate only if there is only one process doing I/O, -+ * and the process does sequential I/O). -+ * -+ * Both the reference peak rates and the thresholds are measured in -+ * sectors/usec, left-shifted by BFQ_RATE_SHIFT. -+ */ -+static int R_slow[2] = {1000, 10700}; -+static int R_fast[2] = {14000, 33000}; -+/* -+ * To improve readability, a conversion function is used to initialize the -+ * following arrays, which entails that they can be initialized only in a -+ * function. -+ */ -+static int T_slow[2]; -+static int T_fast[2]; -+static int device_speed_thresh[2]; -+ -+#define BFQ_SERVICE_TREE_INIT ((struct bfq_service_tree) \ -+ { RB_ROOT, RB_ROOT, NULL, NULL, 0, 0 }) -+ -+#define RQ_BIC(rq) ((struct bfq_io_cq *) (rq)->elv.priv[0]) -+#define RQ_BFQQ(rq) ((rq)->elv.priv[1]) -+ -+static void bfq_schedule_dispatch(struct bfq_data *bfqd); -+ -+#include "bfq-ioc.c" -+#include "bfq-sched.c" -+#include "bfq-cgroup-included.c" -+ -+#define bfq_class_idle(bfqq) ((bfqq)->ioprio_class == IOPRIO_CLASS_IDLE) -+#define bfq_class_rt(bfqq) ((bfqq)->ioprio_class == IOPRIO_CLASS_RT) -+ -+#define bfq_sample_valid(samples) ((samples) > 80) -+ -+/* -+ * Scheduler run of queue, if there are requests pending and no one in the -+ * driver that will restart queueing. -+ */ -+static void bfq_schedule_dispatch(struct bfq_data *bfqd) -+{ -+ if (bfqd->queued != 0) { -+ bfq_log(bfqd, "schedule dispatch"); -+ kblockd_schedule_work(&bfqd->unplug_work); -+ } -+} -+ -+/* -+ * Lifted from AS - choose which of rq1 and rq2 that is best served now. -+ * We choose the request that is closesr to the head right now. Distance -+ * behind the head is penalized and only allowed to a certain extent. 
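/*
 * Compact model (not from the patch) of the distance metric described
 * above: a forward seek counts as-is, a short backward seek (within
 * back_max) is charged back_penalty times its length, and anything
 * farther behind the head is treated as "wrapped" and picked only as a
 * last resort. The toy_* names and toy_sector_t are illustrative.
 */
#include <stdbool.h>

typedef unsigned long long toy_sector_t;

struct toy_dist {
        bool wrapped;           /* too far behind the head */
        toy_sector_t cost;      /* penalized distance when not wrapped */
};

struct toy_dist toy_seek_cost(toy_sector_t pos, toy_sector_t head,
                              toy_sector_t back_max,
                              unsigned int back_penalty)
{
        struct toy_dist d = { .wrapped = false, .cost = 0 };

        if (pos >= head)
                d.cost = pos - head;                    /* forward seek */
        else if (pos + back_max >= head)
                d.cost = (head - pos) * back_penalty;   /* short backward seek */
        else
                d.wrapped = true;                       /* long backward seek */
        return d;
}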
-+ */ -+static struct request *bfq_choose_req(struct bfq_data *bfqd, -+ struct request *rq1, -+ struct request *rq2, -+ sector_t last) -+{ -+ sector_t s1, s2, d1 = 0, d2 = 0; -+ unsigned long back_max; -+#define BFQ_RQ1_WRAP 0x01 /* request 1 wraps */ -+#define BFQ_RQ2_WRAP 0x02 /* request 2 wraps */ -+ unsigned int wrap = 0; /* bit mask: requests behind the disk head? */ -+ -+ if (!rq1 || rq1 == rq2) -+ return rq2; -+ if (!rq2) -+ return rq1; -+ -+ if (rq_is_sync(rq1) && !rq_is_sync(rq2)) -+ return rq1; -+ else if (rq_is_sync(rq2) && !rq_is_sync(rq1)) -+ return rq2; -+ if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META)) -+ return rq1; -+ else if ((rq2->cmd_flags & REQ_META) && !(rq1->cmd_flags & REQ_META)) -+ return rq2; -+ -+ s1 = blk_rq_pos(rq1); -+ s2 = blk_rq_pos(rq2); -+ -+ /* -+ * By definition, 1KiB is 2 sectors. -+ */ -+ back_max = bfqd->bfq_back_max * 2; -+ -+ /* -+ * Strict one way elevator _except_ in the case where we allow -+ * short backward seeks which are biased as twice the cost of a -+ * similar forward seek. -+ */ -+ if (s1 >= last) -+ d1 = s1 - last; -+ else if (s1 + back_max >= last) -+ d1 = (last - s1) * bfqd->bfq_back_penalty; -+ else -+ wrap |= BFQ_RQ1_WRAP; -+ -+ if (s2 >= last) -+ d2 = s2 - last; -+ else if (s2 + back_max >= last) -+ d2 = (last - s2) * bfqd->bfq_back_penalty; -+ else -+ wrap |= BFQ_RQ2_WRAP; -+ -+ /* Found required data */ -+ -+ /* -+ * By doing switch() on the bit mask "wrap" we avoid having to -+ * check two variables for all permutations: --> faster! -+ */ -+ switch (wrap) { -+ case 0: /* common case for CFQ: rq1 and rq2 not wrapped */ -+ if (d1 < d2) -+ return rq1; -+ else if (d2 < d1) -+ return rq2; -+ -+ if (s1 >= s2) -+ return rq1; -+ else -+ return rq2; -+ -+ case BFQ_RQ2_WRAP: -+ return rq1; -+ case BFQ_RQ1_WRAP: -+ return rq2; -+ case (BFQ_RQ1_WRAP|BFQ_RQ2_WRAP): /* both rqs wrapped */ -+ default: -+ /* -+ * Since both rqs are wrapped, -+ * start with the one that's further behind head -+ * (--> only *one* back seek required), -+ * since back seek takes more time than forward. -+ */ -+ if (s1 <= s2) -+ return rq1; -+ else -+ return rq2; -+ } -+} -+ -+static struct bfq_queue * -+bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root, -+ sector_t sector, struct rb_node **ret_parent, -+ struct rb_node ***rb_link) -+{ -+ struct rb_node **p, *parent; -+ struct bfq_queue *bfqq = NULL; -+ -+ parent = NULL; -+ p = &root->rb_node; -+ while (*p) { -+ struct rb_node **n; -+ -+ parent = *p; -+ bfqq = rb_entry(parent, struct bfq_queue, pos_node); -+ -+ /* -+ * Sort strictly based on sector. Smallest to the left, -+ * largest to the right. -+ */ -+ if (sector > blk_rq_pos(bfqq->next_rq)) -+ n = &(*p)->rb_right; -+ else if (sector < blk_rq_pos(bfqq->next_rq)) -+ n = &(*p)->rb_left; -+ else -+ break; -+ p = n; -+ bfqq = NULL; -+ } -+ -+ *ret_parent = parent; -+ if (rb_link) -+ *rb_link = p; -+ -+ bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %d", -+ (unsigned long long) sector, -+ bfqq ? 
bfqq->pid : 0); -+ -+ return bfqq; -+} -+ -+static void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq) -+{ -+ struct rb_node **p, *parent; -+ struct bfq_queue *__bfqq; -+ -+ if (bfqq->pos_root) { -+ rb_erase(&bfqq->pos_node, bfqq->pos_root); -+ bfqq->pos_root = NULL; -+ } -+ -+ if (bfq_class_idle(bfqq)) -+ return; -+ if (!bfqq->next_rq) -+ return; -+ -+ bfqq->pos_root = &bfq_bfqq_to_bfqg(bfqq)->rq_pos_tree; -+ __bfqq = bfq_rq_pos_tree_lookup(bfqd, bfqq->pos_root, -+ blk_rq_pos(bfqq->next_rq), &parent, &p); -+ if (!__bfqq) { -+ rb_link_node(&bfqq->pos_node, parent, p); -+ rb_insert_color(&bfqq->pos_node, bfqq->pos_root); -+ } else -+ bfqq->pos_root = NULL; -+} -+ -+/* -+ * Tell whether there are active queues or groups with differentiated weights. -+ */ -+static bool bfq_differentiated_weights(struct bfq_data *bfqd) -+{ -+ /* -+ * For weights to differ, at least one of the trees must contain -+ * at least two nodes. -+ */ -+ return (!RB_EMPTY_ROOT(&bfqd->queue_weights_tree) && -+ (bfqd->queue_weights_tree.rb_node->rb_left || -+ bfqd->queue_weights_tree.rb_node->rb_right) -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+ ) || -+ (!RB_EMPTY_ROOT(&bfqd->group_weights_tree) && -+ (bfqd->group_weights_tree.rb_node->rb_left || -+ bfqd->group_weights_tree.rb_node->rb_right) -+#endif -+ ); -+} -+ -+/* -+ * The following function returns true if every queue must receive the -+ * same share of the throughput (this condition is used when deciding -+ * whether idling may be disabled, see the comments in the function -+ * bfq_bfqq_may_idle()). -+ * -+ * Such a scenario occurs when: -+ * 1) all active queues have the same weight, -+ * 2) all active groups at the same level in the groups tree have the same -+ * weight, -+ * 3) all active groups at the same level in the groups tree have the same -+ * number of children. -+ * -+ * Unfortunately, keeping the necessary state for evaluating exactly the -+ * above symmetry conditions would be quite complex and time-consuming. -+ * Therefore this function evaluates, instead, the following stronger -+ * sub-conditions, for which it is much easier to maintain the needed -+ * state: -+ * 1) all active queues have the same weight, -+ * 2) all active groups have the same weight, -+ * 3) all active groups have at most one active child each. -+ * In particular, the last two conditions are always true if hierarchical -+ * support and the cgroups interface are not enabled, thus no state needs -+ * to be maintained in this case. -+ */ -+static bool bfq_symmetric_scenario(struct bfq_data *bfqd) -+{ -+ return !bfq_differentiated_weights(bfqd); -+} -+ -+/* -+ * If the weight-counter tree passed as input contains no counter for -+ * the weight of the input entity, then add that counter; otherwise just -+ * increment the existing counter. -+ * -+ * Note that weight-counter trees contain few nodes in mostly symmetric -+ * scenarios. For example, if all queues have the same weight, then the -+ * weight-counter tree for the queues may contain at most one node. -+ * This holds even if low_latency is on, because weight-raised queues -+ * are not inserted in the tree. -+ * In most scenarios, the rate at which nodes are created/destroyed -+ * should be low too. 
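/*
 * Deliberately simple stand-in (not from the patch) for the
 * weight-counter bookkeeping just described: one counter per distinct
 * weight, with num_active tracking how many backlogged entities carry
 * that weight. The real code keeps the counters in an rbtree keyed by
 * weight; a flat array is used here only to keep the sketch short.
 */
#include <stddef.h>

#define TOY_MAX_WEIGHTS 16

struct toy_weight_counter {
        unsigned int weight;
        unsigned int num_active;        /* 0 means the slot is unused */
};

/* Account for one more active entity of the given weight, reusing an
 * existing counter when possible (the common, symmetric-weights case). */
void toy_weights_add(struct toy_weight_counter tab[TOY_MAX_WEIGHTS],
                     unsigned int weight)
{
        size_t i, free_slot = TOY_MAX_WEIGHTS;

        for (i = 0; i < TOY_MAX_WEIGHTS; i++) {
                if (tab[i].num_active && tab[i].weight == weight) {
                        tab[i].num_active++;
                        return;
                }
                if (!tab[i].num_active && free_slot == TOY_MAX_WEIGHTS)
                        free_slot = i;
        }
        if (free_slot < TOY_MAX_WEIGHTS) {
                tab[free_slot].weight = weight;
                tab[free_slot].num_active = 1;
        }
        /* If the table is full, silently skip, mirroring how the real
         * code tolerates a failed counter allocation. */
}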
-+ */ -+static void bfq_weights_tree_add(struct bfq_data *bfqd, -+ struct bfq_entity *entity, -+ struct rb_root *root) -+{ -+ struct rb_node **new = &(root->rb_node), *parent = NULL; -+ -+ /* -+ * Do not insert if the entity is already associated with a -+ * counter, which happens if: -+ * 1) the entity is associated with a queue, -+ * 2) a request arrival has caused the queue to become both -+ * non-weight-raised, and hence change its weight, and -+ * backlogged; in this respect, each of the two events -+ * causes an invocation of this function, -+ * 3) this is the invocation of this function caused by the -+ * second event. This second invocation is actually useless, -+ * and we handle this fact by exiting immediately. More -+ * efficient or clearer solutions might possibly be adopted. -+ */ -+ if (entity->weight_counter) -+ return; -+ -+ while (*new) { -+ struct bfq_weight_counter *__counter = container_of(*new, -+ struct bfq_weight_counter, -+ weights_node); -+ parent = *new; -+ -+ if (entity->weight == __counter->weight) { -+ entity->weight_counter = __counter; -+ goto inc_counter; -+ } -+ if (entity->weight < __counter->weight) -+ new = &((*new)->rb_left); -+ else -+ new = &((*new)->rb_right); -+ } -+ -+ entity->weight_counter = kzalloc(sizeof(struct bfq_weight_counter), -+ GFP_ATOMIC); -+ -+ /* -+ * In the unlucky event of an allocation failure, we just -+ * exit. This will cause the weight of entity to not be -+ * considered in bfq_differentiated_weights, which, in its -+ * turn, causes the scenario to be deemed wrongly symmetric in -+ * case entity's weight would have been the only weight making -+ * the scenario asymmetric. On the bright side, no unbalance -+ * will however occur when entity becomes inactive again (the -+ * invocation of this function is triggered by an activation -+ * of entity). In fact, bfq_weights_tree_remove does nothing -+ * if !entity->weight_counter. -+ */ -+ if (unlikely(!entity->weight_counter)) -+ return; -+ -+ entity->weight_counter->weight = entity->weight; -+ rb_link_node(&entity->weight_counter->weights_node, parent, new); -+ rb_insert_color(&entity->weight_counter->weights_node, root); -+ -+inc_counter: -+ entity->weight_counter->num_active++; -+} -+ -+/* -+ * Decrement the weight counter associated with the entity, and, if the -+ * counter reaches 0, remove the counter from the tree. -+ * See the comments to the function bfq_weights_tree_add() for considerations -+ * about overhead. -+ */ -+static void bfq_weights_tree_remove(struct bfq_data *bfqd, -+ struct bfq_entity *entity, -+ struct rb_root *root) -+{ -+ if (!entity->weight_counter) -+ return; -+ -+ BUG_ON(RB_EMPTY_ROOT(root)); -+ BUG_ON(entity->weight_counter->weight != entity->weight); -+ -+ BUG_ON(!entity->weight_counter->num_active); -+ entity->weight_counter->num_active--; -+ if (entity->weight_counter->num_active > 0) -+ goto reset_entity_pointer; -+ -+ rb_erase(&entity->weight_counter->weights_node, root); -+ kfree(entity->weight_counter); -+ -+reset_entity_pointer: -+ entity->weight_counter = NULL; -+} -+ -+/* -+ * Return expired entry, or NULL to just start from scratch in rbtree. 
-+ */ -+static struct request *bfq_check_fifo(struct bfq_queue *bfqq, -+ struct request *last) -+{ -+ struct request *rq; -+ -+ if (bfq_bfqq_fifo_expire(bfqq)) -+ return NULL; -+ -+ bfq_mark_bfqq_fifo_expire(bfqq); -+ -+ rq = rq_entry_fifo(bfqq->fifo.next); -+ -+ if (rq == last || ktime_get_ns() < rq->fifo_time) -+ return NULL; -+ -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "check_fifo: returned %p", rq); -+ BUG_ON(RB_EMPTY_NODE(&rq->rb_node)); -+ return rq; -+} -+ -+static struct request *bfq_find_next_rq(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq, -+ struct request *last) -+{ -+ struct rb_node *rbnext = rb_next(&last->rb_node); -+ struct rb_node *rbprev = rb_prev(&last->rb_node); -+ struct request *next, *prev = NULL; -+ -+ BUG_ON(list_empty(&bfqq->fifo)); -+ -+ /* Follow expired path, else get first next available. */ -+ next = bfq_check_fifo(bfqq, last); -+ if (next) { -+ BUG_ON(next == last); -+ return next; -+ } -+ -+ BUG_ON(RB_EMPTY_NODE(&last->rb_node)); -+ -+ if (rbprev) -+ prev = rb_entry_rq(rbprev); -+ -+ if (rbnext) -+ next = rb_entry_rq(rbnext); -+ else { -+ rbnext = rb_first(&bfqq->sort_list); -+ if (rbnext && rbnext != &last->rb_node) -+ next = rb_entry_rq(rbnext); -+ } -+ -+ return bfq_choose_req(bfqd, next, prev, blk_rq_pos(last)); -+} -+ -+/* see the definition of bfq_async_charge_factor for details */ -+static unsigned long bfq_serv_to_charge(struct request *rq, -+ struct bfq_queue *bfqq) -+{ -+ if (bfq_bfqq_sync(bfqq) || bfqq->wr_coeff > 1) -+ return blk_rq_sectors(rq); -+ -+ /* -+ * If there are no weight-raised queues, then amplify service -+ * by just the async charge factor; otherwise amplify service -+ * by twice the async charge factor, to further reduce latency -+ * for weight-raised queues. -+ */ -+ if (bfqq->bfqd->wr_busy_queues == 0) -+ return blk_rq_sectors(rq) * bfq_async_charge_factor; -+ -+ return blk_rq_sectors(rq) * 2 * bfq_async_charge_factor; -+} -+ -+/** -+ * bfq_updated_next_req - update the queue after a new next_rq selection. -+ * @bfqd: the device data the queue belongs to. -+ * @bfqq: the queue to update. -+ * -+ * If the first request of a queue changes we make sure that the queue -+ * has enough budget to serve at least its first request (if the -+ * request has grown). We do this because if the queue has not enough -+ * budget for its first request, it has to go through two dispatch -+ * rounds to actually get it dispatched. -+ */ -+static void bfq_updated_next_req(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq) -+{ -+ struct bfq_entity *entity = &bfqq->entity; -+ struct bfq_service_tree *st = bfq_entity_service_tree(entity); -+ struct request *next_rq = bfqq->next_rq; -+ unsigned long new_budget; -+ -+ if (!next_rq) -+ return; -+ -+ if (bfqq == bfqd->in_service_queue) -+ /* -+ * In order not to break guarantees, budgets cannot be -+ * changed after an entity has been selected. -+ */ -+ return; -+ -+ BUG_ON(entity->tree != &st->active); -+ BUG_ON(entity == entity->sched_data->in_service_entity); -+ -+ new_budget = max_t(unsigned long, bfqq->max_budget, -+ bfq_serv_to_charge(next_rq, bfqq)); -+ if (entity->budget != new_budget) { -+ entity->budget = new_budget; -+ bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu", -+ new_budget); -+ bfq_requeue_bfqq(bfqd, bfqq); -+ } -+} -+ -+static unsigned int bfq_wr_duration(struct bfq_data *bfqd) -+{ -+ u64 dur; -+ -+ if (bfqd->bfq_wr_max_time > 0) -+ return bfqd->bfq_wr_max_time; -+ -+ dur = bfqd->RT_prod; -+ do_div(dur, bfqd->peak_rate); -+ -+ /* -+ * Limit duration between 3 and 13 seconds. 
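As a rough worked example of the duration computation above (a stand-alone sketch with units flattened to milliseconds and made-up rate values, not the jiffies-based kernel arithmetic): the weight-raising duration is the time needed to serve an RT_prod-worth of service at the estimated peak rate, clamped to the 3-13 second window.

#include <stdio.h>

/* clamp an estimated weight-raising duration (ms) to the [3000, 13000] window */
static unsigned long wr_duration_ms(unsigned long long rt_prod,
				    unsigned long long peak_rate)
{
	unsigned long long dur = rt_prod / peak_rate;	/* cf. do_div(dur, peak_rate) */

	if (dur > 13000)
		dur = 13000;
	else if (dur < 3000)
		dur = 3000;
	return (unsigned long)dur;
}

int main(void)
{
	/* hypothetical numbers: product tuned so a slow device lands at ~8 s */
	printf("slow device: %lu ms\n", wr_duration_ms(8ULL * 1000 * 20000, 20000));
	printf("fast device: %lu ms\n", wr_duration_ms(8ULL * 1000 * 20000, 200000));
	return 0;
}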
Tests show that -+ * higher values than 13 seconds often yield the opposite of -+ * the desired result, i.e., worsen responsiveness by letting -+ * non-interactive and non-soft-real-time applications -+ * preserve weight raising for a too long time interval. -+ * -+ * On the other end, lower values than 3 seconds make it -+ * difficult for most interactive tasks to complete their jobs -+ * before weight-raising finishes. -+ */ -+ if (dur > msecs_to_jiffies(13000)) -+ dur = msecs_to_jiffies(13000); -+ else if (dur < msecs_to_jiffies(3000)) -+ dur = msecs_to_jiffies(3000); -+ -+ return dur; -+} -+ -+static void -+bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd, -+ struct bfq_io_cq *bic, bool bfq_already_existing) -+{ -+ unsigned int old_wr_coeff; -+ bool busy = bfq_already_existing && bfq_bfqq_busy(bfqq); -+ -+ if (bic->saved_idle_window) -+ bfq_mark_bfqq_idle_window(bfqq); -+ else -+ bfq_clear_bfqq_idle_window(bfqq); -+ -+ if (bic->saved_IO_bound) -+ bfq_mark_bfqq_IO_bound(bfqq); -+ else -+ bfq_clear_bfqq_IO_bound(bfqq); -+ -+ if (unlikely(busy)) -+ old_wr_coeff = bfqq->wr_coeff; -+ -+ bfqq->wr_coeff = bic->saved_wr_coeff; -+ bfqq->wr_start_at_switch_to_srt = bic->saved_wr_start_at_switch_to_srt; -+ BUG_ON(time_is_after_jiffies(bfqq->wr_start_at_switch_to_srt)); -+ bfqq->last_wr_start_finish = bic->saved_last_wr_start_finish; -+ bfqq->wr_cur_max_time = bic->saved_wr_cur_max_time; -+ BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish)); -+ -+ if (bfqq->wr_coeff > 1 && (bfq_bfqq_in_large_burst(bfqq) || -+ time_is_before_jiffies(bfqq->last_wr_start_finish + -+ bfqq->wr_cur_max_time))) { -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "resume state: switching off wr (%lu + %lu < %lu)", -+ bfqq->last_wr_start_finish, bfqq->wr_cur_max_time, -+ jiffies); -+ -+ bfqq->wr_coeff = 1; -+ } -+ -+ /* make sure weight will be updated, however we got here */ -+ bfqq->entity.prio_changed = 1; -+ -+ if (likely(!busy)) -+ return; -+ -+ if (old_wr_coeff == 1 && bfqq->wr_coeff > 1) { -+ bfqd->wr_busy_queues++; -+ BUG_ON(bfqd->wr_busy_queues > bfqd->busy_queues); -+ } else if (old_wr_coeff > 1 && bfqq->wr_coeff == 1) { -+ bfqd->wr_busy_queues--; -+ BUG_ON(bfqd->wr_busy_queues < 0); -+ } -+} -+ -+static int bfqq_process_refs(struct bfq_queue *bfqq) -+{ -+ int process_refs, io_refs; -+ -+ lockdep_assert_held(bfqq->bfqd->queue->queue_lock); -+ -+ io_refs = bfqq->allocated[READ] + bfqq->allocated[WRITE]; -+ process_refs = bfqq->ref - io_refs - bfqq->entity.on_st; -+ BUG_ON(process_refs < 0); -+ return process_refs; -+} -+ -+/* Empty burst list and add just bfqq (see comments to bfq_handle_burst) */ -+static void bfq_reset_burst_list(struct bfq_data *bfqd, struct bfq_queue *bfqq) -+{ -+ struct bfq_queue *item; -+ struct hlist_node *n; -+ -+ hlist_for_each_entry_safe(item, n, &bfqd->burst_list, burst_list_node) -+ hlist_del_init(&item->burst_list_node); -+ hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list); -+ bfqd->burst_size = 1; -+ bfqd->burst_parent_entity = bfqq->entity.parent; -+} -+ -+/* Add bfqq to the list of queues in current burst (see bfq_handle_burst) */ -+static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq) -+{ -+ /* Increment burst size to take into account also bfqq */ -+ bfqd->burst_size++; -+ -+ bfq_log_bfqq(bfqd, bfqq, "add_to_burst %d", bfqd->burst_size); -+ -+ BUG_ON(bfqd->burst_size > bfqd->bfq_large_burst_thresh); -+ -+ if (bfqd->burst_size == bfqd->bfq_large_burst_thresh) { -+ struct bfq_queue *pos, *bfqq_item; -+ struct hlist_node *n; -+ -+ /* -+ * 
Enough queues have been activated shortly after each -+ * other to consider this burst as large. -+ */ -+ bfqd->large_burst = true; -+ bfq_log_bfqq(bfqd, bfqq, "add_to_burst: large burst started"); -+ -+ /* -+ * We can now mark all queues in the burst list as -+ * belonging to a large burst. -+ */ -+ hlist_for_each_entry(bfqq_item, &bfqd->burst_list, -+ burst_list_node) { -+ bfq_mark_bfqq_in_large_burst(bfqq_item); -+ bfq_log_bfqq(bfqd, bfqq_item, "marked in large burst"); -+ } -+ bfq_mark_bfqq_in_large_burst(bfqq); -+ bfq_log_bfqq(bfqd, bfqq, "marked in large burst"); -+ -+ /* -+ * From now on, and until the current burst finishes, any -+ * new queue being activated shortly after the last queue -+ * was inserted in the burst can be immediately marked as -+ * belonging to a large burst. So the burst list is not -+ * needed any more. Remove it. -+ */ -+ hlist_for_each_entry_safe(pos, n, &bfqd->burst_list, -+ burst_list_node) -+ hlist_del_init(&pos->burst_list_node); -+ } else /* -+ * Burst not yet large: add bfqq to the burst list. Do -+ * not increment the ref counter for bfqq, because bfqq -+ * is removed from the burst list before freeing bfqq -+ * in put_queue. -+ */ -+ hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list); -+} -+ -+/* -+ * If many queues belonging to the same group happen to be created -+ * shortly after each other, then the processes associated with these -+ * queues have typically a common goal. In particular, bursts of queue -+ * creations are usually caused by services or applications that spawn -+ * many parallel threads/processes. Examples are systemd during boot, -+ * or git grep. To help these processes get their job done as soon as -+ * possible, it is usually better to not grant either weight-raising -+ * or device idling to their queues. -+ * -+ * In this comment we describe, firstly, the reasons why this fact -+ * holds, and, secondly, the next function, which implements the main -+ * steps needed to properly mark these queues so that they can then be -+ * treated in a different way. -+ * -+ * The above services or applications benefit mostly from a high -+ * throughput: the quicker the requests of the activated queues are -+ * cumulatively served, the sooner the target job of these queues gets -+ * completed. As a consequence, weight-raising any of these queues, -+ * which also implies idling the device for it, is almost always -+ * counterproductive. In most cases it just lowers throughput. -+ * -+ * On the other hand, a burst of queue creations may be caused also by -+ * the start of an application that does not consist of a lot of -+ * parallel I/O-bound threads. In fact, with a complex application, -+ * several short processes may need to be executed to start-up the -+ * application. In this respect, to start an application as quickly as -+ * possible, the best thing to do is in any case to privilege the I/O -+ * related to the application with respect to all other -+ * I/O. Therefore, the best strategy to start as quickly as possible -+ * an application that causes a burst of queue creations is to -+ * weight-raise all the queues created during the burst. This is the -+ * exact opposite of the best strategy for the other type of bursts. -+ * -+ * In the end, to take the best action for each of the two cases, the -+ * two types of bursts need to be distinguished. Fortunately, this -+ * seems relatively easy, by looking at the sizes of the bursts. 
In -+ * particular, we found a threshold such that only bursts with a -+ * larger size than that threshold are apparently caused by -+ * services or commands such as systemd or git grep. For brevity, -+ * hereafter we call just 'large' these bursts. BFQ *does not* -+ * weight-raise queues whose creation occurs in a large burst. In -+ * addition, for each of these queues BFQ performs or does not perform -+ * idling depending on which choice boosts the throughput more. The -+ * exact choice depends on the device and request pattern at -+ * hand. -+ * -+ * Unfortunately, false positives may occur while an interactive task -+ * is starting (e.g., an application is being started). The -+ * consequence is that the queues associated with the task do not -+ * enjoy weight raising as expected. Fortunately these false positives -+ * are very rare. They typically occur if some service happens to -+ * start doing I/O exactly when the interactive task starts. -+ * -+ * Turning back to the next function, it implements all the steps -+ * needed to detect the occurrence of a large burst and to properly -+ * mark all the queues belonging to it (so that they can then be -+ * treated in a different way). This goal is achieved by maintaining a -+ * "burst list" that holds, temporarily, the queues that belong to the -+ * burst in progress. The list is then used to mark these queues as -+ * belonging to a large burst if the burst does become large. The main -+ * steps are the following. -+ * -+ * . when the very first queue is created, the queue is inserted into the -+ * list (as it could be the first queue in a possible burst) -+ * -+ * . if the current burst has not yet become large, and a queue Q that does -+ * not yet belong to the burst is activated shortly after the last time -+ * at which a new queue entered the burst list, then the function appends -+ * Q to the burst list -+ * -+ * . if, as a consequence of the previous step, the burst size reaches -+ * the large-burst threshold, then -+ * -+ * . all the queues in the burst list are marked as belonging to a -+ * large burst -+ * -+ * . the burst list is deleted; in fact, the burst list already served -+ * its purpose (keeping temporarily track of the queues in a burst, -+ * so as to be able to mark them as belonging to a large burst in the -+ * previous sub-step), and now is not needed any more -+ * -+ * . the device enters a large-burst mode -+ * -+ * . if a queue Q that does not belong to the burst is created while -+ * the device is in large-burst mode and shortly after the last time -+ * at which a queue either entered the burst list or was marked as -+ * belonging to the current large burst, then Q is immediately marked -+ * as belonging to a large burst. -+ * -+ * . if a queue Q that does not belong to the burst is created a while -+ * later, i.e., not shortly after, than the last time at which a queue -+ * either entered the burst list or was marked as belonging to the -+ * current large burst, then the current burst is deemed as finished and: -+ * -+ * . the large-burst mode is reset if set -+ * -+ * . the burst list is emptied -+ * -+ * . Q is inserted in the burst list, as Q may be the first queue -+ * in a possible new burst (then the burst list contains just Q -+ * after this step). -+ */ -+static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq) -+{ -+ /* -+ * If bfqq is already in the burst list or is part of a large -+ * burst, or finally has just been split, then there is -+ * nothing else to do. 
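The burst-tracking steps listed above boil down to a small state machine. The stand-alone sketch below uses demo constants, milliseconds instead of jiffies, ignores group membership, and only reports the verdict for the newly created queue (the real code also marks the queues already sitting in the burst list); it shows the three cases: a late creation resets the burst, a close creation grows it, and once the threshold is hit every further close creation is tagged as belonging to a large burst.

#include <stdio.h>

#define BURST_INTERVAL_MS	200	/* "shortly after" window (demo value) */
#define LARGE_BURST_THRESH	4	/* demo threshold */

static unsigned long last_ins_ms;
static int burst_size;
static int large_burst;

/* returns 1 if the queue created at now_ms must be treated as in a large burst */
static int handle_queue_creation(unsigned long now_ms)
{
	int in_large_burst = 0;

	if (now_ms - last_ins_ms > BURST_INTERVAL_MS) {
		/* burst finished: start a new one containing only this queue */
		large_burst = 0;
		burst_size = 1;
	} else if (large_burst) {
		/* burst already large: mark the new queue immediately */
		in_large_burst = 1;
	} else {
		/* burst not yet large: add the queue and re-check the threshold */
		burst_size++;
		if (burst_size >= LARGE_BURST_THRESH) {
			large_burst = 1;
			in_large_burst = 1;
		}
	}
	last_ins_ms = now_ms;
	return in_large_burst;
}

int main(void)
{
	unsigned long t[] = { 0, 50, 100, 150, 180, 2000, 2100 };
	int i;

	for (i = 0; i < 7; i++)
		printf("creation at %4lu ms -> large burst: %d\n",
		       t[i], handle_queue_creation(t[i]));
	return 0;
}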
-+ */ -+ if (!hlist_unhashed(&bfqq->burst_list_node) || -+ bfq_bfqq_in_large_burst(bfqq) || -+ time_is_after_eq_jiffies(bfqq->split_time + -+ msecs_to_jiffies(10))) -+ return; -+ -+ /* -+ * If bfqq's creation happens late enough, or bfqq belongs to -+ * a different group than the burst group, then the current -+ * burst is finished, and related data structures must be -+ * reset. -+ * -+ * In this respect, consider the special case where bfqq is -+ * the very first queue created after BFQ is selected for this -+ * device. In this case, last_ins_in_burst and -+ * burst_parent_entity are not yet significant when we get -+ * here. But it is easy to verify that, whether or not the -+ * following condition is true, bfqq will end up being -+ * inserted into the burst list. In particular the list will -+ * happen to contain only bfqq. And this is exactly what has -+ * to happen, as bfqq may be the first queue of the first -+ * burst. -+ */ -+ if (time_is_before_jiffies(bfqd->last_ins_in_burst + -+ bfqd->bfq_burst_interval) || -+ bfqq->entity.parent != bfqd->burst_parent_entity) { -+ bfqd->large_burst = false; -+ bfq_reset_burst_list(bfqd, bfqq); -+ bfq_log_bfqq(bfqd, bfqq, -+ "handle_burst: late activation or different group"); -+ goto end; -+ } -+ -+ /* -+ * If we get here, then bfqq is being activated shortly after the -+ * last queue. So, if the current burst is also large, we can mark -+ * bfqq as belonging to this large burst immediately. -+ */ -+ if (bfqd->large_burst) { -+ bfq_log_bfqq(bfqd, bfqq, "handle_burst: marked in burst"); -+ bfq_mark_bfqq_in_large_burst(bfqq); -+ goto end; -+ } -+ -+ /* -+ * If we get here, then a large-burst state has not yet been -+ * reached, but bfqq is being activated shortly after the last -+ * queue. Then we add bfqq to the burst. -+ */ -+ bfq_add_to_burst(bfqd, bfqq); -+end: -+ /* -+ * At this point, bfqq either has been added to the current -+ * burst or has caused the current burst to terminate and a -+ * possible new burst to start. In particular, in the second -+ * case, bfqq has become the first queue in the possible new -+ * burst. In both cases last_ins_in_burst needs to be moved -+ * forward. -+ */ -+ bfqd->last_ins_in_burst = jiffies; -+ -+} -+ -+static int bfq_bfqq_budget_left(struct bfq_queue *bfqq) -+{ -+ struct bfq_entity *entity = &bfqq->entity; -+ -+ return entity->budget - entity->service; -+} -+ -+/* -+ * If enough samples have been computed, return the current max budget -+ * stored in bfqd, which is dynamically updated according to the -+ * estimated disk peak rate; otherwise return the default max budget -+ */ -+static int bfq_max_budget(struct bfq_data *bfqd) -+{ -+ if (bfqd->budgets_assigned < bfq_stats_min_budgets) -+ return bfq_default_max_budget; -+ else -+ return bfqd->bfq_max_budget; -+} -+ -+/* -+ * Return min budget, which is a fraction of the current or default -+ * max budget (trying with 1/32) -+ */ -+static int bfq_min_budget(struct bfq_data *bfqd) -+{ -+ if (bfqd->budgets_assigned < bfq_stats_min_budgets) -+ return bfq_default_max_budget / 32; -+ else -+ return bfqd->bfq_max_budget / 32; -+} -+ -+static void bfq_bfqq_expire(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq, -+ bool compensate, -+ enum bfqq_expiration reason); -+ -+/* -+ * The next function, invoked after the input queue bfqq switches from -+ * idle to busy, updates the budget of bfqq. The function also tells -+ * whether the in-service queue should be expired, by returning -+ * true. 
The purpose of expiring the in-service queue is to give bfqq -+ * the chance to possibly preempt the in-service queue, and the reason -+ * for preempting the in-service queue is to achieve one of the two -+ * goals below. -+ * -+ * 1. Guarantee to bfqq its reserved bandwidth even if bfqq has -+ * expired because it has remained idle. In particular, bfqq may have -+ * expired for one of the following two reasons: -+ * -+ * - BFQ_BFQQ_NO_MORE_REQUEST bfqq did not enjoy any device idling and -+ * did not make it to issue a new request before its last request -+ * was served; -+ * -+ * - BFQ_BFQQ_TOO_IDLE bfqq did enjoy device idling, but did not issue -+ * a new request before the expiration of the idling-time. -+ * -+ * Even if bfqq has expired for one of the above reasons, the process -+ * associated with the queue may be however issuing requests greedily, -+ * and thus be sensitive to the bandwidth it receives (bfqq may have -+ * remained idle for other reasons: CPU high load, bfqq not enjoying -+ * idling, I/O throttling somewhere in the path from the process to -+ * the I/O scheduler, ...). But if, after every expiration for one of -+ * the above two reasons, bfqq has to wait for the service of at least -+ * one full budget of another queue before being served again, then -+ * bfqq is likely to get a much lower bandwidth or resource time than -+ * its reserved ones. To address this issue, two countermeasures need -+ * to be taken. -+ * -+ * First, the budget and the timestamps of bfqq need to be updated in -+ * a special way on bfqq reactivation: they need to be updated as if -+ * bfqq did not remain idle and did not expire. In fact, if they are -+ * computed as if bfqq expired and remained idle until reactivation, -+ * then the process associated with bfqq is treated as if, instead of -+ * being greedy, it stopped issuing requests when bfqq remained idle, -+ * and restarts issuing requests only on this reactivation. In other -+ * words, the scheduler does not help the process recover the "service -+ * hole" between bfqq expiration and reactivation. As a consequence, -+ * the process receives a lower bandwidth than its reserved one. In -+ * contrast, to recover this hole, the budget must be updated as if -+ * bfqq was not expired at all before this reactivation, i.e., it must -+ * be set to the value of the remaining budget when bfqq was -+ * expired. Along the same line, timestamps need to be assigned the -+ * value they had the last time bfqq was selected for service, i.e., -+ * before last expiration. Thus timestamps need to be back-shifted -+ * with respect to their normal computation (see [1] for more details -+ * on this tricky aspect). -+ * -+ * Secondly, to allow the process to recover the hole, the in-service -+ * queue must be expired too, to give bfqq the chance to preempt it -+ * immediately. In fact, if bfqq has to wait for a full budget of the -+ * in-service queue to be completed, then it may become impossible to -+ * let the process recover the hole, even if the back-shifted -+ * timestamps of bfqq are lower than those of the in-service queue. If -+ * this happens for most or all of the holes, then the process may not -+ * receive its reserved bandwidth. In this respect, it is worth noting -+ * that, being the service of outstanding requests unpreemptible, a -+ * little fraction of the holes may however be unrecoverable, thereby -+ * causing a little loss of bandwidth. -+ * -+ * The last important point is detecting whether bfqq does need this -+ * bandwidth recovery. 
In this respect, the next function deems the -+ * process associated with bfqq greedy, and thus allows it to recover -+ * the hole, if: 1) the process is waiting for the arrival of a new -+ * request (which implies that bfqq expired for one of the above two -+ * reasons), and 2) such a request has arrived soon. The first -+ * condition is controlled through the flag non_blocking_wait_rq, -+ * while the second through the flag arrived_in_time. If both -+ * conditions hold, then the function computes the budget in the -+ * above-described special way, and signals that the in-service queue -+ * should be expired. Timestamp back-shifting is done later in -+ * __bfq_activate_entity. -+ * -+ * 2. Reduce latency. Even if timestamps are not backshifted to let -+ * the process associated with bfqq recover a service hole, bfqq may -+ * however happen to have, after being (re)activated, a lower finish -+ * timestamp than the in-service queue. That is, the next budget of -+ * bfqq may have to be completed before the one of the in-service -+ * queue. If this is the case, then preempting the in-service queue -+ * allows this goal to be achieved, apart from the unpreemptible, -+ * outstanding requests mentioned above. -+ * -+ * Unfortunately, regardless of which of the above two goals one wants -+ * to achieve, service trees need first to be updated to know whether -+ * the in-service queue must be preempted. To have service trees -+ * correctly updated, the in-service queue must be expired and -+ * rescheduled, and bfqq must be scheduled too. This is one of the -+ * most costly operations (in future versions, the scheduling -+ * mechanism may be re-designed in such a way to make it possible to -+ * know whether preemption is needed without needing to update service -+ * trees). In addition, queue preemptions almost always cause random -+ * I/O, and thus loss of throughput. Because of these facts, the next -+ * function adopts the following simple scheme to avoid both costly -+ * operations and too frequent preemptions: it requests the expiration -+ * of the in-service queue (unconditionally) only for queues that need -+ * to recover a hole, or that either are weight-raised or deserve to -+ * be weight-raised. -+ */ -+static bool bfq_bfqq_update_budg_for_activation(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq, -+ bool arrived_in_time, -+ bool wr_or_deserves_wr) -+{ -+ struct bfq_entity *entity = &bfqq->entity; -+ -+ if (bfq_bfqq_non_blocking_wait_rq(bfqq) && arrived_in_time) { -+ /* -+ * We do not clear the flag non_blocking_wait_rq here, as -+ * the latter is used in bfq_activate_bfqq to signal -+ * that timestamps need to be back-shifted (and is -+ * cleared right after). -+ */ -+ -+ /* -+ * In next assignment we rely on that either -+ * entity->service or entity->budget are not updated -+ * on expiration if bfqq is empty (see -+ * __bfq_bfqq_recalc_budget). Thus both quantities -+ * remain unchanged after such an expiration, and the -+ * following statement therefore assigns to -+ * entity->budget the remaining budget on such an -+ * expiration. For clarity, entity->service is not -+ * updated on expiration in any case, and, in normal -+ * operation, is reset only when bfqq is selected for -+ * service (see bfq_get_next_queue). 
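Reduced to its essentials, the update just described picks between two formulas: keep the budget left over at the last expiration (capped by max_budget) when the queue was waiting and the request arrived in time, otherwise make room for at least the new next request. A schematic user-space version, with plain long integers and hypothetical field names standing in for the entity/bfqq fields, and with the "expire the in-service queue" return value left out:

#include <stdio.h>

struct queue_sketch {
	long budget;		/* entity->budget */
	long service;		/* entity->service */
	long max_budget;	/* bfqq->max_budget */
};

static long min_l(long a, long b) { return a < b ? a : b; }
static long max_l(long a, long b) { return a > b ? a : b; }

/*
 * was_waiting_and_in_time: non_blocking_wait_rq && arrived_in_time;
 * next_rq_charge: service charge of the new next request.
 */
static void update_budget_on_activation(struct queue_sketch *q,
					int was_waiting_and_in_time,
					long next_rq_charge)
{
	if (was_waiting_and_in_time)
		/* recover the hole: keep the budget left at last expiration */
		q->budget = min_l(q->budget - q->service, q->max_budget);
	else
		/* fresh activation: make room for at least the next request */
		q->budget = max_l(q->max_budget, next_rq_charge);
}

int main(void)
{
	struct queue_sketch q = { .budget = 4096, .service = 1024, .max_budget = 16384 };

	update_budget_on_activation(&q, 1, 256);
	printf("greedy queue, back in time: budget = %ld\n", q.budget);	/* 3072 */

	q.service = 0;
	update_budget_on_activation(&q, 0, 32768);
	printf("late arrival, big request:  budget = %ld\n", q.budget);	/* 32768 */
	return 0;
}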
-+ */ -+ BUG_ON(bfqq->max_budget < 0); -+ entity->budget = min_t(unsigned long, -+ bfq_bfqq_budget_left(bfqq), -+ bfqq->max_budget); -+ -+ BUG_ON(entity->budget < 0); -+ return true; -+ } -+ -+ BUG_ON(bfqq->max_budget < 0); -+ entity->budget = max_t(unsigned long, bfqq->max_budget, -+ bfq_serv_to_charge(bfqq->next_rq, bfqq)); -+ BUG_ON(entity->budget < 0); -+ -+ bfq_clear_bfqq_non_blocking_wait_rq(bfqq); -+ return wr_or_deserves_wr; -+} -+ -+static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq, -+ unsigned int old_wr_coeff, -+ bool wr_or_deserves_wr, -+ bool interactive, -+ bool in_burst, -+ bool soft_rt) -+{ -+ if (old_wr_coeff == 1 && wr_or_deserves_wr) { -+ /* start a weight-raising period */ -+ if (interactive) { -+ bfqq->wr_coeff = bfqd->bfq_wr_coeff; -+ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); -+ } else { -+ bfqq->wr_start_at_switch_to_srt = jiffies; -+ bfqq->wr_coeff = bfqd->bfq_wr_coeff * -+ BFQ_SOFTRT_WEIGHT_FACTOR; -+ bfqq->wr_cur_max_time = -+ bfqd->bfq_wr_rt_max_time; -+ } -+ /* -+ * If needed, further reduce budget to make sure it is -+ * close to bfqq's backlog, so as to reduce the -+ * scheduling-error component due to a too large -+ * budget. Do not care about throughput consequences, -+ * but only about latency. Finally, do not assign a -+ * too small budget either, to avoid increasing -+ * latency by causing too frequent expirations. -+ */ -+ bfqq->entity.budget = min_t(unsigned long, -+ bfqq->entity.budget, -+ 2 * bfq_min_budget(bfqd)); -+ -+ bfq_log_bfqq(bfqd, bfqq, -+ "wrais starting at %lu, rais_max_time %u", -+ jiffies, -+ jiffies_to_msecs(bfqq->wr_cur_max_time)); -+ } else if (old_wr_coeff > 1) { -+ if (interactive) { /* update wr coeff and duration */ -+ bfqq->wr_coeff = bfqd->bfq_wr_coeff; -+ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); -+ } else if (in_burst) { -+ bfqq->wr_coeff = 1; -+ bfq_log_bfqq(bfqd, bfqq, -+ "wrais ending at %lu, rais_max_time %u", -+ jiffies, -+ jiffies_to_msecs(bfqq-> -+ wr_cur_max_time)); -+ } else if (soft_rt) { -+ /* -+ * The application is now or still meeting the -+ * requirements for being deemed soft rt. We -+ * can then correctly and safely (re)charge -+ * the weight-raising duration for the -+ * application with the weight-raising -+ * duration for soft rt applications. -+ * -+ * In particular, doing this recharge now, i.e., -+ * before the weight-raising period for the -+ * application finishes, reduces the probability -+ * of the following negative scenario: -+ * 1) the weight of a soft rt application is -+ * raised at startup (as for any newly -+ * created application), -+ * 2) since the application is not interactive, -+ * at a certain time weight-raising is -+ * stopped for the application, -+ * 3) at that time the application happens to -+ * still have pending requests, and hence -+ * is destined to not have a chance to be -+ * deemed soft rt before these requests are -+ * completed (see the comments to the -+ * function bfq_bfqq_softrt_next_start() -+ * for details on soft rt detection), -+ * 4) these pending requests experience a high -+ * latency because the application is not -+ * weight-raised while they are pending. 
-+ */ -+ if (bfqq->wr_cur_max_time != -+ bfqd->bfq_wr_rt_max_time) { -+ bfqq->wr_start_at_switch_to_srt = -+ bfqq->last_wr_start_finish; -+ BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish)); -+ -+ bfqq->wr_cur_max_time = -+ bfqd->bfq_wr_rt_max_time; -+ bfqq->wr_coeff = bfqd->bfq_wr_coeff * -+ BFQ_SOFTRT_WEIGHT_FACTOR; -+ bfq_log_bfqq(bfqd, bfqq, -+ "switching to soft_rt wr"); -+ } else -+ bfq_log_bfqq(bfqd, bfqq, -+ "moving forward soft_rt wr duration"); -+ bfqq->last_wr_start_finish = jiffies; -+ } -+ } -+} -+ -+static bool bfq_bfqq_idle_for_long_time(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq) -+{ -+ return bfqq->dispatched == 0 && -+ time_is_before_jiffies( -+ bfqq->budget_timeout + -+ bfqd->bfq_wr_min_idle_time); -+} -+ -+static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq, -+ int old_wr_coeff, -+ struct request *rq, -+ bool *interactive) -+{ -+ bool soft_rt, in_burst, wr_or_deserves_wr, -+ bfqq_wants_to_preempt, -+ idle_for_long_time = bfq_bfqq_idle_for_long_time(bfqd, bfqq), -+ /* -+ * See the comments on -+ * bfq_bfqq_update_budg_for_activation for -+ * details on the usage of the next variable. -+ */ -+ arrived_in_time = ktime_get_ns() <= -+ RQ_BIC(rq)->ttime.last_end_request + -+ bfqd->bfq_slice_idle * 3; -+ -+ bfq_log_bfqq(bfqd, bfqq, -+ "bfq_add_request non-busy: " -+ "jiffies %lu, in_time %d, idle_long %d busyw %d " -+ "wr_coeff %u", -+ jiffies, arrived_in_time, -+ idle_for_long_time, -+ bfq_bfqq_non_blocking_wait_rq(bfqq), -+ old_wr_coeff); -+ -+ BUG_ON(bfqq->entity.budget < bfqq->entity.service); -+ -+ BUG_ON(bfqq == bfqd->in_service_queue); -+ bfqg_stats_update_io_add(bfqq_group(RQ_BFQQ(rq)), bfqq, rq->cmd_flags); -+ -+ /* -+ * bfqq deserves to be weight-raised if: -+ * - it is sync, -+ * - it does not belong to a large burst, -+ * - it has been idle for enough time or is soft real-time, -+ * - is linked to a bfq_io_cq (it is not shared in any sense) -+ */ -+ in_burst = bfq_bfqq_in_large_burst(bfqq); -+ soft_rt = bfqd->bfq_wr_max_softrt_rate > 0 && -+ !in_burst && -+ time_is_before_jiffies(bfqq->soft_rt_next_start); -+ *interactive = -+ !in_burst && -+ idle_for_long_time; -+ wr_or_deserves_wr = bfqd->low_latency && -+ (bfqq->wr_coeff > 1 || -+ (bfq_bfqq_sync(bfqq) && -+ bfqq->bic && (*interactive || soft_rt))); -+ -+ bfq_log_bfqq(bfqd, bfqq, -+ "bfq_add_request: " -+ "in_burst %d, " -+ "soft_rt %d (next %lu), inter %d, bic %p", -+ bfq_bfqq_in_large_burst(bfqq), soft_rt, -+ bfqq->soft_rt_next_start, -+ *interactive, -+ bfqq->bic); -+ -+ /* -+ * Using the last flag, update budget and check whether bfqq -+ * may want to preempt the in-service queue. -+ */ -+ bfqq_wants_to_preempt = -+ bfq_bfqq_update_budg_for_activation(bfqd, bfqq, -+ arrived_in_time, -+ wr_or_deserves_wr); -+ -+ /* -+ * If bfqq happened to be activated in a burst, but has been -+ * idle for much more than an interactive queue, then we -+ * assume that, in the overall I/O initiated in the burst, the -+ * I/O associated with bfqq is finished. So bfqq does not need -+ * to be treated as a queue belonging to a burst -+ * anymore. Accordingly, we reset bfqq's in_large_burst flag -+ * if set, and remove bfqq from the burst list if it's -+ * there. We do not decrement burst_size, because the fact -+ * that bfqq does not need to belong to the burst list any -+ * more does not invalidate the fact that bfqq was created in -+ * a burst. 
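For reference, the "deserves weight raising" test computed a few lines above reduces to one boolean expression; here it is as a stand-alone sketch with int flags standing in for the bfqd/bfqq state:

#include <stdio.h>

/* sketch of the wr_or_deserves_wr test on the idle->busy switch */
static int wr_or_deserves_wr(int low_latency, int wr_coeff, int is_sync,
			     int has_bic, int interactive, int soft_rt)
{
	return low_latency &&
	       (wr_coeff > 1 ||
		(is_sync && has_bic && (interactive || soft_rt)));
}

int main(void)
{
	/* sync queue that has been idle for a long time: raised */
	printf("interactive sync queue: %d\n",
	       wr_or_deserves_wr(1, 1, 1, 1, 1, 0));
	/* async queue, neither interactive nor soft real-time: not raised */
	printf("plain async queue:      %d\n",
	       wr_or_deserves_wr(1, 1, 0, 1, 0, 0));
	return 0;
}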
-+ */ -+ if (likely(!bfq_bfqq_just_created(bfqq)) && -+ idle_for_long_time && -+ time_is_before_jiffies( -+ bfqq->budget_timeout + -+ msecs_to_jiffies(10000))) { -+ hlist_del_init(&bfqq->burst_list_node); -+ bfq_clear_bfqq_in_large_burst(bfqq); -+ } -+ -+ bfq_clear_bfqq_just_created(bfqq); -+ -+ if (!bfq_bfqq_IO_bound(bfqq)) { -+ if (arrived_in_time) { -+ bfqq->requests_within_timer++; -+ if (bfqq->requests_within_timer >= -+ bfqd->bfq_requests_within_timer) -+ bfq_mark_bfqq_IO_bound(bfqq); -+ } else -+ bfqq->requests_within_timer = 0; -+ bfq_log_bfqq(bfqd, bfqq, "requests in time %d", -+ bfqq->requests_within_timer); -+ } -+ -+ if (bfqd->low_latency) { -+ if (unlikely(time_is_after_jiffies(bfqq->split_time))) -+ /* wraparound */ -+ bfqq->split_time = -+ jiffies - bfqd->bfq_wr_min_idle_time - 1; -+ -+ if (time_is_before_jiffies(bfqq->split_time + -+ bfqd->bfq_wr_min_idle_time)) { -+ bfq_update_bfqq_wr_on_rq_arrival(bfqd, bfqq, -+ old_wr_coeff, -+ wr_or_deserves_wr, -+ *interactive, -+ in_burst, -+ soft_rt); -+ -+ if (old_wr_coeff != bfqq->wr_coeff) -+ bfqq->entity.prio_changed = 1; -+ } -+ } -+ -+ bfqq->last_idle_bklogged = jiffies; -+ bfqq->service_from_backlogged = 0; -+ bfq_clear_bfqq_softrt_update(bfqq); -+ -+ bfq_add_bfqq_busy(bfqd, bfqq); -+ -+ /* -+ * Expire in-service queue only if preemption may be needed -+ * for guarantees. In this respect, the function -+ * next_queue_may_preempt just checks a simple, necessary -+ * condition, and not a sufficient condition based on -+ * timestamps. In fact, for the latter condition to be -+ * evaluated, timestamps would need first to be updated, and -+ * this operation is quite costly (see the comments on the -+ * function bfq_bfqq_update_budg_for_activation). -+ */ -+ if (bfqd->in_service_queue && bfqq_wants_to_preempt && -+ bfqd->in_service_queue->wr_coeff < bfqq->wr_coeff && -+ next_queue_may_preempt(bfqd)) { -+ struct bfq_queue *in_serv = -+ bfqd->in_service_queue; -+ BUG_ON(in_serv == bfqq); -+ -+ bfq_bfqq_expire(bfqd, bfqd->in_service_queue, -+ false, BFQ_BFQQ_PREEMPTED); -+ } -+} -+ -+static void bfq_add_request(struct request *rq) -+{ -+ struct bfq_queue *bfqq = RQ_BFQQ(rq); -+ struct bfq_data *bfqd = bfqq->bfqd; -+ struct request *next_rq, *prev; -+ unsigned int old_wr_coeff = bfqq->wr_coeff; -+ bool interactive = false; -+ -+ bfq_log_bfqq(bfqd, bfqq, "add_request: size %u %s", -+ blk_rq_sectors(rq), rq_is_sync(rq) ? "S" : "A"); -+ -+ if (bfqq->wr_coeff > 1) /* queue is being weight-raised */ -+ bfq_log_bfqq(bfqd, bfqq, -+ "raising period dur %u/%u msec, old coeff %u, w %d(%d)", -+ jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish), -+ jiffies_to_msecs(bfqq->wr_cur_max_time), -+ bfqq->wr_coeff, -+ bfqq->entity.weight, bfqq->entity.orig_weight); -+ -+ bfqq->queued[rq_is_sync(rq)]++; -+ bfqd->queued++; -+ -+ elv_rb_add(&bfqq->sort_list, rq); -+ -+ /* -+ * Check if this request is a better next-to-serve candidate. -+ */ -+ prev = bfqq->next_rq; -+ next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position); -+ BUG_ON(!next_rq); -+ bfqq->next_rq = next_rq; -+ -+ /* -+ * Adjust priority tree position, if next_rq changes. -+ */ -+ if (prev != bfqq->next_rq) -+ bfq_pos_tree_add_move(bfqd, bfqq); -+ -+ if (!bfq_bfqq_busy(bfqq)) /* switching to busy ... 
*/ -+ bfq_bfqq_handle_idle_busy_switch(bfqd, bfqq, old_wr_coeff, -+ rq, &interactive); -+ else { -+ if (bfqd->low_latency && old_wr_coeff == 1 && !rq_is_sync(rq) && -+ time_is_before_jiffies( -+ bfqq->last_wr_start_finish + -+ bfqd->bfq_wr_min_inter_arr_async)) { -+ bfqq->wr_coeff = bfqd->bfq_wr_coeff; -+ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); -+ -+ bfqd->wr_busy_queues++; -+ BUG_ON(bfqd->wr_busy_queues > bfqd->busy_queues); -+ bfqq->entity.prio_changed = 1; -+ bfq_log_bfqq(bfqd, bfqq, -+ "non-idle wrais starting, " -+ "wr_max_time %u wr_busy %d", -+ jiffies_to_msecs(bfqq->wr_cur_max_time), -+ bfqd->wr_busy_queues); -+ } -+ if (prev != bfqq->next_rq) -+ bfq_updated_next_req(bfqd, bfqq); -+ } -+ -+ /* -+ * Assign jiffies to last_wr_start_finish in the following -+ * cases: -+ * -+ * . if bfqq is not going to be weight-raised, because, for -+ * non weight-raised queues, last_wr_start_finish stores the -+ * arrival time of the last request; as of now, this piece -+ * of information is used only for deciding whether to -+ * weight-raise async queues -+ * -+ * . if bfqq is not weight-raised, because, if bfqq is now -+ * switching to weight-raised, then last_wr_start_finish -+ * stores the time when weight-raising starts -+ * -+ * . if bfqq is interactive, because, regardless of whether -+ * bfqq is currently weight-raised, the weight-raising -+ * period must start or restart (this case is considered -+ * separately because it is not detected by the above -+ * conditions, if bfqq is already weight-raised) -+ * -+ * last_wr_start_finish has to be updated also if bfqq is soft -+ * real-time, because the weight-raising period is constantly -+ * restarted on idle-to-busy transitions for these queues, but -+ * this is already done in bfq_bfqq_handle_idle_busy_switch if -+ * needed. 
-+ */ -+ if (bfqd->low_latency && -+ (old_wr_coeff == 1 || bfqq->wr_coeff == 1 || interactive)) -+ bfqq->last_wr_start_finish = jiffies; -+} -+ -+static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd, -+ struct bio *bio) -+{ -+ struct task_struct *tsk = current; -+ struct bfq_io_cq *bic; -+ struct bfq_queue *bfqq; -+ -+ bic = bfq_bic_lookup(bfqd, tsk->io_context); -+ if (!bic) -+ return NULL; -+ -+ bfqq = bic_to_bfqq(bic, op_is_sync(bio->bi_opf)); -+ if (bfqq) -+ return elv_rb_find(&bfqq->sort_list, bio_end_sector(bio)); -+ -+ return NULL; -+} -+ -+static sector_t get_sdist(sector_t last_pos, struct request *rq) -+{ -+ sector_t sdist = 0; -+ -+ if (last_pos) { -+ if (last_pos < blk_rq_pos(rq)) -+ sdist = blk_rq_pos(rq) - last_pos; -+ else -+ sdist = last_pos - blk_rq_pos(rq); -+ } -+ -+ return sdist; -+} -+ -+static void bfq_activate_request(struct request_queue *q, struct request *rq) -+{ -+ struct bfq_data *bfqd = q->elevator->elevator_data; -+ bfqd->rq_in_driver++; -+} -+ -+static void bfq_deactivate_request(struct request_queue *q, struct request *rq) -+{ -+ struct bfq_data *bfqd = q->elevator->elevator_data; -+ -+ BUG_ON(bfqd->rq_in_driver == 0); -+ bfqd->rq_in_driver--; -+} -+ -+static void bfq_remove_request(struct request *rq) -+{ -+ struct bfq_queue *bfqq = RQ_BFQQ(rq); -+ struct bfq_data *bfqd = bfqq->bfqd; -+ const int sync = rq_is_sync(rq); -+ -+ BUG_ON(bfqq->entity.service > bfqq->entity.budget && -+ bfqq == bfqd->in_service_queue); -+ -+ if (bfqq->next_rq == rq) { -+ bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq); -+ bfq_updated_next_req(bfqd, bfqq); -+ } -+ -+ if (rq->queuelist.prev != &rq->queuelist) -+ list_del_init(&rq->queuelist); -+ BUG_ON(bfqq->queued[sync] == 0); -+ bfqq->queued[sync]--; -+ bfqd->queued--; -+ elv_rb_del(&bfqq->sort_list, rq); -+ -+ if (RB_EMPTY_ROOT(&bfqq->sort_list)) { -+ bfqq->next_rq = NULL; -+ -+ BUG_ON(bfqq->entity.budget < 0); -+ -+ if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->in_service_queue) { -+ BUG_ON(bfqq->ref < 2); /* referred by rq and on tree */ -+ bfq_del_bfqq_busy(bfqd, bfqq, false); -+ /* -+ * bfqq emptied. In normal operation, when -+ * bfqq is empty, bfqq->entity.service and -+ * bfqq->entity.budget must contain, -+ * respectively, the service received and the -+ * budget used last time bfqq emptied. These -+ * facts do not hold in this case, as at least -+ * this last removal occurred while bfqq is -+ * not in service. To avoid inconsistencies, -+ * reset both bfqq->entity.service and -+ * bfqq->entity.budget, if bfqq has still a -+ * process that may issue I/O requests to it. -+ */ -+ bfqq->entity.budget = bfqq->entity.service = 0; -+ } -+ -+ /* -+ * Remove queue from request-position tree as it is empty. 
-+ */ -+ if (bfqq->pos_root) { -+ rb_erase(&bfqq->pos_node, bfqq->pos_root); -+ bfqq->pos_root = NULL; -+ } -+ } -+ -+ if (rq->cmd_flags & REQ_META) { -+ BUG_ON(bfqq->meta_pending == 0); -+ bfqq->meta_pending--; -+ } -+ bfqg_stats_update_io_remove(bfqq_group(bfqq), rq->cmd_flags); -+} -+ -+static enum elv_merge bfq_merge(struct request_queue *q, struct request **req, -+ struct bio *bio) -+{ -+ struct bfq_data *bfqd = q->elevator->elevator_data; -+ struct request *__rq; -+ -+ __rq = bfq_find_rq_fmerge(bfqd, bio); -+ if (__rq && elv_bio_merge_ok(__rq, bio)) { -+ *req = __rq; -+ return ELEVATOR_FRONT_MERGE; -+ } -+ -+ return ELEVATOR_NO_MERGE; -+} -+ -+static void bfq_merged_request(struct request_queue *q, struct request *req, -+ enum elv_merge type) -+{ -+ if (type == ELEVATOR_FRONT_MERGE && -+ rb_prev(&req->rb_node) && -+ blk_rq_pos(req) < -+ blk_rq_pos(container_of(rb_prev(&req->rb_node), -+ struct request, rb_node))) { -+ struct bfq_queue *bfqq = RQ_BFQQ(req); -+ struct bfq_data *bfqd = bfqq->bfqd; -+ struct request *prev, *next_rq; -+ -+ /* Reposition request in its sort_list */ -+ elv_rb_del(&bfqq->sort_list, req); -+ elv_rb_add(&bfqq->sort_list, req); -+ /* Choose next request to be served for bfqq */ -+ prev = bfqq->next_rq; -+ next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req, -+ bfqd->last_position); -+ BUG_ON(!next_rq); -+ bfqq->next_rq = next_rq; -+ /* -+ * If next_rq changes, update both the queue's budget to -+ * fit the new request and the queue's position in its -+ * rq_pos_tree. -+ */ -+ if (prev != bfqq->next_rq) { -+ bfq_updated_next_req(bfqd, bfqq); -+ bfq_pos_tree_add_move(bfqd, bfqq); -+ } -+ } -+} -+ -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+static void bfq_bio_merged(struct request_queue *q, struct request *req, -+ struct bio *bio) -+{ -+ bfqg_stats_update_io_merged(bfqq_group(RQ_BFQQ(req)), bio->bi_opf); -+} -+#endif -+ -+static void bfq_merged_requests(struct request_queue *q, struct request *rq, -+ struct request *next) -+{ -+ struct bfq_queue *bfqq = RQ_BFQQ(rq), *next_bfqq = RQ_BFQQ(next); -+ -+ /* -+ * If next and rq belong to the same bfq_queue and next is older -+ * than rq, then reposition rq in the fifo (by substituting next -+ * with rq). Otherwise, if next and rq belong to different -+ * bfq_queues, never reposition rq: in fact, we would have to -+ * reposition it with respect to next's position in its own fifo, -+ * which would most certainly be too expensive with respect to -+ * the benefits. -+ */ -+ if (bfqq == next_bfqq && -+ !list_empty(&rq->queuelist) && !list_empty(&next->queuelist) && -+ next->fifo_time < rq->fifo_time) { -+ list_del_init(&rq->queuelist); -+ list_replace_init(&next->queuelist, &rq->queuelist); -+ rq->fifo_time = next->fifo_time; -+ } -+ -+ if (bfqq->next_rq == next) -+ bfqq->next_rq = rq; -+ -+ bfq_remove_request(next); -+ bfqg_stats_update_io_merged(bfqq_group(bfqq), next->cmd_flags); -+} -+ -+/* Must be called with bfqq != NULL */ -+static void bfq_bfqq_end_wr(struct bfq_queue *bfqq) -+{ -+ BUG_ON(!bfqq); -+ -+ if (bfq_bfqq_busy(bfqq)) { -+ bfqq->bfqd->wr_busy_queues--; -+ BUG_ON(bfqq->bfqd->wr_busy_queues < 0); -+ } -+ bfqq->wr_coeff = 1; -+ bfqq->wr_cur_max_time = 0; -+ bfqq->last_wr_start_finish = jiffies; -+ /* -+ * Trigger a weight change on the next invocation of -+ * __bfq_entity_update_weight_prio. 
-+ */ -+ bfqq->entity.prio_changed = 1; -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "end_wr: wrais ending at %lu, rais_max_time %u", -+ bfqq->last_wr_start_finish, -+ jiffies_to_msecs(bfqq->wr_cur_max_time)); -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "end_wr: wr_busy %d", -+ bfqq->bfqd->wr_busy_queues); -+} -+ -+static void bfq_end_wr_async_queues(struct bfq_data *bfqd, -+ struct bfq_group *bfqg) -+{ -+ int i, j; -+ -+ for (i = 0; i < 2; i++) -+ for (j = 0; j < IOPRIO_BE_NR; j++) -+ if (bfqg->async_bfqq[i][j]) -+ bfq_bfqq_end_wr(bfqg->async_bfqq[i][j]); -+ if (bfqg->async_idle_bfqq) -+ bfq_bfqq_end_wr(bfqg->async_idle_bfqq); -+} -+ -+static void bfq_end_wr(struct bfq_data *bfqd) -+{ -+ struct bfq_queue *bfqq; -+ -+ spin_lock_irq(bfqd->queue->queue_lock); -+ -+ list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list) -+ bfq_bfqq_end_wr(bfqq); -+ list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list) -+ bfq_bfqq_end_wr(bfqq); -+ bfq_end_wr_async(bfqd); -+ -+ spin_unlock_irq(bfqd->queue->queue_lock); -+} -+ -+static sector_t bfq_io_struct_pos(void *io_struct, bool request) -+{ -+ if (request) -+ return blk_rq_pos(io_struct); -+ else -+ return ((struct bio *)io_struct)->bi_iter.bi_sector; -+} -+ -+static int bfq_rq_close_to_sector(void *io_struct, bool request, -+ sector_t sector) -+{ -+ return abs(bfq_io_struct_pos(io_struct, request) - sector) <= -+ BFQQ_CLOSE_THR; -+} -+ -+static struct bfq_queue *bfqq_find_close(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq, -+ sector_t sector) -+{ -+ struct rb_root *root = &bfq_bfqq_to_bfqg(bfqq)->rq_pos_tree; -+ struct rb_node *parent, *node; -+ struct bfq_queue *__bfqq; -+ -+ if (RB_EMPTY_ROOT(root)) -+ return NULL; -+ -+ /* -+ * First, if we find a request starting at the end of the last -+ * request, choose it. -+ */ -+ __bfqq = bfq_rq_pos_tree_lookup(bfqd, root, sector, &parent, NULL); -+ if (__bfqq) -+ return __bfqq; -+ -+ /* -+ * If the exact sector wasn't found, the parent of the NULL leaf -+ * will contain the closest sector (rq_pos_tree sorted by -+ * next_request position). -+ */ -+ __bfqq = rb_entry(parent, struct bfq_queue, pos_node); -+ if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector)) -+ return __bfqq; -+ -+ if (blk_rq_pos(__bfqq->next_rq) < sector) -+ node = rb_next(&__bfqq->pos_node); -+ else -+ node = rb_prev(&__bfqq->pos_node); -+ if (!node) -+ return NULL; -+ -+ __bfqq = rb_entry(node, struct bfq_queue, pos_node); -+ if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector)) -+ return __bfqq; -+ -+ return NULL; -+} -+ -+static struct bfq_queue *bfq_find_close_cooperator(struct bfq_data *bfqd, -+ struct bfq_queue *cur_bfqq, -+ sector_t sector) -+{ -+ struct bfq_queue *bfqq; -+ -+ /* -+ * We shall notice if some of the queues are cooperating, -+ * e.g., working closely on the same area of the device. In -+ * that case, we can group them together and: 1) don't waste -+ * time idling, and 2) serve the union of their requests in -+ * the best possible order for throughput. -+ */ -+ bfqq = bfqq_find_close(bfqd, cur_bfqq, sector); -+ if (!bfqq || bfqq == cur_bfqq) -+ return NULL; -+ -+ return bfqq; -+} -+ -+static struct bfq_queue * -+bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq) -+{ -+ int process_refs, new_process_refs; -+ struct bfq_queue *__bfqq; -+ -+ /* -+ * If there are no process references on the new_bfqq, then it is -+ * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain -+ * may have dropped their last reference (not just their last process -+ * reference). 
-+ */ -+ if (!bfqq_process_refs(new_bfqq)) -+ return NULL; -+ -+ /* Avoid a circular list and skip interim queue merges. */ -+ while ((__bfqq = new_bfqq->new_bfqq)) { -+ if (__bfqq == bfqq) -+ return NULL; -+ new_bfqq = __bfqq; -+ } -+ -+ process_refs = bfqq_process_refs(bfqq); -+ new_process_refs = bfqq_process_refs(new_bfqq); -+ /* -+ * If the process for the bfqq has gone away, there is no -+ * sense in merging the queues. -+ */ -+ if (process_refs == 0 || new_process_refs == 0) -+ return NULL; -+ -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d", -+ new_bfqq->pid); -+ -+ /* -+ * Merging is just a redirection: the requests of the process -+ * owning one of the two queues are redirected to the other queue. -+ * The latter queue, in its turn, is set as shared if this is the -+ * first time that the requests of some process are redirected to -+ * it. -+ * -+ * We redirect bfqq to new_bfqq and not the opposite, because we -+ * are in the context of the process owning bfqq, hence we have -+ * the io_cq of this process. So we can immediately configure this -+ * io_cq to redirect the requests of the process to new_bfqq. -+ * -+ * NOTE, even if new_bfqq coincides with the in-service queue, the -+ * io_cq of new_bfqq is not available, because, if the in-service -+ * queue is shared, bfqd->in_service_bic may not point to the -+ * io_cq of the in-service queue. -+ * Redirecting the requests of the process owning bfqq to the -+ * currently in-service queue is in any case the best option, as -+ * we feed the in-service queue with new requests close to the -+ * last request served and, by doing so, hopefully increase the -+ * throughput. -+ */ -+ bfqq->new_bfqq = new_bfqq; -+ new_bfqq->ref += process_refs; -+ return new_bfqq; -+} -+ -+static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq, -+ struct bfq_queue *new_bfqq) -+{ -+ if (bfq_class_idle(bfqq) || bfq_class_idle(new_bfqq) || -+ (bfqq->ioprio_class != new_bfqq->ioprio_class)) -+ return false; -+ -+ /* -+ * If either of the queues has already been detected as seeky, -+ * then merging it with the other queue is unlikely to lead to -+ * sequential I/O. -+ */ -+ if (BFQQ_SEEKY(bfqq) || BFQQ_SEEKY(new_bfqq)) -+ return false; -+ -+ /* -+ * Interleaved I/O is known to be done by (some) applications -+ * only for reads, so it does not make sense to merge async -+ * queues. -+ */ -+ if (!bfq_bfqq_sync(bfqq) || !bfq_bfqq_sync(new_bfqq)) -+ return false; -+ -+ return true; -+} -+ -+/* -+ * If this function returns true, then bfqq cannot be merged. The idea -+ * is that true cooperation happens very early after processes start -+ * to do I/O. Usually, late cooperations are just accidental false -+ * positives. In case bfqq is weight-raised, such false positives -+ * would evidently degrade latency guarantees for bfqq. -+ */ -+static bool wr_from_too_long(struct bfq_queue *bfqq) -+{ -+ return bfqq->wr_coeff > 1 && -+ time_is_before_jiffies(bfqq->last_wr_start_finish + -+ msecs_to_jiffies(100)); -+} -+ -+/* -+ * Attempt to schedule a merge of bfqq with the currently in-service -+ * queue or with a close queue among the scheduled queues. Return -+ * NULL if no merge was scheduled, a pointer to the shared bfq_queue -+ * structure otherwise. 
-+ * -+ * The OOM queue is not allowed to participate to cooperation: in fact, since -+ * the requests temporarily redirected to the OOM queue could be redirected -+ * again to dedicated queues at any time, the state needed to correctly -+ * handle merging with the OOM queue would be quite complex and expensive -+ * to maintain. Besides, in such a critical condition as an out of memory, -+ * the benefits of queue merging may be little relevant, or even negligible. -+ * -+ * Weight-raised queues can be merged only if their weight-raising -+ * period has just started. In fact cooperating processes are usually -+ * started together. Thus, with this filter we avoid false positives -+ * that would jeopardize low-latency guarantees. -+ * -+ * WARNING: queue merging may impair fairness among non-weight raised -+ * queues, for at least two reasons: 1) the original weight of a -+ * merged queue may change during the merged state, 2) even being the -+ * weight the same, a merged queue may be bloated with many more -+ * requests than the ones produced by its originally-associated -+ * process. -+ */ -+static struct bfq_queue * -+bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq, -+ void *io_struct, bool request) -+{ -+ struct bfq_queue *in_service_bfqq, *new_bfqq; -+ -+ if (bfqq->new_bfqq) -+ return bfqq->new_bfqq; -+ -+ if (io_struct && wr_from_too_long(bfqq) && -+ likely(bfqq != &bfqd->oom_bfqq)) -+ bfq_log_bfqq(bfqd, bfqq, -+ "would have looked for coop, but bfq%d wr", -+ bfqq->pid); -+ -+ if (!io_struct || -+ wr_from_too_long(bfqq) || -+ unlikely(bfqq == &bfqd->oom_bfqq)) -+ return NULL; -+ -+ /* If there is only one backlogged queue, don't search. */ -+ if (bfqd->busy_queues == 1) -+ return NULL; -+ -+ in_service_bfqq = bfqd->in_service_queue; -+ -+ if (in_service_bfqq && in_service_bfqq != bfqq && -+ bfqd->in_service_bic && wr_from_too_long(in_service_bfqq) -+ && likely(in_service_bfqq == &bfqd->oom_bfqq)) -+ bfq_log_bfqq(bfqd, bfqq, -+ "would have tried merge with in-service-queue, but wr"); -+ -+ if (!in_service_bfqq || in_service_bfqq == bfqq || -+ !bfqd->in_service_bic || wr_from_too_long(in_service_bfqq) || -+ unlikely(in_service_bfqq == &bfqd->oom_bfqq)) -+ goto check_scheduled; -+ -+ if (bfq_rq_close_to_sector(io_struct, request, bfqd->last_position) && -+ bfqq->entity.parent == in_service_bfqq->entity.parent && -+ bfq_may_be_close_cooperator(bfqq, in_service_bfqq)) { -+ new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq); -+ if (new_bfqq) -+ return new_bfqq; -+ } -+ /* -+ * Check whether there is a cooperator among currently scheduled -+ * queues. The only thing we need is that the bio/request is not -+ * NULL, as we need it to establish whether a cooperator exists. 
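The weight-raising filter applied throughout this function is tiny; a stand-alone rendering with times flattened to milliseconds (the kernel version compares jiffies through time_is_before_jiffies()):

#include <stdio.h>

/* sketch of the "weight raising started too long ago" merge filter */
static int wr_from_too_long(int wr_coeff, unsigned long wr_start_ms,
			    unsigned long now_ms)
{
	return wr_coeff > 1 && now_ms - wr_start_ms > 100;
}

int main(void)
{
	/* freshly weight-raised queue: still allowed to merge */
	printf("fresh wr, mergeable: %d\n", !wr_from_too_long(30, 1000, 1050));
	/* long-standing weight raising: likely a false-positive cooperation */
	printf("old wr,   mergeable: %d\n", !wr_from_too_long(30, 1000, 2000));
	return 0;
}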
-+ */ -+check_scheduled: -+ new_bfqq = bfq_find_close_cooperator(bfqd, bfqq, -+ bfq_io_struct_pos(io_struct, request)); -+ -+ BUG_ON(new_bfqq && bfqq->entity.parent != new_bfqq->entity.parent); -+ -+ if (new_bfqq && wr_from_too_long(new_bfqq) && -+ likely(new_bfqq != &bfqd->oom_bfqq) && -+ bfq_may_be_close_cooperator(bfqq, new_bfqq)) -+ bfq_log_bfqq(bfqd, bfqq, -+ "would have merged with bfq%d, but wr", -+ new_bfqq->pid); -+ -+ if (new_bfqq && !wr_from_too_long(new_bfqq) && -+ likely(new_bfqq != &bfqd->oom_bfqq) && -+ bfq_may_be_close_cooperator(bfqq, new_bfqq)) -+ return bfq_setup_merge(bfqq, new_bfqq); -+ -+ return NULL; -+} -+ -+static void bfq_bfqq_save_state(struct bfq_queue *bfqq) -+{ -+ struct bfq_io_cq *bic = bfqq->bic; -+ -+ /* -+ * If !bfqq->bic, the queue is already shared or its requests -+ * have already been redirected to a shared queue; both idle window -+ * and weight raising state have already been saved. Do nothing. -+ */ -+ if (!bic) -+ return; -+ -+ bic->saved_idle_window = bfq_bfqq_idle_window(bfqq); -+ bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq); -+ bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq); -+ bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node); -+ bic->saved_wr_coeff = bfqq->wr_coeff; -+ bic->saved_wr_start_at_switch_to_srt = bfqq->wr_start_at_switch_to_srt; -+ bic->saved_last_wr_start_finish = bfqq->last_wr_start_finish; -+ bic->saved_wr_cur_max_time = bfqq->wr_cur_max_time; -+ BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish)); -+} -+ -+static void bfq_get_bic_reference(struct bfq_queue *bfqq) -+{ -+ /* -+ * If bfqq->bic has a non-NULL value, the bic to which it belongs -+ * is about to begin using a shared bfq_queue. -+ */ -+ if (bfqq->bic) -+ atomic_long_inc(&bfqq->bic->icq.ioc->refcount); -+} -+ -+static void -+bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic, -+ struct bfq_queue *bfqq, struct bfq_queue *new_bfqq) -+{ -+ bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu", -+ (unsigned long) new_bfqq->pid); -+ /* Save weight raising and idle window of the merged queues */ -+ bfq_bfqq_save_state(bfqq); -+ bfq_bfqq_save_state(new_bfqq); -+ if (bfq_bfqq_IO_bound(bfqq)) -+ bfq_mark_bfqq_IO_bound(new_bfqq); -+ bfq_clear_bfqq_IO_bound(bfqq); -+ -+ /* -+ * If bfqq is weight-raised, then let new_bfqq inherit -+ * weight-raising. To reduce false positives, neglect the case -+ * where bfqq has just been created, but has not yet made it -+ * to be weight-raised (which may happen because EQM may merge -+ * bfqq even before bfq_add_request is executed for the first -+ * time for bfqq). Handling this case would however be very -+ * easy, thanks to the flag just_created. 
-+ */ -+ if (new_bfqq->wr_coeff == 1 && bfqq->wr_coeff > 1) { -+ new_bfqq->wr_coeff = bfqq->wr_coeff; -+ new_bfqq->wr_cur_max_time = bfqq->wr_cur_max_time; -+ new_bfqq->last_wr_start_finish = bfqq->last_wr_start_finish; -+ new_bfqq->wr_start_at_switch_to_srt = -+ bfqq->wr_start_at_switch_to_srt; -+ if (bfq_bfqq_busy(new_bfqq)) { -+ bfqd->wr_busy_queues++; -+ BUG_ON(bfqd->wr_busy_queues > bfqd->busy_queues); -+ } -+ -+ new_bfqq->entity.prio_changed = 1; -+ bfq_log_bfqq(bfqd, new_bfqq, -+ "wr start after merge with %d, rais_max_time %u", -+ bfqq->pid, -+ jiffies_to_msecs(bfqq->wr_cur_max_time)); -+ } -+ -+ if (bfqq->wr_coeff > 1) { /* bfqq has given its wr to new_bfqq */ -+ bfqq->wr_coeff = 1; -+ bfqq->entity.prio_changed = 1; -+ if (bfq_bfqq_busy(bfqq)) { -+ bfqd->wr_busy_queues--; -+ BUG_ON(bfqd->wr_busy_queues < 0); -+ } -+ -+ } -+ -+ bfq_log_bfqq(bfqd, new_bfqq, "merge_bfqqs: wr_busy %d", -+ bfqd->wr_busy_queues); -+ -+ /* -+ * Grab a reference to the bic, to prevent it from being destroyed -+ * before being possibly touched by a bfq_split_bfqq(). -+ */ -+ bfq_get_bic_reference(bfqq); -+ bfq_get_bic_reference(new_bfqq); -+ /* -+ * Merge queues (that is, let bic redirect its requests to new_bfqq) -+ */ -+ bic_set_bfqq(bic, new_bfqq, 1); -+ bfq_mark_bfqq_coop(new_bfqq); -+ /* -+ * new_bfqq now belongs to at least two bics (it is a shared queue): -+ * set new_bfqq->bic to NULL. bfqq either: -+ * - does not belong to any bic any more, and hence bfqq->bic must -+ * be set to NULL, or -+ * - is a queue whose owning bics have already been redirected to a -+ * different queue, hence the queue is destined to not belong to -+ * any bic soon and bfqq->bic is already NULL (therefore the next -+ * assignment causes no harm). -+ */ -+ new_bfqq->bic = NULL; -+ bfqq->bic = NULL; -+ /* release process reference to bfqq */ -+ bfq_put_queue(bfqq); -+} -+ -+static int bfq_allow_bio_merge(struct request_queue *q, struct request *rq, -+ struct bio *bio) -+{ -+ struct bfq_data *bfqd = q->elevator->elevator_data; -+ bool is_sync = op_is_sync(bio->bi_opf); -+ struct bfq_io_cq *bic; -+ struct bfq_queue *bfqq, *new_bfqq; -+ -+ /* -+ * Disallow merge of a sync bio into an async request. -+ */ -+ if (is_sync && !rq_is_sync(rq)) -+ return false; -+ -+ /* -+ * Lookup the bfqq that this bio will be queued with. Allow -+ * merge only if rq is queued there. -+ * Queue lock is held here. -+ */ -+ bic = bfq_bic_lookup(bfqd, current->io_context); -+ if (!bic) -+ return false; -+ -+ bfqq = bic_to_bfqq(bic, is_sync); -+ /* -+ * We take advantage of this function to perform an early merge -+ * of the queues of possible cooperating processes. -+ */ -+ if (bfqq) { -+ new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false); -+ if (new_bfqq) { -+ bfq_merge_bfqqs(bfqd, bic, bfqq, new_bfqq); -+ /* -+ * If we get here, the bio will be queued in the -+ * shared queue, i.e., new_bfqq, so use new_bfqq -+ * to decide whether bio and rq can be merged. -+ */ -+ bfqq = new_bfqq; -+ } -+ } -+ -+ return bfqq == RQ_BFQQ(rq); -+} -+ -+static int bfq_allow_rq_merge(struct request_queue *q, struct request *rq, -+ struct request *next) -+{ -+ return RQ_BFQQ(rq) == RQ_BFQQ(next); -+} -+ -+/* -+ * Set the maximum time for the in-service queue to consume its -+ * budget. This prevents seeky processes from lowering the throughput. -+ * In practice, a time-slice service scheme is used with seeky -+ * processes. 
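/*
 * Sketch of the weight-raising hand-off performed in bfq_merge_bfqqs
 * above, with simplified stand-in fields: if the surviving queue is
 * not raised while the merged one is, the raising state migrates to
 * the survivor and the merged queue drops it, so at most one of the
 * two ends up counted among the weight-raised busy queues.
 */
struct wr_state {
	unsigned int wr_coeff;		/* 1 means "not raised" */
	unsigned long wr_cur_max_time;
	unsigned long last_wr_start_finish;
};

static void hand_off_weight_raising(struct wr_state *merged,
				    struct wr_state *survivor,
				    int *wr_busy_queues,
				    int merged_busy, int survivor_busy)
{
	if (survivor->wr_coeff == 1 && merged->wr_coeff > 1) {
		*survivor = *merged;		/* inherit raising state */
		if (survivor_busy)
			(*wr_busy_queues)++;
	}
	if (merged->wr_coeff > 1) {		/* merged gives up raising */
		merged->wr_coeff = 1;
		if (merged_busy)
			(*wr_busy_queues)--;
	}
}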
-+ */ -+static void bfq_set_budget_timeout(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq) -+{ -+ unsigned int timeout_coeff; -+ -+ if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time) -+ timeout_coeff = 1; -+ else -+ timeout_coeff = bfqq->entity.weight / bfqq->entity.orig_weight; -+ -+ bfqd->last_budget_start = ktime_get(); -+ -+ bfqq->budget_timeout = jiffies + -+ bfqd->bfq_timeout * timeout_coeff; -+ -+ bfq_log_bfqq(bfqd, bfqq, "set budget_timeout %u", -+ jiffies_to_msecs(bfqd->bfq_timeout * timeout_coeff)); -+} -+ -+static void __bfq_set_in_service_queue(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq) -+{ -+ if (bfqq) { -+ bfqg_stats_update_avg_queue_size(bfqq_group(bfqq)); -+ bfq_mark_bfqq_must_alloc(bfqq); -+ bfq_clear_bfqq_fifo_expire(bfqq); -+ -+ bfqd->budgets_assigned = (bfqd->budgets_assigned*7 + 256) / 8; -+ -+ BUG_ON(bfqq == bfqd->in_service_queue); -+ BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list)); -+ -+ if (time_is_before_jiffies(bfqq->last_wr_start_finish) && -+ bfqq->wr_coeff > 1 && -+ bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time && -+ time_is_before_jiffies(bfqq->budget_timeout)) { -+ /* -+ * For soft real-time queues, move the start -+ * of the weight-raising period forward by the -+ * time the queue has not received any -+ * service. Otherwise, a relatively long -+ * service delay is likely to cause the -+ * weight-raising period of the queue to end, -+ * because of the short duration of the -+ * weight-raising period of a soft real-time -+ * queue. It is worth noting that this move -+ * is not so dangerous for the other queues, -+ * because soft real-time queues are not -+ * greedy. -+ * -+ * To not add a further variable, we use the -+ * overloaded field budget_timeout to -+ * determine for how long the queue has not -+ * received service, i.e., how much time has -+ * elapsed since the queue expired. However, -+ * this is a little imprecise, because -+ * budget_timeout is set to jiffies if bfqq -+ * not only expires, but also remains with no -+ * request. -+ */ -+ if (time_after(bfqq->budget_timeout, -+ bfqq->last_wr_start_finish)) -+ bfqq->last_wr_start_finish += -+ jiffies - bfqq->budget_timeout; -+ else -+ bfqq->last_wr_start_finish = jiffies; -+ -+ if (time_is_after_jiffies(bfqq->last_wr_start_finish)) { -+ pr_crit( -+ "BFQ WARNING:last %lu budget %lu jiffies %lu", -+ bfqq->last_wr_start_finish, -+ bfqq->budget_timeout, -+ jiffies); -+ pr_crit("diff %lu", jiffies - -+ max_t(unsigned long, -+ bfqq->last_wr_start_finish, -+ bfqq->budget_timeout)); -+ bfqq->last_wr_start_finish = jiffies; -+ } -+ } -+ -+ bfq_set_budget_timeout(bfqd, bfqq); -+ bfq_log_bfqq(bfqd, bfqq, -+ "set_in_service_queue, cur-budget = %d", -+ bfqq->entity.budget); -+ } else -+ bfq_log(bfqd, "set_in_service_queue: NULL"); -+ -+ bfqd->in_service_queue = bfqq; -+} -+ -+/* -+ * Get and set a new queue for service. -+ */ -+static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd) -+{ -+ struct bfq_queue *bfqq = bfq_get_next_queue(bfqd); -+ -+ __bfq_set_in_service_queue(bfqd, bfqq); -+ return bfqq; -+} -+ -+static void bfq_arm_slice_timer(struct bfq_data *bfqd) -+{ -+ struct bfq_queue *bfqq = bfqd->in_service_queue; -+ struct bfq_io_cq *bic; -+ u32 sl; -+ -+ BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list)); -+ -+ /* Processes have exited, don't wait. 
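/*
 * Numeric illustration of bfq_set_budget_timeout() above, as a small
 * userspace sketch rather than the kernel code: outside the soft
 * real-time raising period the timeout is stretched by
 * weight/orig_weight, so heavier queues get proportionally more time
 * to consume their budget.
 */
#include <stdio.h>

static unsigned long budget_timeout(unsigned long now_jiffies,
				    unsigned long base_timeout,
				    unsigned int weight,
				    unsigned int orig_weight,
				    int in_soft_rt_wr)
{
	unsigned int coeff = in_soft_rt_wr ? 1 : weight / orig_weight;

	return now_jiffies + base_timeout * coeff;
}

int main(void)
{
	/* weight 300 vs. original weight 100: timeout stretched 3x */
	printf("%lu\n", budget_timeout(1000, 16, 300, 100, 0)); /* 1048 */
	printf("%lu\n", budget_timeout(1000, 16, 300, 100, 1)); /* 1016 */
	return 0;
}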
*/ -+ bic = bfqd->in_service_bic; -+ if (!bic || atomic_read(&bic->icq.ioc->active_ref) == 0) -+ return; -+ -+ bfq_mark_bfqq_wait_request(bfqq); -+ -+ /* -+ * We don't want to idle for seeks, but we do want to allow -+ * fair distribution of slice time for a process doing back-to-back -+ * seeks. So allow a little bit of time for him to submit a new rq. -+ * -+ * To prevent processes with (partly) seeky workloads from -+ * being too ill-treated, grant them a small fraction of the -+ * assigned budget before reducing the waiting time to -+ * BFQ_MIN_TT. This happened to help reduce latency. -+ */ -+ sl = bfqd->bfq_slice_idle; -+ /* -+ * Unless the queue is being weight-raised or the scenario is -+ * asymmetric, grant only minimum idle time if the queue -+ * is seeky. A long idling is preserved for a weight-raised -+ * queue, or, more in general, in an asymemtric scenario, -+ * because a long idling is needed for guaranteeing to a queue -+ * its reserved share of the throughput (in particular, it is -+ * needed if the queue has a higher weight than some other -+ * queue). -+ */ -+ if (BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 && -+ bfq_symmetric_scenario(bfqd)) -+ sl = min_t(u32, sl, BFQ_MIN_TT); -+ -+ bfqd->last_idling_start = ktime_get(); -+ hrtimer_start(&bfqd->idle_slice_timer, ns_to_ktime(sl), -+ HRTIMER_MODE_REL); -+ bfqg_stats_set_start_idle_time(bfqq_group(bfqq)); -+ bfq_log(bfqd, "arm idle: %ld/%ld ms", -+ sl / NSEC_PER_MSEC, bfqd->bfq_slice_idle / NSEC_PER_MSEC); -+} -+ -+/* -+ * In autotuning mode, max_budget is dynamically recomputed as the -+ * amount of sectors transferred in timeout at the estimated peak -+ * rate. This enables BFQ to utilize a full timeslice with a full -+ * budget, even if the in-service queue is served at peak rate. And -+ * this maximises throughput with sequential workloads. -+ */ -+static unsigned long bfq_calc_max_budget(struct bfq_data *bfqd) -+{ -+ return (u64)bfqd->peak_rate * USEC_PER_MSEC * -+ jiffies_to_msecs(bfqd->bfq_timeout)>>BFQ_RATE_SHIFT; -+} -+ -+/* -+ * Update parameters related to throughput and responsiveness, as a -+ * function of the estimated peak rate. See comments on -+ * bfq_calc_max_budget(), and on T_slow and T_fast arrays. -+ */ -+static void update_thr_responsiveness_params(struct bfq_data *bfqd) -+{ -+ int dev_type = blk_queue_nonrot(bfqd->queue); -+ -+ if (bfqd->bfq_user_max_budget == 0) { -+ bfqd->bfq_max_budget = -+ bfq_calc_max_budget(bfqd); -+ BUG_ON(bfqd->bfq_max_budget < 0); -+ bfq_log(bfqd, "new max_budget = %d", -+ bfqd->bfq_max_budget); -+ } -+ -+ if (bfqd->device_speed == BFQ_BFQD_FAST && -+ bfqd->peak_rate < device_speed_thresh[dev_type]) { -+ bfqd->device_speed = BFQ_BFQD_SLOW; -+ bfqd->RT_prod = R_slow[dev_type] * -+ T_slow[dev_type]; -+ } else if (bfqd->device_speed == BFQ_BFQD_SLOW && -+ bfqd->peak_rate > device_speed_thresh[dev_type]) { -+ bfqd->device_speed = BFQ_BFQD_FAST; -+ bfqd->RT_prod = R_fast[dev_type] * -+ T_fast[dev_type]; -+ } -+ -+ bfq_log(bfqd, -+"dev_type %s dev_speed_class = %s (%llu sects/sec), thresh %llu setcs/sec", -+ dev_type == 0 ? "ROT" : "NONROT", -+ bfqd->device_speed == BFQ_BFQD_FAST ? "FAST" : "SLOW", -+ bfqd->device_speed == BFQ_BFQD_FAST ? 
-+ (USEC_PER_SEC*(u64)R_fast[dev_type])>>BFQ_RATE_SHIFT : -+ (USEC_PER_SEC*(u64)R_slow[dev_type])>>BFQ_RATE_SHIFT, -+ (USEC_PER_SEC*(u64)device_speed_thresh[dev_type])>> -+ BFQ_RATE_SHIFT); -+} -+ -+static void bfq_reset_rate_computation(struct bfq_data *bfqd, struct request *rq) -+{ -+ if (rq != NULL) { /* new rq dispatch now, reset accordingly */ -+ bfqd->last_dispatch = bfqd->first_dispatch = ktime_get_ns() ; -+ bfqd->peak_rate_samples = 1; -+ bfqd->sequential_samples = 0; -+ bfqd->tot_sectors_dispatched = bfqd->last_rq_max_size = -+ blk_rq_sectors(rq); -+ } else /* no new rq dispatched, just reset the number of samples */ -+ bfqd->peak_rate_samples = 0; /* full re-init on next disp. */ -+ -+ bfq_log(bfqd, -+ "reset_rate_computation at end, sample %u/%u tot_sects %llu", -+ bfqd->peak_rate_samples, bfqd->sequential_samples, -+ bfqd->tot_sectors_dispatched); -+} -+ -+static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq) -+{ -+ u32 rate, weight, divisor; -+ -+ /* -+ * For the convergence property to hold (see comments on -+ * bfq_update_peak_rate()) and for the assessment to be -+ * reliable, a minimum number of samples must be present, and -+ * a minimum amount of time must have elapsed. If not so, do -+ * not compute new rate. Just reset parameters, to get ready -+ * for a new evaluation attempt. -+ */ -+ if (bfqd->peak_rate_samples < BFQ_RATE_MIN_SAMPLES || -+ bfqd->delta_from_first < BFQ_RATE_MIN_INTERVAL) { -+ bfq_log(bfqd, -+ "update_rate_reset: only resetting, delta_first %lluus samples %d", -+ bfqd->delta_from_first>>10, bfqd->peak_rate_samples); -+ goto reset_computation; -+ } -+ -+ /* -+ * If a new request completion has occurred after last -+ * dispatch, then, to approximate the rate at which requests -+ * have been served by the device, it is more precise to -+ * extend the observation interval to the last completion. -+ */ -+ bfqd->delta_from_first = -+ max_t(u64, bfqd->delta_from_first, -+ bfqd->last_completion - bfqd->first_dispatch); -+ -+ BUG_ON(bfqd->delta_from_first == 0); -+ /* -+ * Rate computed in sects/usec, and not sects/nsec, for -+ * precision issues. -+ */ -+ rate = div64_ul(bfqd->tot_sectors_dispatched<delta_from_first, NSEC_PER_USEC)); -+ -+ bfq_log(bfqd, -+"update_rate_reset: tot_sects %llu delta_first %lluus rate %llu sects/s (%d)", -+ bfqd->tot_sectors_dispatched, bfqd->delta_from_first>>10, -+ ((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT), -+ rate > 20< 20M sectors/sec) -+ */ -+ if ((bfqd->sequential_samples < (3 * bfqd->peak_rate_samples)>>2 && -+ rate <= bfqd->peak_rate) || -+ rate > 20<peak_rate_samples, bfqd->sequential_samples, -+ ((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT), -+ ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT)); -+ goto reset_computation; -+ } else { -+ bfq_log(bfqd, -+ "update_rate_reset: do update, samples %u/%u rate/peak %llu/%llu", -+ bfqd->peak_rate_samples, bfqd->sequential_samples, -+ ((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT), -+ ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT)); -+ } -+ -+ /* -+ * We have to update the peak rate, at last! To this purpose, -+ * we use a low-pass filter. We compute the smoothing constant -+ * of the filter as a function of the 'weight' of the new -+ * measured rate. -+ * -+ * As can be seen in next formulas, we define this weight as a -+ * quantity proportional to how sequential the workload is, -+ * and to how long the observation time interval is. -+ * -+ * The weight runs from 0 to 8. 
The maximum value of the -+ * weight, 8, yields the minimum value for the smoothing -+ * constant. At this minimum value for the smoothing constant, -+ * the measured rate contributes for half of the next value of -+ * the estimated peak rate. -+ * -+ * So, the first step is to compute the weight as a function -+ * of how sequential the workload is. Note that the weight -+ * cannot reach 9, because bfqd->sequential_samples cannot -+ * become equal to bfqd->peak_rate_samples, which, in its -+ * turn, holds true because bfqd->sequential_samples is not -+ * incremented for the first sample. -+ */ -+ weight = (9 * bfqd->sequential_samples) / bfqd->peak_rate_samples; -+ -+ /* -+ * Second step: further refine the weight as a function of the -+ * duration of the observation interval. -+ */ -+ weight = min_t(u32, 8, -+ div_u64(weight * bfqd->delta_from_first, -+ BFQ_RATE_REF_INTERVAL)); -+ -+ /* -+ * Divisor ranging from 10, for minimum weight, to 2, for -+ * maximum weight. -+ */ -+ divisor = 10 - weight; -+ BUG_ON(divisor == 0); -+ -+ /* -+ * Finally, update peak rate: -+ * -+ * peak_rate = peak_rate * (divisor-1) / divisor + rate / divisor -+ */ -+ bfqd->peak_rate *= divisor-1; -+ bfqd->peak_rate /= divisor; -+ rate /= divisor; /* smoothing constant alpha = 1/divisor */ -+ -+ bfq_log(bfqd, -+ "update_rate_reset: divisor %d tmp_peak_rate %llu tmp_rate %u", -+ divisor, -+ ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT), -+ (u32)((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT)); -+ -+ BUG_ON(bfqd->peak_rate == 0); -+ BUG_ON(bfqd->peak_rate > 20<peak_rate += rate; -+ update_thr_responsiveness_params(bfqd); -+ BUG_ON(bfqd->peak_rate > 20<peak_rate_samples == 0) { /* first dispatch */ -+ bfq_log(bfqd, -+ "update_peak_rate: goto reset, samples %d", -+ bfqd->peak_rate_samples) ; -+ bfq_reset_rate_computation(bfqd, rq); -+ goto update_last_values; /* will add one sample */ -+ } -+ -+ /* -+ * Device idle for very long: the observation interval lasting -+ * up to this dispatch cannot be a valid observation interval -+ * for computing a new peak rate (similarly to the late- -+ * completion event in bfq_completed_request()). 
Go to -+ * update_rate_and_reset to have the following three steps -+ * taken: -+ * - close the observation interval at the last (previous) -+ * request dispatch or completion -+ * - compute rate, if possible, for that observation interval -+ * - start a new observation interval with this dispatch -+ */ -+ if (now_ns - bfqd->last_dispatch > 100*NSEC_PER_MSEC && -+ bfqd->rq_in_driver == 0) { -+ bfq_log(bfqd, -+"update_peak_rate: jumping to updating&resetting delta_last %lluus samples %d", -+ (now_ns - bfqd->last_dispatch)>>10, -+ bfqd->peak_rate_samples) ; -+ goto update_rate_and_reset; -+ } -+ -+ /* Update sampling information */ -+ bfqd->peak_rate_samples++; -+ -+ if ((bfqd->rq_in_driver > 0 || -+ now_ns - bfqd->last_completion < BFQ_MIN_TT) -+ && get_sdist(bfqd->last_position, rq) < BFQQ_SEEK_THR) -+ bfqd->sequential_samples++; -+ -+ bfqd->tot_sectors_dispatched += blk_rq_sectors(rq); -+ -+ /* Reset max observed rq size every 32 dispatches */ -+ if (likely(bfqd->peak_rate_samples % 32)) -+ bfqd->last_rq_max_size = -+ max_t(u32, blk_rq_sectors(rq), bfqd->last_rq_max_size); -+ else -+ bfqd->last_rq_max_size = blk_rq_sectors(rq); -+ -+ bfqd->delta_from_first = now_ns - bfqd->first_dispatch; -+ -+ bfq_log(bfqd, -+ "update_peak_rate: added samples %u/%u tot_sects %llu delta_first %lluus", -+ bfqd->peak_rate_samples, bfqd->sequential_samples, -+ bfqd->tot_sectors_dispatched, -+ bfqd->delta_from_first>>10); -+ -+ /* Target observation interval not yet reached, go on sampling */ -+ if (bfqd->delta_from_first < BFQ_RATE_REF_INTERVAL) -+ goto update_last_values; -+ -+update_rate_and_reset: -+ bfq_update_rate_reset(bfqd, rq); -+update_last_values: -+ bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq); -+ bfqd->last_dispatch = now_ns; -+ -+ bfq_log(bfqd, -+ "update_peak_rate: delta_first %lluus last_pos %llu peak_rate %llu", -+ (now_ns - bfqd->first_dispatch)>>10, -+ (unsigned long long) bfqd->last_position, -+ ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT)); -+ bfq_log(bfqd, -+ "update_peak_rate: samples at end %d", bfqd->peak_rate_samples); -+} -+ -+/* -+ * Move request from internal lists to the dispatch list of the request queue -+ */ -+static void bfq_dispatch_insert(struct request_queue *q, struct request *rq) -+{ -+ struct bfq_queue *bfqq = RQ_BFQQ(rq); -+ -+ /* -+ * For consistency, the next instruction should have been executed -+ * after removing the request from the queue and dispatching it. -+ * We execute instead this instruction before bfq_remove_request() -+ * (and hence introduce a temporary inconsistency), for efficiency. -+ * In fact, in a forced_dispatch, this prevents two counters related -+ * to bfqq->dispatched to risk to be uselessly decremented if bfqq -+ * is not in service, and then to be incremented again after -+ * incrementing bfqq->dispatched. -+ */ -+ bfqq->dispatched++; -+ bfq_update_peak_rate(q->elevator->elevator_data, rq); -+ -+ bfq_remove_request(rq); -+ elv_dispatch_sort(q, rq); -+} -+ -+static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq) -+{ -+ BUG_ON(bfqq != bfqd->in_service_queue); -+ -+ /* -+ * If this bfqq is shared between multiple processes, check -+ * to make sure that those processes are still issuing I/Os -+ * within the mean seek distance. If not, it may be time to -+ * break the queues apart again. 
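/*
 * Userspace sketch of the peak-rate low-pass filter described in
 * bfq_update_rate_reset() above (simplified names, caller assumed to
 * pass total_samples > 0): the weight (0..8) grows with the fraction
 * of sequential samples and with the length of the observation
 * interval, and the divisor (10 - weight) becomes the smoothing
 * constant 1/divisor, so a single measurement contributes at most
 * half of the updated estimate.
 */
#include <stdint.h>

static uint64_t update_peak_rate(uint64_t peak_rate, uint64_t rate,
				 unsigned int sequential_samples,
				 unsigned int total_samples,
				 uint64_t delta_ns, uint64_t ref_interval_ns)
{
	uint64_t weight;
	unsigned int divisor;

	weight = 9ULL * sequential_samples / total_samples;	/* 0..8 */
	weight = weight * delta_ns / ref_interval_ns;
	if (weight > 8)
		weight = 8;
	divisor = 10 - (unsigned int)weight;			/* 10..2 */

	/* peak = peak * (d-1)/d + rate/d */
	return peak_rate * (divisor - 1) / divisor + rate / divisor;
}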
-+ */ -+ if (bfq_bfqq_coop(bfqq) && BFQQ_SEEKY(bfqq)) -+ bfq_mark_bfqq_split_coop(bfqq); -+ -+ if (RB_EMPTY_ROOT(&bfqq->sort_list)) { -+ if (bfqq->dispatched == 0) -+ /* -+ * Overloading budget_timeout field to store -+ * the time at which the queue remains with no -+ * backlog and no outstanding request; used by -+ * the weight-raising mechanism. -+ */ -+ bfqq->budget_timeout = jiffies; -+ -+ bfq_del_bfqq_busy(bfqd, bfqq, true); -+ } else { -+ bfq_requeue_bfqq(bfqd, bfqq); -+ /* -+ * Resort priority tree of potential close cooperators. -+ */ -+ bfq_pos_tree_add_move(bfqd, bfqq); -+ } -+ -+ /* -+ * All in-service entities must have been properly deactivated -+ * or requeued before executing the next function, which -+ * resets all in-service entites as no more in service. -+ */ -+ __bfq_bfqd_reset_in_service(bfqd); -+} -+ -+/** -+ * __bfq_bfqq_recalc_budget - try to adapt the budget to the @bfqq behavior. -+ * @bfqd: device data. -+ * @bfqq: queue to update. -+ * @reason: reason for expiration. -+ * -+ * Handle the feedback on @bfqq budget at queue expiration. -+ * See the body for detailed comments. -+ */ -+static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq, -+ enum bfqq_expiration reason) -+{ -+ struct request *next_rq; -+ int budget, min_budget; -+ -+ BUG_ON(bfqq != bfqd->in_service_queue); -+ -+ min_budget = bfq_min_budget(bfqd); -+ -+ if (bfqq->wr_coeff == 1) -+ budget = bfqq->max_budget; -+ else /* -+ * Use a constant, low budget for weight-raised queues, -+ * to help achieve a low latency. Keep it slightly higher -+ * than the minimum possible budget, to cause a little -+ * bit fewer expirations. -+ */ -+ budget = 2 * min_budget; -+ -+ bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %d, budg left %d", -+ bfqq->entity.budget, bfq_bfqq_budget_left(bfqq)); -+ bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last max_budg %d, min budg %d", -+ budget, bfq_min_budget(bfqd)); -+ bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d", -+ bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue)); -+ -+ if (bfq_bfqq_sync(bfqq) && bfqq->wr_coeff == 1) { -+ switch (reason) { -+ /* -+ * Caveat: in all the following cases we trade latency -+ * for throughput. -+ */ -+ case BFQ_BFQQ_TOO_IDLE: -+ /* -+ * This is the only case where we may reduce -+ * the budget: if there is no request of the -+ * process still waiting for completion, then -+ * we assume (tentatively) that the timer has -+ * expired because the batch of requests of -+ * the process could have been served with a -+ * smaller budget. Hence, betting that -+ * process will behave in the same way when it -+ * becomes backlogged again, we reduce its -+ * next budget. As long as we guess right, -+ * this budget cut reduces the latency -+ * experienced by the process. -+ * -+ * However, if there are still outstanding -+ * requests, then the process may have not yet -+ * issued its next request just because it is -+ * still waiting for the completion of some of -+ * the still outstanding ones. So in this -+ * subcase we do not reduce its budget, on the -+ * contrary we increase it to possibly boost -+ * the throughput, as discussed in the -+ * comments to the BUDGET_TIMEOUT case. 
-+ */ -+ if (bfqq->dispatched > 0) /* still outstanding reqs */ -+ budget = min(budget * 2, bfqd->bfq_max_budget); -+ else { -+ if (budget > 5 * min_budget) -+ budget -= 4 * min_budget; -+ else -+ budget = min_budget; -+ } -+ break; -+ case BFQ_BFQQ_BUDGET_TIMEOUT: -+ /* -+ * We double the budget here because it gives -+ * the chance to boost the throughput if this -+ * is not a seeky process (and has bumped into -+ * this timeout because of, e.g., ZBR). -+ */ -+ budget = min(budget * 2, bfqd->bfq_max_budget); -+ break; -+ case BFQ_BFQQ_BUDGET_EXHAUSTED: -+ /* -+ * The process still has backlog, and did not -+ * let either the budget timeout or the disk -+ * idling timeout expire. Hence it is not -+ * seeky, has a short thinktime and may be -+ * happy with a higher budget too. So -+ * definitely increase the budget of this good -+ * candidate to boost the disk throughput. -+ */ -+ budget = min(budget * 4, bfqd->bfq_max_budget); -+ break; -+ case BFQ_BFQQ_NO_MORE_REQUESTS: -+ /* -+ * For queues that expire for this reason, it -+ * is particularly important to keep the -+ * budget close to the actual service they -+ * need. Doing so reduces the timestamp -+ * misalignment problem described in the -+ * comments in the body of -+ * __bfq_activate_entity. In fact, suppose -+ * that a queue systematically expires for -+ * BFQ_BFQQ_NO_MORE_REQUESTS and presents a -+ * new request in time to enjoy timestamp -+ * back-shifting. The larger the budget of the -+ * queue is with respect to the service the -+ * queue actually requests in each service -+ * slot, the more times the queue can be -+ * reactivated with the same virtual finish -+ * time. It follows that, even if this finish -+ * time is pushed to the system virtual time -+ * to reduce the consequent timestamp -+ * misalignment, the queue unjustly enjoys for -+ * many re-activations a lower finish time -+ * than all newly activated queues. -+ * -+ * The service needed by bfqq is measured -+ * quite precisely by bfqq->entity.service. -+ * Since bfqq does not enjoy device idling, -+ * bfqq->entity.service is equal to the number -+ * of sectors that the process associated with -+ * bfqq requested to read/write before waiting -+ * for request completions, or blocking for -+ * other reasons. -+ */ -+ budget = max_t(int, bfqq->entity.service, min_budget); -+ break; -+ default: -+ return; -+ } -+ } else if (!bfq_bfqq_sync(bfqq)) -+ /* -+ * Async queues get always the maximum possible -+ * budget, as for them we do not care about latency -+ * (in addition, their ability to dispatch is limited -+ * by the charging factor). -+ */ -+ budget = bfqd->bfq_max_budget; -+ -+ bfqq->max_budget = budget; -+ -+ if (bfqd->budgets_assigned >= bfq_stats_min_budgets && -+ !bfqd->bfq_user_max_budget) -+ bfqq->max_budget = min(bfqq->max_budget, bfqd->bfq_max_budget); -+ -+ /* -+ * If there is still backlog, then assign a new budget, making -+ * sure that it is large enough for the next request. Since -+ * the finish time of bfqq must be kept in sync with the -+ * budget, be sure to call __bfq_bfqq_expire() *after* this -+ * update. -+ * -+ * If there is no backlog, then no need to update the budget; -+ * it will be updated on the arrival of a new request. 
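/*
 * Compact model of the budget feedback rules above (a sketch with a
 * simplified enum and plain ints): the next budget shrinks after an
 * idle timeout with no outstanding I/O, doubles after a budget
 * timeout, quadruples after a clean exhaustion, and tracks the
 * measured service when the queue simply ran out of requests.
 */
enum expiration_sketch {
	EXP_TOO_IDLE,
	EXP_BUDGET_TIMEOUT,
	EXP_BUDGET_EXHAUSTED,
	EXP_NO_MORE_REQUESTS,
};

static int min_i(int a, int b) { return a < b ? a : b; }
static int max_i(int a, int b) { return a > b ? a : b; }

static int next_budget(enum expiration_sketch reason, int budget,
		       int min_budget, int max_budget,
		       int dispatched, int service)
{
	switch (reason) {
	case EXP_TOO_IDLE:
		if (dispatched > 0)	/* still waiting for completions */
			return min_i(budget * 2, max_budget);
		return budget > 5 * min_budget ?
			budget - 4 * min_budget : min_budget;
	case EXP_BUDGET_TIMEOUT:
		return min_i(budget * 2, max_budget);
	case EXP_BUDGET_EXHAUSTED:
		return min_i(budget * 4, max_budget);
	case EXP_NO_MORE_REQUESTS:
		return max_i(service, min_budget);
	}
	return budget;
}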
-+ */ -+ next_rq = bfqq->next_rq; -+ if (next_rq) { -+ BUG_ON(reason == BFQ_BFQQ_TOO_IDLE || -+ reason == BFQ_BFQQ_NO_MORE_REQUESTS); -+ bfqq->entity.budget = max_t(unsigned long, bfqq->max_budget, -+ bfq_serv_to_charge(next_rq, bfqq)); -+ BUG_ON(!bfq_bfqq_busy(bfqq)); -+ BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list)); -+ } -+ -+ bfq_log_bfqq(bfqd, bfqq, "head sect: %u, new budget %d", -+ next_rq ? blk_rq_sectors(next_rq) : 0, -+ bfqq->entity.budget); -+} -+ -+/* -+ * Return true if the process associated with bfqq is "slow". The slow -+ * flag is used, in addition to the budget timeout, to reduce the -+ * amount of service provided to seeky processes, and thus reduce -+ * their chances to lower the throughput. More details in the comments -+ * on the function bfq_bfqq_expire(). -+ * -+ * An important observation is in order: as discussed in the comments -+ * on the function bfq_update_peak_rate(), with devices with internal -+ * queues, it is hard if ever possible to know when and for how long -+ * an I/O request is processed by the device (apart from the trivial -+ * I/O pattern where a new request is dispatched only after the -+ * previous one has been completed). This makes it hard to evaluate -+ * the real rate at which the I/O requests of each bfq_queue are -+ * served. In fact, for an I/O scheduler like BFQ, serving a -+ * bfq_queue means just dispatching its requests during its service -+ * slot (i.e., until the budget of the queue is exhausted, or the -+ * queue remains idle, or, finally, a timeout fires). But, during the -+ * service slot of a bfq_queue, around 100 ms at most, the device may -+ * be even still processing requests of bfq_queues served in previous -+ * service slots. On the opposite end, the requests of the in-service -+ * bfq_queue may be completed after the service slot of the queue -+ * finishes. -+ * -+ * Anyway, unless more sophisticated solutions are used -+ * (where possible), the sum of the sizes of the requests dispatched -+ * during the service slot of a bfq_queue is probably the only -+ * approximation available for the service received by the bfq_queue -+ * during its service slot. And this sum is the quantity used in this -+ * function to evaluate the I/O speed of a process. -+ */ -+static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq, -+ bool compensate, enum bfqq_expiration reason, -+ unsigned long *delta_ms) -+{ -+ ktime_t delta_ktime; -+ u32 delta_usecs; -+ bool slow = BFQQ_SEEKY(bfqq); /* if delta too short, use seekyness */ -+ -+ if (!bfq_bfqq_sync(bfqq)) -+ return false; -+ -+ if (compensate) -+ delta_ktime = bfqd->last_idling_start; -+ else -+ delta_ktime = ktime_get(); -+ delta_ktime = ktime_sub(delta_ktime, bfqd->last_budget_start); -+ delta_usecs = ktime_to_us(delta_ktime); -+ -+ /* don't use too short time intervals */ -+ if (delta_usecs < 1000) { -+ if (blk_queue_nonrot(bfqd->queue)) -+ /* -+ * give same worst-case guarantees as idling -+ * for seeky -+ */ -+ *delta_ms = BFQ_MIN_TT / NSEC_PER_MSEC; -+ else /* charge at least one seek */ -+ *delta_ms = bfq_slice_idle / NSEC_PER_MSEC; -+ -+ bfq_log(bfqd, "bfq_bfqq_is_slow: too short %u", delta_usecs); -+ -+ return slow; -+ } -+ -+ *delta_ms = delta_usecs / USEC_PER_MSEC; -+ -+ /* -+ * Use only long (> 20ms) intervals to filter out excessive -+ * spikes in service rate estimation. -+ */ -+ if (delta_usecs > 20000) { -+ /* -+ * Caveat for rotational devices: processes doing I/O -+ * in the slower disk zones tend to be slow(er) even -+ * if not seeky. 
In this respect, the estimated peak -+ * rate is likely to be an average over the disk -+ * surface. Accordingly, to not be too harsh with -+ * unlucky processes, a process is deemed slow only if -+ * its rate has been lower than half of the estimated -+ * peak rate. -+ */ -+ slow = bfqq->entity.service < bfqd->bfq_max_budget / 2; -+ bfq_log(bfqd, "bfq_bfqq_is_slow: relative rate %d/%d", -+ bfqq->entity.service, bfqd->bfq_max_budget); -+ } -+ -+ bfq_log_bfqq(bfqd, bfqq, "bfq_bfqq_is_slow: slow %d", slow); -+ -+ return slow; -+} -+ -+/* -+ * To be deemed as soft real-time, an application must meet two -+ * requirements. First, the application must not require an average -+ * bandwidth higher than the approximate bandwidth required to playback or -+ * record a compressed high-definition video. -+ * The next function is invoked on the completion of the last request of a -+ * batch, to compute the next-start time instant, soft_rt_next_start, such -+ * that, if the next request of the application does not arrive before -+ * soft_rt_next_start, then the above requirement on the bandwidth is met. -+ * -+ * The second requirement is that the request pattern of the application is -+ * isochronous, i.e., that, after issuing a request or a batch of requests, -+ * the application stops issuing new requests until all its pending requests -+ * have been completed. After that, the application may issue a new batch, -+ * and so on. -+ * For this reason the next function is invoked to compute -+ * soft_rt_next_start only for applications that meet this requirement, -+ * whereas soft_rt_next_start is set to infinity for applications that do -+ * not. -+ * -+ * Unfortunately, even a greedy application may happen to behave in an -+ * isochronous way if the CPU load is high. In fact, the application may -+ * stop issuing requests while the CPUs are busy serving other processes, -+ * then restart, then stop again for a while, and so on. In addition, if -+ * the disk achieves a low enough throughput with the request pattern -+ * issued by the application (e.g., because the request pattern is random -+ * and/or the device is slow), then the application may meet the above -+ * bandwidth requirement too. To prevent such a greedy application to be -+ * deemed as soft real-time, a further rule is used in the computation of -+ * soft_rt_next_start: soft_rt_next_start must be higher than the current -+ * time plus the maximum time for which the arrival of a request is waited -+ * for when a sync queue becomes idle, namely bfqd->bfq_slice_idle. -+ * This filters out greedy applications, as the latter issue instead their -+ * next request as soon as possible after the last one has been completed -+ * (in contrast, when a batch of requests is completed, a soft real-time -+ * application spends some time processing data). -+ * -+ * Unfortunately, the last filter may easily generate false positives if -+ * only bfqd->bfq_slice_idle is used as a reference time interval and one -+ * or both the following cases occur: -+ * 1) HZ is so low that the duration of a jiffy is comparable to or higher -+ * than bfqd->bfq_slice_idle. This happens, e.g., on slow devices with -+ * HZ=100. -+ * 2) jiffies, instead of increasing at a constant rate, may stop increasing -+ * for a while, then suddenly 'jump' by several units to recover the lost -+ * increments. This seems to happen, e.g., inside virtual machines. 
-+ * To address this issue, we do not use as a reference time interval just -+ * bfqd->bfq_slice_idle, but bfqd->bfq_slice_idle plus a few jiffies. In -+ * particular we add the minimum number of jiffies for which the filter -+ * seems to be quite precise also in embedded systems and KVM/QEMU virtual -+ * machines. -+ */ -+static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq) -+{ -+ bfq_log_bfqq(bfqd, bfqq, -+"softrt_next_start: service_blkg %lu soft_rate %u sects/sec interval %u", -+ bfqq->service_from_backlogged, -+ bfqd->bfq_wr_max_softrt_rate, -+ jiffies_to_msecs(HZ * bfqq->service_from_backlogged / -+ bfqd->bfq_wr_max_softrt_rate)); -+ -+ return max(bfqq->last_idle_bklogged + -+ HZ * bfqq->service_from_backlogged / -+ bfqd->bfq_wr_max_softrt_rate, -+ jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4); -+} -+ -+/* -+ * Return the farthest future time instant according to jiffies -+ * macros. -+ */ -+static unsigned long bfq_greatest_from_now(void) -+{ -+ return jiffies + MAX_JIFFY_OFFSET; -+} -+ -+/* -+ * Return the farthest past time instant according to jiffies -+ * macros. -+ */ -+static unsigned long bfq_smallest_from_now(void) -+{ -+ return jiffies - MAX_JIFFY_OFFSET; -+} -+ -+/** -+ * bfq_bfqq_expire - expire a queue. -+ * @bfqd: device owning the queue. -+ * @bfqq: the queue to expire. -+ * @compensate: if true, compensate for the time spent idling. -+ * @reason: the reason causing the expiration. -+ * -+ * If the process associated with bfqq does slow I/O (e.g., because it -+ * issues random requests), we charge bfqq with the time it has been -+ * in service instead of the service it has received (see -+ * bfq_bfqq_charge_time for details on how this goal is achieved). As -+ * a consequence, bfqq will typically get higher timestamps upon -+ * reactivation, and hence it will be rescheduled as if it had -+ * received more service than what it has actually received. In the -+ * end, bfqq receives less service in proportion to how slowly its -+ * associated process consumes its budgets (and hence how seriously it -+ * tends to lower the throughput). In addition, this time-charging -+ * strategy guarantees time fairness among slow processes. In -+ * contrast, if the process associated with bfqq is not slow, we -+ * charge bfqq exactly with the service it has received. -+ * -+ * Charging time to the first type of queues and the exact service to -+ * the other has the effect of using the WF2Q+ policy to schedule the -+ * former on a timeslice basis, without violating service domain -+ * guarantees among the latter. -+ */ -+static void bfq_bfqq_expire(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq, -+ bool compensate, -+ enum bfqq_expiration reason) -+{ -+ bool slow; -+ unsigned long delta = 0; -+ struct bfq_entity *entity = &bfqq->entity; -+ int ref; -+ -+ BUG_ON(bfqq != bfqd->in_service_queue); -+ -+ /* -+ * Check whether the process is slow (see bfq_bfqq_is_slow). -+ */ -+ slow = bfq_bfqq_is_slow(bfqd, bfqq, compensate, reason, &delta); -+ -+ /* -+ * Increase service_from_backlogged before next statement, -+ * because the possible next invocation of -+ * bfq_bfqq_charge_time would likely inflate -+ * entity->service. In contrast, service_from_backlogged must -+ * contain real service, to enable the soft real-time -+ * heuristic to correctly compute the bandwidth consumed by -+ * bfqq. 
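/*
 * Sketch of the soft_rt_next_start computation above, in plain C with
 * jiffies replaced by an abstract tick counter: the next request must
 * not arrive before the bandwidth bound is respected, and in any case
 * not before an idle-slice worth of ticks (plus a few extra ticks to
 * absorb coarse or jumpy clocks) has elapsed.
 */
static unsigned long soft_rt_next_start(unsigned long now,
					unsigned long last_idle_bklogged,
					unsigned long service_sectors,
					unsigned long max_softrt_rate, /* sectors/s */
					unsigned long ticks_per_sec,
					unsigned long idle_slice_ticks)
{
	unsigned long bw_bound = last_idle_bklogged +
		ticks_per_sec * service_sectors / max_softrt_rate;
	unsigned long greed_bound = now + idle_slice_ticks + 4;

	return bw_bound > greed_bound ? bw_bound : greed_bound;
}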
-+ */ -+ bfqq->service_from_backlogged += entity->service; -+ -+ /* -+ * As above explained, charge slow (typically seeky) and -+ * timed-out queues with the time and not the service -+ * received, to favor sequential workloads. -+ * -+ * Processes doing I/O in the slower disk zones will tend to -+ * be slow(er) even if not seeky. Therefore, since the -+ * estimated peak rate is actually an average over the disk -+ * surface, these processes may timeout just for bad luck. To -+ * avoid punishing them, do not charge time to processes that -+ * succeeded in consuming at least 2/3 of their budget. This -+ * allows BFQ to preserve enough elasticity to still perform -+ * bandwidth, and not time, distribution with little unlucky -+ * or quasi-sequential processes. -+ */ -+ if (bfqq->wr_coeff == 1 && -+ (slow || -+ (reason == BFQ_BFQQ_BUDGET_TIMEOUT && -+ bfq_bfqq_budget_left(bfqq) >= entity->budget / 3))) -+ bfq_bfqq_charge_time(bfqd, bfqq, delta); -+ -+ BUG_ON(bfqq->entity.budget < bfqq->entity.service); -+ -+ if (reason == BFQ_BFQQ_TOO_IDLE && -+ entity->service <= 2 * entity->budget / 10) -+ bfq_clear_bfqq_IO_bound(bfqq); -+ -+ if (bfqd->low_latency && bfqq->wr_coeff == 1) -+ bfqq->last_wr_start_finish = jiffies; -+ -+ if (bfqd->low_latency && bfqd->bfq_wr_max_softrt_rate > 0 && -+ RB_EMPTY_ROOT(&bfqq->sort_list)) { -+ /* -+ * If we get here, and there are no outstanding -+ * requests, then the request pattern is isochronous -+ * (see the comments on the function -+ * bfq_bfqq_softrt_next_start()). Thus we can compute -+ * soft_rt_next_start. If, instead, the queue still -+ * has outstanding requests, then we have to wait for -+ * the completion of all the outstanding requests to -+ * discover whether the request pattern is actually -+ * isochronous. -+ */ -+ BUG_ON(bfqd->busy_queues < 1); -+ if (bfqq->dispatched == 0) { -+ bfqq->soft_rt_next_start = -+ bfq_bfqq_softrt_next_start(bfqd, bfqq); -+ bfq_log_bfqq(bfqd, bfqq, "new soft_rt_next %lu", -+ bfqq->soft_rt_next_start); -+ } else { -+ /* -+ * The application is still waiting for the -+ * completion of one or more requests: -+ * prevent it from possibly being incorrectly -+ * deemed as soft real-time by setting its -+ * soft_rt_next_start to infinity. In fact, -+ * without this assignment, the application -+ * would be incorrectly deemed as soft -+ * real-time if: -+ * 1) it issued a new request before the -+ * completion of all its in-flight -+ * requests, and -+ * 2) at that time, its soft_rt_next_start -+ * happened to be in the past. -+ */ -+ bfqq->soft_rt_next_start = -+ bfq_greatest_from_now(); -+ /* -+ * Schedule an update of soft_rt_next_start to when -+ * the task may be discovered to be isochronous. -+ */ -+ bfq_mark_bfqq_softrt_update(bfqq); -+ } -+ } -+ -+ bfq_log_bfqq(bfqd, bfqq, -+ "expire (%d, slow %d, num_disp %d, idle_win %d, weight %d)", -+ reason, slow, bfqq->dispatched, -+ bfq_bfqq_idle_window(bfqq), entity->weight); -+ -+ /* -+ * Increase, decrease or leave budget unchanged according to -+ * reason. 
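/*
 * Boolean sketch of the "charge time instead of service" rule applied
 * on expiration above (simplified parameter names): only
 * non-weight-raised queues are time-charged, and only if they were
 * either detected as slow or hit the budget timeout while still
 * holding at least a third of their budget.
 */
#include <stdbool.h>

static bool charge_time_not_service(bool weight_raised, bool slow,
				    bool budget_timeout_fired,
				    int budget_left, int budget)
{
	if (weight_raised)
		return false;
	return slow ||
	       (budget_timeout_fired && budget_left >= budget / 3);
}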
-+ */ -+ BUG_ON(bfqq->entity.budget < bfqq->entity.service); -+ __bfq_bfqq_recalc_budget(bfqd, bfqq, reason); -+ BUG_ON(bfqq->next_rq == NULL && -+ bfqq->entity.budget < bfqq->entity.service); -+ ref = bfqq->ref; -+ __bfq_bfqq_expire(bfqd, bfqq); -+ -+ BUG_ON(ref > 1 && -+ !bfq_bfqq_busy(bfqq) && reason == BFQ_BFQQ_BUDGET_EXHAUSTED && -+ !bfq_class_idle(bfqq)); -+ -+ /* mark bfqq as waiting a request only if a bic still points to it */ -+ if (ref > 1 && !bfq_bfqq_busy(bfqq) && -+ reason != BFQ_BFQQ_BUDGET_TIMEOUT && -+ reason != BFQ_BFQQ_BUDGET_EXHAUSTED) -+ bfq_mark_bfqq_non_blocking_wait_rq(bfqq); -+} -+ -+/* -+ * Budget timeout is not implemented through a dedicated timer, but -+ * just checked on request arrivals and completions, as well as on -+ * idle timer expirations. -+ */ -+static bool bfq_bfqq_budget_timeout(struct bfq_queue *bfqq) -+{ -+ return time_is_before_eq_jiffies(bfqq->budget_timeout); -+} -+ -+/* -+ * If we expire a queue that is actively waiting (i.e., with the -+ * device idled) for the arrival of a new request, then we may incur -+ * the timestamp misalignment problem described in the body of the -+ * function __bfq_activate_entity. Hence we return true only if this -+ * condition does not hold, or if the queue is slow enough to deserve -+ * only to be kicked off for preserving a high throughput. -+ */ -+static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq) -+{ -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "may_budget_timeout: wait_request %d left %d timeout %d", -+ bfq_bfqq_wait_request(bfqq), -+ bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3, -+ bfq_bfqq_budget_timeout(bfqq)); -+ -+ return (!bfq_bfqq_wait_request(bfqq) || -+ bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3) -+ && -+ bfq_bfqq_budget_timeout(bfqq); -+} -+ -+/* -+ * For a queue that becomes empty, device idling is allowed only if -+ * this function returns true for that queue. As a consequence, since -+ * device idling plays a critical role for both throughput boosting -+ * and service guarantees, the return value of this function plays a -+ * critical role as well. -+ * -+ * In a nutshell, this function returns true only if idling is -+ * beneficial for throughput or, even if detrimental for throughput, -+ * idling is however necessary to preserve service guarantees (low -+ * latency, desired throughput distribution, ...). In particular, on -+ * NCQ-capable devices, this function tries to return false, so as to -+ * help keep the drives' internal queues full, whenever this helps the -+ * device boost the throughput without causing any service-guarantee -+ * issue. -+ * -+ * In more detail, the return value of this function is obtained by, -+ * first, computing a number of boolean variables that take into -+ * account throughput and service-guarantee issues, and, then, -+ * combining these variables in a logical expression. Most of the -+ * issues taken into account are not trivial. We discuss these issues -+ * while introducing the variables. -+ */ -+static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq) -+{ -+ struct bfq_data *bfqd = bfqq->bfqd; -+ bool idling_boosts_thr, idling_boosts_thr_without_issues, -+ idling_needed_for_service_guarantees, -+ asymmetric_scenario; -+ -+ if (bfqd->strict_guarantees) -+ return true; -+ -+ /* -+ * The next variable takes into account the cases where idling -+ * boosts the throughput. 
-+ * -+ * The value of the variable is computed considering, first, that -+ * idling is virtually always beneficial for the throughput if: -+ * (a) the device is not NCQ-capable, or -+ * (b) regardless of the presence of NCQ, the device is rotational -+ * and the request pattern for bfqq is I/O-bound and sequential. -+ * -+ * Secondly, and in contrast to the above item (b), idling an -+ * NCQ-capable flash-based device would not boost the -+ * throughput even with sequential I/O; rather it would lower -+ * the throughput in proportion to how fast the device -+ * is. Accordingly, the next variable is true if any of the -+ * above conditions (a) and (b) is true, and, in particular, -+ * happens to be false if bfqd is an NCQ-capable flash-based -+ * device. -+ */ -+ idling_boosts_thr = !bfqd->hw_tag || -+ (!blk_queue_nonrot(bfqd->queue) && bfq_bfqq_IO_bound(bfqq) && -+ bfq_bfqq_idle_window(bfqq)); -+ -+ /* -+ * The value of the next variable, -+ * idling_boosts_thr_without_issues, is equal to that of -+ * idling_boosts_thr, unless a special case holds. In this -+ * special case, described below, idling may cause problems to -+ * weight-raised queues. -+ * -+ * When the request pool is saturated (e.g., in the presence -+ * of write hogs), if the processes associated with -+ * non-weight-raised queues ask for requests at a lower rate, -+ * then processes associated with weight-raised queues have a -+ * higher probability to get a request from the pool -+ * immediately (or at least soon) when they need one. Thus -+ * they have a higher probability to actually get a fraction -+ * of the device throughput proportional to their high -+ * weight. This is especially true with NCQ-capable drives, -+ * which enqueue several requests in advance, and further -+ * reorder internally-queued requests. -+ * -+ * For this reason, we force to false the value of -+ * idling_boosts_thr_without_issues if there are weight-raised -+ * busy queues. In this case, and if bfqq is not weight-raised, -+ * this guarantees that the device is not idled for bfqq (if, -+ * instead, bfqq is weight-raised, then idling will be -+ * guaranteed by another variable, see below). Combined with -+ * the timestamping rules of BFQ (see [1] for details), this -+ * behavior causes bfqq, and hence any sync non-weight-raised -+ * queue, to get a lower number of requests served, and thus -+ * to ask for a lower number of requests from the request -+ * pool, before the busy weight-raised queues get served -+ * again. This often mitigates starvation problems in the -+ * presence of heavy write workloads and NCQ, thereby -+ * guaranteeing a higher application and system responsiveness -+ * in these hostile scenarios. -+ */ -+ idling_boosts_thr_without_issues = idling_boosts_thr && -+ bfqd->wr_busy_queues == 0; -+ -+ /* -+ * There is then a case where idling must be performed not -+ * for throughput concerns, but to preserve service -+ * guarantees. -+ * -+ * To introduce this case, we can note that allowing the drive -+ * to enqueue more than one request at a time, and hence -+ * delegating de facto final scheduling decisions to the -+ * drive's internal scheduler, entails loss of control on the -+ * actual request service order. In particular, the critical -+ * situation is when requests from different processes happen -+ * to be present, at the same time, in the internal queue(s) -+ * of the drive. 
In such a situation, the drive, by deciding -+ * the service order of the internally-queued requests, does -+ * determine also the actual throughput distribution among -+ * these processes. But the drive typically has no notion or -+ * concern about per-process throughput distribution, and -+ * makes its decisions only on a per-request basis. Therefore, -+ * the service distribution enforced by the drive's internal -+ * scheduler is likely to coincide with the desired -+ * device-throughput distribution only in a completely -+ * symmetric scenario where: -+ * (i) each of these processes must get the same throughput as -+ * the others; -+ * (ii) all these processes have the same I/O pattern -+ * (either sequential or random). -+ * In fact, in such a scenario, the drive will tend to treat -+ * the requests of each of these processes in about the same -+ * way as the requests of the others, and thus to provide -+ * each of these processes with about the same throughput -+ * (which is exactly the desired throughput distribution). In -+ * contrast, in any asymmetric scenario, device idling is -+ * certainly needed to guarantee that bfqq receives its -+ * assigned fraction of the device throughput (see [1] for -+ * details). -+ * -+ * We address this issue by controlling, actually, only the -+ * symmetry sub-condition (i), i.e., provided that -+ * sub-condition (i) holds, idling is not performed, -+ * regardless of whether sub-condition (ii) holds. In other -+ * words, only if sub-condition (i) holds, then idling is -+ * allowed, and the device tends to be prevented from queueing -+ * many requests, possibly of several processes. The reason -+ * for not controlling also sub-condition (ii) is that we -+ * exploit preemption to preserve guarantees in case of -+ * symmetric scenarios, even if (ii) does not hold, as -+ * explained in the next two paragraphs. -+ * -+ * Even if a queue, say Q, is expired when it remains idle, Q -+ * can still preempt the new in-service queue if the next -+ * request of Q arrives soon (see the comments on -+ * bfq_bfqq_update_budg_for_activation). If all queues and -+ * groups have the same weight, this form of preemption, -+ * combined with the hole-recovery heuristic described in the -+ * comments on function bfq_bfqq_update_budg_for_activation, -+ * are enough to preserve a correct bandwidth distribution in -+ * the mid term, even without idling. In fact, even if not -+ * idling allows the internal queues of the device to contain -+ * many requests, and thus to reorder requests, we can rather -+ * safely assume that the internal scheduler still preserves a -+ * minimum of mid-term fairness. The motivation for using -+ * preemption instead of idling is that, by not idling, -+ * service guarantees are preserved without minimally -+ * sacrificing throughput. In other words, both a high -+ * throughput and its desired distribution are obtained. -+ * -+ * More precisely, this preemption-based, idleless approach -+ * provides fairness in terms of IOPS, and not sectors per -+ * second. This can be seen with a simple example. Suppose -+ * that there are two queues with the same weight, but that -+ * the first queue receives requests of 8 sectors, while the -+ * second queue receives requests of 1024 sectors. In -+ * addition, suppose that each of the two queues contains at -+ * most one request at a time, which implies that each queue -+ * always remains idle after it is served. Finally, after -+ * remaining idle, each queue receives very quickly a new -+ * request. 
It follows that the two queues are served -+ * alternatively, preempting each other if needed. This -+ * implies that, although both queues have the same weight, -+ * the queue with large requests receives a service that is -+ * 1024/8 times as high as the service received by the other -+ * queue. -+ * -+ * On the other hand, device idling is performed, and thus -+ * pure sector-domain guarantees are provided, for the -+ * following queues, which are likely to need stronger -+ * throughput guarantees: weight-raised queues, and queues -+ * with a higher weight than other queues. When such queues -+ * are active, sub-condition (i) is false, which triggers -+ * device idling. -+ * -+ * According to the above considerations, the next variable is -+ * true (only) if sub-condition (i) holds. To compute the -+ * value of this variable, we not only use the return value of -+ * the function bfq_symmetric_scenario(), but also check -+ * whether bfqq is being weight-raised, because -+ * bfq_symmetric_scenario() does not take into account also -+ * weight-raised queues (see comments on -+ * bfq_weights_tree_add()). -+ * -+ * As a side note, it is worth considering that the above -+ * device-idling countermeasures may however fail in the -+ * following unlucky scenario: if idling is (correctly) -+ * disabled in a time period during which all symmetry -+ * sub-conditions hold, and hence the device is allowed to -+ * enqueue many requests, but at some later point in time some -+ * sub-condition stops to hold, then it may become impossible -+ * to let requests be served in the desired order until all -+ * the requests already queued in the device have been served. -+ */ -+ asymmetric_scenario = bfqq->wr_coeff > 1 || -+ !bfq_symmetric_scenario(bfqd); -+ -+ /* -+ * Finally, there is a case where maximizing throughput is the -+ * best choice even if it may cause unfairness toward -+ * bfqq. Such a case is when bfqq became active in a burst of -+ * queue activations. Queues that became active during a large -+ * burst benefit only from throughput, as discussed in the -+ * comments on bfq_handle_burst. Thus, if bfqq became active -+ * in a burst and not idling the device maximizes throughput, -+ * then the device must no be idled, because not idling the -+ * device provides bfqq and all other queues in the burst with -+ * maximum benefit. Combining this and the above case, we can -+ * now establish when idling is actually needed to preserve -+ * service guarantees. -+ */ -+ idling_needed_for_service_guarantees = -+ asymmetric_scenario && !bfq_bfqq_in_large_burst(bfqq); -+ -+ /* -+ * We have now all the components we need to compute the return -+ * value of the function, which is true only if both the following -+ * conditions hold: -+ * 1) bfqq is sync, because idling make sense only for sync queues; -+ * 2) idling either boosts the throughput (without issues), or -+ * is necessary to preserve service guarantees. 
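/*
 * The idling decision above condensed into one pure function (a
 * sketch with flattened boolean inputs, not the kernel signature):
 * idle only for sync queues, and only if idling either boosts
 * throughput without hurting weight-raised queues, or is required
 * because the scenario is asymmetric and the queue is not part of a
 * large burst.
 */
#include <stdbool.h>

static bool may_idle(bool strict_guarantees, bool sync,
		     bool ncq_capable, bool rotational,
		     bool io_bound, bool idle_window,
		     int wr_busy_queues, bool weight_raised,
		     bool symmetric_scenario, bool in_large_burst)
{
	bool boosts_thr, boosts_thr_without_issues;
	bool asymmetric, needed_for_guarantees;

	if (strict_guarantees)
		return true;

	boosts_thr = !ncq_capable ||
		(rotational && io_bound && idle_window);
	boosts_thr_without_issues = boosts_thr && wr_busy_queues == 0;
	asymmetric = weight_raised || !symmetric_scenario;
	needed_for_guarantees = asymmetric && !in_large_burst;

	return sync &&
		(boosts_thr_without_issues || needed_for_guarantees);
}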
-+ */ -+ bfq_log_bfqq(bfqd, bfqq, "may_idle: sync %d idling_boosts_thr %d", -+ bfq_bfqq_sync(bfqq), idling_boosts_thr); -+ -+ bfq_log_bfqq(bfqd, bfqq, -+ "may_idle: wr_busy %d boosts %d IO-bound %d guar %d", -+ bfqd->wr_busy_queues, -+ idling_boosts_thr_without_issues, -+ bfq_bfqq_IO_bound(bfqq), -+ idling_needed_for_service_guarantees); -+ -+ return bfq_bfqq_sync(bfqq) && -+ (idling_boosts_thr_without_issues || -+ idling_needed_for_service_guarantees); -+} -+ -+/* -+ * If the in-service queue is empty but the function bfq_bfqq_may_idle -+ * returns true, then: -+ * 1) the queue must remain in service and cannot be expired, and -+ * 2) the device must be idled to wait for the possible arrival of a new -+ * request for the queue. -+ * See the comments on the function bfq_bfqq_may_idle for the reasons -+ * why performing device idling is the best choice to boost the throughput -+ * and preserve service guarantees when bfq_bfqq_may_idle itself -+ * returns true. -+ */ -+static bool bfq_bfqq_must_idle(struct bfq_queue *bfqq) -+{ -+ struct bfq_data *bfqd = bfqq->bfqd; -+ -+ return RB_EMPTY_ROOT(&bfqq->sort_list) && bfqd->bfq_slice_idle != 0 && -+ bfq_bfqq_may_idle(bfqq); -+} -+ -+/* -+ * Select a queue for service. If we have a current queue in service, -+ * check whether to continue servicing it, or retrieve and set a new one. -+ */ -+static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd) -+{ -+ struct bfq_queue *bfqq; -+ struct request *next_rq; -+ enum bfqq_expiration reason = BFQ_BFQQ_BUDGET_TIMEOUT; -+ -+ bfqq = bfqd->in_service_queue; -+ if (!bfqq) -+ goto new_queue; -+ -+ bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue"); -+ -+ if (bfq_may_expire_for_budg_timeout(bfqq) && -+ !hrtimer_active(&bfqd->idle_slice_timer) && -+ !bfq_bfqq_must_idle(bfqq)) -+ goto expire; -+ -+check_queue: -+ /* -+ * This loop is rarely executed more than once. Even when it -+ * happens, it is much more convenient to re-execute this loop -+ * than to return NULL and trigger a new dispatch to get a -+ * request served. -+ */ -+ next_rq = bfqq->next_rq; -+ /* -+ * If bfqq has requests queued and it has enough budget left to -+ * serve them, keep the queue, otherwise expire it. -+ */ -+ if (next_rq) { -+ BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list)); -+ -+ if (bfq_serv_to_charge(next_rq, bfqq) > -+ bfq_bfqq_budget_left(bfqq)) { -+ /* -+ * Expire the queue for budget exhaustion, -+ * which makes sure that the next budget is -+ * enough to serve the next request, even if -+ * it comes from the fifo expired path. -+ */ -+ reason = BFQ_BFQQ_BUDGET_EXHAUSTED; -+ goto expire; -+ } else { -+ /* -+ * The idle timer may be pending because we may -+ * not disable disk idling even when a new request -+ * arrives. -+ */ -+ if (bfq_bfqq_wait_request(bfqq)) { -+ BUG_ON(!hrtimer_active(&bfqd->idle_slice_timer)); -+ /* -+ * If we get here: 1) at least a new request -+ * has arrived but we have not disabled the -+ * timer because the request was too small, -+ * 2) then the block layer has unplugged -+ * the device, causing the dispatch to be -+ * invoked. -+ * -+ * Since the device is unplugged, now the -+ * requests are probably large enough to -+ * provide a reasonable throughput. -+ * So we disable idling. -+ */ -+ bfq_clear_bfqq_wait_request(bfqq); -+ hrtimer_try_to_cancel(&bfqd->idle_slice_timer); -+ bfqg_stats_update_idle_time(bfqq_group(bfqq)); -+ } -+ goto keep_queue; -+ } -+ } -+ -+ /* -+ * No requests pending. 
However, if the in-service queue is idling -+ * for a new request, or has requests waiting for a completion and -+ * may idle after their completion, then keep it anyway. -+ */ -+ if (hrtimer_active(&bfqd->idle_slice_timer) || -+ (bfqq->dispatched != 0 && bfq_bfqq_may_idle(bfqq))) { -+ bfqq = NULL; -+ goto keep_queue; -+ } -+ -+ reason = BFQ_BFQQ_NO_MORE_REQUESTS; -+expire: -+ bfq_bfqq_expire(bfqd, bfqq, false, reason); -+new_queue: -+ bfqq = bfq_set_in_service_queue(bfqd); -+ if (bfqq) { -+ bfq_log_bfqq(bfqd, bfqq, "select_queue: checking new queue"); -+ goto check_queue; -+ } -+keep_queue: -+ if (bfqq) -+ bfq_log_bfqq(bfqd, bfqq, "select_queue: returned this queue"); -+ else -+ bfq_log(bfqd, "select_queue: no queue returned"); -+ -+ return bfqq; -+} -+ -+static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq) -+{ -+ struct bfq_entity *entity = &bfqq->entity; -+ -+ if (bfqq->wr_coeff > 1) { /* queue is being weight-raised */ -+ BUG_ON(bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time && -+ time_is_after_jiffies(bfqq->last_wr_start_finish)); -+ -+ bfq_log_bfqq(bfqd, bfqq, -+ "raising period dur %u/%u msec, old coeff %u, w %d(%d)", -+ jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish), -+ jiffies_to_msecs(bfqq->wr_cur_max_time), -+ bfqq->wr_coeff, -+ bfqq->entity.weight, bfqq->entity.orig_weight); -+ -+ BUG_ON(bfqq != bfqd->in_service_queue && entity->weight != -+ entity->orig_weight * bfqq->wr_coeff); -+ if (entity->prio_changed) -+ bfq_log_bfqq(bfqd, bfqq, "WARN: pending prio change"); -+ -+ /* -+ * If the queue was activated in a burst, or too much -+ * time has elapsed from the beginning of this -+ * weight-raising period, then end weight raising. -+ */ -+ if (bfq_bfqq_in_large_burst(bfqq)) -+ bfq_bfqq_end_wr(bfqq); -+ else if (time_is_before_jiffies(bfqq->last_wr_start_finish + -+ bfqq->wr_cur_max_time)) { -+ if (bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time || -+ time_is_before_jiffies(bfqq->wr_start_at_switch_to_srt + -+ bfq_wr_duration(bfqd))) -+ bfq_bfqq_end_wr(bfqq); -+ else { -+ /* switch back to interactive wr */ -+ bfqq->wr_coeff = bfqd->bfq_wr_coeff; -+ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); -+ bfqq->last_wr_start_finish = -+ bfqq->wr_start_at_switch_to_srt; -+ BUG_ON(time_is_after_jiffies( -+ bfqq->last_wr_start_finish)); -+ bfqq->entity.prio_changed = 1; -+ bfq_log_bfqq(bfqd, bfqq, -+ "back to interactive wr"); -+ } -+ } -+ } -+ /* -+ * To improve latency (for this or other queues), immediately -+ * update weight both if it must be raised and if it must be -+ * lowered. Since, entity may be on some active tree here, and -+ * might have a pending change of its ioprio class, invoke -+ * next function with the last parameter unset (see the -+ * comments on the function). -+ */ -+ if ((entity->weight > entity->orig_weight) != (bfqq->wr_coeff > 1)) -+ __bfq_entity_update_weight_prio(bfq_entity_service_tree(entity), -+ entity, false); -+} -+ -+/* -+ * Dispatch one request from bfqq, moving it to the request queue -+ * dispatch list. 
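/*
 * Sketch of the weight-raising maintenance performed above when a
 * raised queue is served (simplified fields, abstract clock): raising
 * ends at once for queues born in a large burst; otherwise, when the
 * current raising window has elapsed, the queue either falls back to
 * the remainder of its earlier interactive raising period or stops
 * being raised altogether.
 */
#include <stdbool.h>

struct wr_sketch {
	unsigned int wr_coeff;
	unsigned long wr_cur_max_time;
	unsigned long last_wr_start_finish;
	unsigned long wr_start_at_switch_to_srt;
};

static void update_wr(struct wr_sketch *q, unsigned long now,
		      bool in_large_burst, unsigned long rt_max_time,
		      unsigned long interactive_wr_duration,
		      unsigned int interactive_wr_coeff)
{
	if (q->wr_coeff == 1)
		return;			/* not raised, nothing to do */

	if (in_large_burst ||
	    (now >= q->last_wr_start_finish + q->wr_cur_max_time &&
	     (q->wr_cur_max_time != rt_max_time ||
	      now >= q->wr_start_at_switch_to_srt +
		     interactive_wr_duration))) {
		q->wr_coeff = 1;	/* end weight raising */
		return;
	}

	if (now >= q->last_wr_start_finish + q->wr_cur_max_time) {
		/* soft rt window over: resume interactive raising */
		q->wr_coeff = interactive_wr_coeff;
		q->wr_cur_max_time = interactive_wr_duration;
		q->last_wr_start_finish = q->wr_start_at_switch_to_srt;
	}
}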
-+ */ -+static int bfq_dispatch_request(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq) -+{ -+ int dispatched = 0; -+ struct request *rq = bfqq->next_rq; -+ unsigned long service_to_charge; -+ -+ BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list)); -+ BUG_ON(!rq); -+ service_to_charge = bfq_serv_to_charge(rq, bfqq); -+ -+ BUG_ON(service_to_charge > bfq_bfqq_budget_left(bfqq)); -+ -+ BUG_ON(bfqq->entity.budget < bfqq->entity.service); -+ -+ bfq_bfqq_served(bfqq, service_to_charge); -+ -+ BUG_ON(bfqq->entity.budget < bfqq->entity.service); -+ -+ bfq_dispatch_insert(bfqd->queue, rq); -+ -+ /* -+ * If weight raising has to terminate for bfqq, then next -+ * function causes an immediate update of bfqq's weight, -+ * without waiting for next activation. As a consequence, on -+ * expiration, bfqq will be timestamped as if has never been -+ * weight-raised during this service slot, even if it has -+ * received part or even most of the service as a -+ * weight-raised queue. This inflates bfqq's timestamps, which -+ * is beneficial, as bfqq is then more willing to leave the -+ * device immediately to possible other weight-raised queues. -+ */ -+ bfq_update_wr_data(bfqd, bfqq); -+ -+ bfq_log_bfqq(bfqd, bfqq, -+ "dispatched %u sec req (%llu), budg left %d", -+ blk_rq_sectors(rq), -+ (unsigned long long) blk_rq_pos(rq), -+ bfq_bfqq_budget_left(bfqq)); -+ -+ dispatched++; -+ -+ if (!bfqd->in_service_bic) { -+ atomic_long_inc(&RQ_BIC(rq)->icq.ioc->refcount); -+ bfqd->in_service_bic = RQ_BIC(rq); -+ } -+ -+ if (bfqd->busy_queues > 1 && bfq_class_idle(bfqq)) -+ goto expire; -+ -+ return dispatched; -+ -+expire: -+ bfq_bfqq_expire(bfqd, bfqq, false, BFQ_BFQQ_BUDGET_EXHAUSTED); -+ return dispatched; -+} -+ -+static int __bfq_forced_dispatch_bfqq(struct bfq_queue *bfqq) -+{ -+ int dispatched = 0; -+ -+ while (bfqq->next_rq) { -+ bfq_dispatch_insert(bfqq->bfqd->queue, bfqq->next_rq); -+ dispatched++; -+ } -+ -+ BUG_ON(!list_empty(&bfqq->fifo)); -+ return dispatched; -+} -+ -+/* -+ * Drain our current requests. -+ * Used for barriers and when switching io schedulers on-the-fly. -+ */ -+static int bfq_forced_dispatch(struct bfq_data *bfqd) -+{ -+ struct bfq_queue *bfqq, *n; -+ struct bfq_service_tree *st; -+ int dispatched = 0; -+ -+ bfqq = bfqd->in_service_queue; -+ if (bfqq) -+ __bfq_bfqq_expire(bfqd, bfqq); -+ -+ /* -+ * Loop through classes, and be careful to leave the scheduler -+ * in a consistent state, as feedback mechanisms and vtime -+ * updates cannot be disabled during the process. -+ */ -+ list_for_each_entry_safe(bfqq, n, &bfqd->active_list, bfqq_list) { -+ st = bfq_entity_service_tree(&bfqq->entity); -+ -+ dispatched += __bfq_forced_dispatch_bfqq(bfqq); -+ -+ bfqq->max_budget = bfq_max_budget(bfqd); -+ bfq_forget_idle(st); -+ } -+ -+ BUG_ON(bfqd->busy_queues != 0); -+ -+ return dispatched; -+} -+ -+static int bfq_dispatch_requests(struct request_queue *q, int force) -+{ -+ struct bfq_data *bfqd = q->elevator->elevator_data; -+ struct bfq_queue *bfqq; -+ -+ bfq_log(bfqd, "dispatch requests: %d busy queues", bfqd->busy_queues); -+ -+ if (bfqd->busy_queues == 0) -+ return 0; -+ -+ if (unlikely(force)) -+ return bfq_forced_dispatch(bfqd); -+ -+ /* -+ * Force device to serve one request at a time if -+ * strict_guarantees is true. Forcing this service scheme is -+ * currently the ONLY way to guarantee that the request -+ * service order enforced by the scheduler is respected by a -+ * queueing device. Otherwise the device is free even to make -+ * some unlucky request wait for as long as the device -+ * wishes. 
-+ * -+ * Of course, serving one request at at time may cause loss of -+ * throughput. -+ */ -+ if (bfqd->strict_guarantees && bfqd->rq_in_driver > 0) -+ return 0; -+ -+ bfqq = bfq_select_queue(bfqd); -+ if (!bfqq) -+ return 0; -+ -+ BUG_ON(bfqq->entity.budget < bfqq->entity.service); -+ -+ BUG_ON(bfq_bfqq_wait_request(bfqq)); -+ -+ if (!bfq_dispatch_request(bfqd, bfqq)) -+ return 0; -+ -+ bfq_log_bfqq(bfqd, bfqq, "dispatched %s request", -+ bfq_bfqq_sync(bfqq) ? "sync" : "async"); -+ -+ BUG_ON(bfqq->next_rq == NULL && -+ bfqq->entity.budget < bfqq->entity.service); -+ return 1; -+} -+ -+/* -+ * Task holds one reference to the queue, dropped when task exits. Each rq -+ * in-flight on this queue also holds a reference, dropped when rq is freed. -+ * -+ * Queue lock must be held here. Recall not to use bfqq after calling -+ * this function on it. -+ */ -+static void bfq_put_queue(struct bfq_queue *bfqq) -+{ -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+ struct bfq_group *bfqg = bfqq_group(bfqq); -+#endif -+ -+ BUG_ON(bfqq->ref <= 0); -+ -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d", bfqq, bfqq->ref); -+ bfqq->ref--; -+ if (bfqq->ref) -+ return; -+ -+ BUG_ON(rb_first(&bfqq->sort_list)); -+ BUG_ON(bfqq->allocated[READ] + bfqq->allocated[WRITE] != 0); -+ BUG_ON(bfqq->entity.tree); -+ BUG_ON(bfq_bfqq_busy(bfqq)); -+ -+ if (bfq_bfqq_sync(bfqq)) -+ /* -+ * The fact that this queue is being destroyed does not -+ * invalidate the fact that this queue may have been -+ * activated during the current burst. As a consequence, -+ * although the queue does not exist anymore, and hence -+ * needs to be removed from the burst list if there, -+ * the burst size has not to be decremented. -+ */ -+ hlist_del_init(&bfqq->burst_list_node); -+ -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p freed", bfqq); -+ -+ kmem_cache_free(bfq_pool, bfqq); -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+ bfqg_put(bfqg); -+#endif -+} -+ -+static void bfq_put_cooperator(struct bfq_queue *bfqq) -+{ -+ struct bfq_queue *__bfqq, *next; -+ -+ /* -+ * If this queue was scheduled to merge with another queue, be -+ * sure to drop the reference taken on that queue (and others in -+ * the merge chain). See bfq_setup_merge and bfq_merge_bfqqs. -+ */ -+ __bfqq = bfqq->new_bfqq; -+ while (__bfqq) { -+ if (__bfqq == bfqq) -+ break; -+ next = __bfqq->new_bfqq; -+ bfq_put_queue(__bfqq); -+ __bfqq = next; -+ } -+} -+ -+static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) -+{ -+ if (bfqq == bfqd->in_service_queue) { -+ __bfq_bfqq_expire(bfqd, bfqq); -+ bfq_schedule_dispatch(bfqd); -+ } -+ -+ bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, bfqq->ref); -+ -+ bfq_put_cooperator(bfqq); -+ -+ bfq_put_queue(bfqq); /* release process reference */ -+} -+ -+static void bfq_init_icq(struct io_cq *icq) -+{ -+ icq_to_bic(icq)->ttime.last_end_request = ktime_get_ns() - (1ULL<<32); -+} -+ -+static void bfq_exit_icq(struct io_cq *icq) -+{ -+ struct bfq_io_cq *bic = icq_to_bic(icq); -+ struct bfq_data *bfqd = bic_to_bfqd(bic); -+ -+ if (bic_to_bfqq(bic, false)) { -+ bfq_exit_bfqq(bfqd, bic_to_bfqq(bic, false)); -+ bic_set_bfqq(bic, NULL, false); -+ } -+ -+ if (bic_to_bfqq(bic, true)) { -+ /* -+ * If the bic is using a shared queue, put the reference -+ * taken on the io_context when the bic started using a -+ * shared bfq_queue. 
-+ */ -+ if (bfq_bfqq_coop(bic_to_bfqq(bic, true))) -+ put_io_context(icq->ioc); -+ bfq_exit_bfqq(bfqd, bic_to_bfqq(bic, true)); -+ bic_set_bfqq(bic, NULL, true); -+ } -+} -+ -+/* -+ * Update the entity prio values; note that the new values will not -+ * be used until the next (re)activation. -+ */ -+static void bfq_set_next_ioprio_data(struct bfq_queue *bfqq, -+ struct bfq_io_cq *bic) -+{ -+ struct task_struct *tsk = current; -+ int ioprio_class; -+ -+ ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio); -+ switch (ioprio_class) { -+ default: -+ dev_err(bfqq->bfqd->queue->backing_dev_info->dev, -+ "bfq: bad prio class %d\n", ioprio_class); -+ case IOPRIO_CLASS_NONE: -+ /* -+ * No prio set, inherit CPU scheduling settings. -+ */ -+ bfqq->new_ioprio = task_nice_ioprio(tsk); -+ bfqq->new_ioprio_class = task_nice_ioclass(tsk); -+ break; -+ case IOPRIO_CLASS_RT: -+ bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio); -+ bfqq->new_ioprio_class = IOPRIO_CLASS_RT; -+ break; -+ case IOPRIO_CLASS_BE: -+ bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio); -+ bfqq->new_ioprio_class = IOPRIO_CLASS_BE; -+ break; -+ case IOPRIO_CLASS_IDLE: -+ bfqq->new_ioprio_class = IOPRIO_CLASS_IDLE; -+ bfqq->new_ioprio = 7; -+ bfq_clear_bfqq_idle_window(bfqq); -+ break; -+ } -+ -+ if (bfqq->new_ioprio >= IOPRIO_BE_NR) { -+ pr_crit("bfq_set_next_ioprio_data: new_ioprio %d\n", -+ bfqq->new_ioprio); -+ BUG(); -+ } -+ -+ bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->new_ioprio); -+ bfqq->entity.prio_changed = 1; -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "set_next_ioprio_data: bic_class %d prio %d class %d", -+ ioprio_class, bfqq->new_ioprio, bfqq->new_ioprio_class); -+} -+ -+static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio) -+{ -+ struct bfq_data *bfqd = bic_to_bfqd(bic); -+ struct bfq_queue *bfqq; -+ unsigned long uninitialized_var(flags); -+ int ioprio = bic->icq.ioc->ioprio; -+ -+ /* -+ * This condition may trigger on a newly created bic, be sure to -+ * drop the lock before returning. 
-+ */ -+ if (unlikely(!bfqd) || likely(bic->ioprio == ioprio)) -+ return; -+ -+ bic->ioprio = ioprio; -+ -+ bfqq = bic_to_bfqq(bic, false); -+ if (bfqq) { -+ /* release process reference on this queue */ -+ bfq_put_queue(bfqq); -+ bfqq = bfq_get_queue(bfqd, bio, BLK_RW_ASYNC, bic); -+ bic_set_bfqq(bic, bfqq, false); -+ bfq_log_bfqq(bfqd, bfqq, -+ "check_ioprio_change: bfqq %p %d", -+ bfqq, bfqq->ref); -+ } -+ -+ bfqq = bic_to_bfqq(bic, true); -+ if (bfqq) -+ bfq_set_next_ioprio_data(bfqq, bic); -+} -+ -+static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, -+ struct bfq_io_cq *bic, pid_t pid, int is_sync) -+{ -+ RB_CLEAR_NODE(&bfqq->entity.rb_node); -+ INIT_LIST_HEAD(&bfqq->fifo); -+ INIT_HLIST_NODE(&bfqq->burst_list_node); -+ BUG_ON(!hlist_unhashed(&bfqq->burst_list_node)); -+ -+ bfqq->ref = 0; -+ bfqq->bfqd = bfqd; -+ -+ if (bic) -+ bfq_set_next_ioprio_data(bfqq, bic); -+ -+ if (is_sync) { -+ if (!bfq_class_idle(bfqq)) -+ bfq_mark_bfqq_idle_window(bfqq); -+ bfq_mark_bfqq_sync(bfqq); -+ bfq_mark_bfqq_just_created(bfqq); -+ } else -+ bfq_clear_bfqq_sync(bfqq); -+ bfq_mark_bfqq_IO_bound(bfqq); -+ -+ /* Tentative initial value to trade off between thr and lat */ -+ bfqq->max_budget = (2 * bfq_max_budget(bfqd)) / 3; -+ bfqq->pid = pid; -+ -+ bfqq->wr_coeff = 1; -+ bfqq->last_wr_start_finish = jiffies; -+ bfqq->wr_start_at_switch_to_srt = bfq_smallest_from_now(); -+ bfqq->budget_timeout = bfq_smallest_from_now(); -+ bfqq->split_time = bfq_smallest_from_now(); -+ -+ /* -+ * Set to the value for which bfqq will not be deemed as -+ * soft rt when it becomes backlogged. -+ */ -+ bfqq->soft_rt_next_start = bfq_greatest_from_now(); -+ -+ /* first request is almost certainly seeky */ -+ bfqq->seek_history = 1; -+} -+ -+static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd, -+ struct bfq_group *bfqg, -+ int ioprio_class, int ioprio) -+{ -+ switch (ioprio_class) { -+ case IOPRIO_CLASS_RT: -+ return &bfqg->async_bfqq[0][ioprio]; -+ case IOPRIO_CLASS_NONE: -+ ioprio = IOPRIO_NORM; -+ /* fall through */ -+ case IOPRIO_CLASS_BE: -+ return &bfqg->async_bfqq[1][ioprio]; -+ case IOPRIO_CLASS_IDLE: -+ return &bfqg->async_idle_bfqq; -+ default: -+ BUG(); -+ } -+} -+ -+static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd, -+ struct bio *bio, bool is_sync, -+ struct bfq_io_cq *bic) -+{ -+ const int ioprio = IOPRIO_PRIO_DATA(bic->ioprio); -+ const int ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio); -+ struct bfq_queue **async_bfqq = NULL; -+ struct bfq_queue *bfqq; -+ struct bfq_group *bfqg; -+ -+ rcu_read_lock(); -+ -+ bfqg = bfq_find_set_group(bfqd, bio_blkcg(bio)); -+ if (!bfqg) { -+ bfqq = &bfqd->oom_bfqq; -+ goto out; -+ } -+ -+ if (!is_sync) { -+ async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class, -+ ioprio); -+ bfqq = *async_bfqq; -+ if (bfqq) -+ goto out; -+ } -+ -+ bfqq = kmem_cache_alloc_node(bfq_pool, -+ GFP_NOWAIT | __GFP_ZERO | __GFP_NOWARN, -+ bfqd->queue->node); -+ -+ if (bfqq) { -+ bfq_init_bfqq(bfqd, bfqq, bic, current->pid, -+ is_sync); -+ bfq_init_entity(&bfqq->entity, bfqg); -+ bfq_log_bfqq(bfqd, bfqq, "allocated"); -+ } else { -+ bfqq = &bfqd->oom_bfqq; -+ bfq_log_bfqq(bfqd, bfqq, "using oom bfqq"); -+ goto out; -+ } -+ -+ /* -+ * Pin the queue now that it's allocated, scheduler exit will -+ * prune it. -+ */ -+ if (async_bfqq) { -+ bfqq->ref++; /* -+ * Extra group reference, w.r.t. sync -+ * queue. This extra reference is removed -+ * only if bfqq->bfqg disappears, to -+ * guarantee that this queue is not freed -+ * until its group goes away. 
-+ */ -+ bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d", -+ bfqq, bfqq->ref); -+ *async_bfqq = bfqq; -+ } -+ -+out: -+ bfqq->ref++; /* get a process reference to this queue */ -+ bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq, bfqq->ref); -+ rcu_read_unlock(); -+ return bfqq; -+} -+ -+static void bfq_update_io_thinktime(struct bfq_data *bfqd, -+ struct bfq_io_cq *bic) -+{ -+ struct bfq_ttime *ttime = &bic->ttime; -+ u64 elapsed = ktime_get_ns() - bic->ttime.last_end_request; -+ -+ elapsed = min_t(u64, elapsed, 2 * bfqd->bfq_slice_idle); -+ -+ ttime->ttime_samples = (7*bic->ttime.ttime_samples + 256) / 8; -+ ttime->ttime_total = div_u64(7*ttime->ttime_total + 256*elapsed, 8); -+ ttime->ttime_mean = div64_ul(ttime->ttime_total + 128, -+ ttime->ttime_samples); -+} -+ -+static void -+bfq_update_io_seektime(struct bfq_data *bfqd, struct bfq_queue *bfqq, -+ struct request *rq) -+{ -+ bfqq->seek_history <<= 1; -+ bfqq->seek_history |= -+ get_sdist(bfqq->last_request_pos, rq) > BFQQ_SEEK_THR && -+ (!blk_queue_nonrot(bfqd->queue) || -+ blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT); -+} -+ -+/* -+ * Disable idle window if the process thinks too long or seeks so much that -+ * it doesn't matter. -+ */ -+static void bfq_update_idle_window(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq, -+ struct bfq_io_cq *bic) -+{ -+ int enable_idle; -+ -+ /* Don't idle for async or idle io prio class. */ -+ if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq)) -+ return; -+ -+ /* Idle window just restored, statistics are meaningless. */ -+ if (time_is_after_eq_jiffies(bfqq->split_time + -+ bfqd->bfq_wr_min_idle_time)) -+ return; -+ -+ enable_idle = bfq_bfqq_idle_window(bfqq); -+ -+ if (atomic_read(&bic->icq.ioc->active_ref) == 0 || -+ bfqd->bfq_slice_idle == 0 || -+ (bfqd->hw_tag && BFQQ_SEEKY(bfqq) && -+ bfqq->wr_coeff == 1)) -+ enable_idle = 0; -+ else if (bfq_sample_valid(bic->ttime.ttime_samples)) { -+ if (bic->ttime.ttime_mean > bfqd->bfq_slice_idle && -+ bfqq->wr_coeff == 1) -+ enable_idle = 0; -+ else -+ enable_idle = 1; -+ } -+ bfq_log_bfqq(bfqd, bfqq, "update_idle_window: enable_idle %d", -+ enable_idle); -+ -+ if (enable_idle) -+ bfq_mark_bfqq_idle_window(bfqq); -+ else -+ bfq_clear_bfqq_idle_window(bfqq); -+} -+ -+/* -+ * Called when a new fs request (rq) is added to bfqq. Check if there's -+ * something we should do about it. -+ */ -+static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq, -+ struct request *rq) -+{ -+ struct bfq_io_cq *bic = RQ_BIC(rq); -+ -+ if (rq->cmd_flags & REQ_META) -+ bfqq->meta_pending++; -+ -+ bfq_update_io_thinktime(bfqd, bic); -+ bfq_update_io_seektime(bfqd, bfqq, rq); -+ if (bfqq->entity.service > bfq_max_budget(bfqd) / 8 || -+ !BFQQ_SEEKY(bfqq)) -+ bfq_update_idle_window(bfqd, bfqq, bic); -+ -+ bfq_log_bfqq(bfqd, bfqq, -+ "rq_enqueued: idle_window=%d (seeky %d)", -+ bfq_bfqq_idle_window(bfqq), BFQQ_SEEKY(bfqq)); -+ -+ bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq); -+ -+ if (bfqq == bfqd->in_service_queue && bfq_bfqq_wait_request(bfqq)) { -+ bool small_req = bfqq->queued[rq_is_sync(rq)] == 1 && -+ blk_rq_sectors(rq) < 32; -+ bool budget_timeout = bfq_bfqq_budget_timeout(bfqq); -+ -+ /* -+ * There is just this request queued: if the request -+ * is small and the queue is not to be expired, then -+ * just exit. -+ * -+ * In this way, if the device is being idled to wait -+ * for a new request from the in-service queue, we -+ * avoid unplugging the device and committing the -+ * device to serve just a small request. 
On the -+ * contrary, we wait for the block layer to decide -+ * when to unplug the device: hopefully, new requests -+ * will be merged to this one quickly, then the device -+ * will be unplugged and larger requests will be -+ * dispatched. -+ */ -+ if (small_req && !budget_timeout) -+ return; -+ -+ /* -+ * A large enough request arrived, or the queue is to -+ * be expired: in both cases disk idling is to be -+ * stopped, so clear wait_request flag and reset -+ * timer. -+ */ -+ bfq_clear_bfqq_wait_request(bfqq); -+ hrtimer_try_to_cancel(&bfqd->idle_slice_timer); -+ bfqg_stats_update_idle_time(bfqq_group(bfqq)); -+ -+ /* -+ * The queue is not empty, because a new request just -+ * arrived. Hence we can safely expire the queue, in -+ * case of budget timeout, without risking that the -+ * timestamps of the queue are not updated correctly. -+ * See [1] for more details. -+ */ -+ if (budget_timeout) -+ bfq_bfqq_expire(bfqd, bfqq, false, -+ BFQ_BFQQ_BUDGET_TIMEOUT); -+ -+ /* -+ * Let the request rip immediately, or let a new queue be -+ * selected if bfqq has just been expired. -+ */ -+ __blk_run_queue(bfqd->queue); -+ } -+} -+ -+static void bfq_insert_request(struct request_queue *q, struct request *rq) -+{ -+ struct bfq_data *bfqd = q->elevator->elevator_data; -+ struct bfq_queue *bfqq = RQ_BFQQ(rq), *new_bfqq; -+ -+ assert_spin_locked(bfqd->queue->queue_lock); -+ -+ /* -+ * An unplug may trigger a requeue of a request from the device -+ * driver: make sure we are in process context while trying to -+ * merge two bfq_queues. -+ */ -+ if (!in_interrupt()) { -+ new_bfqq = bfq_setup_cooperator(bfqd, bfqq, rq, true); -+ if (new_bfqq) { -+ if (bic_to_bfqq(RQ_BIC(rq), 1) != bfqq) -+ new_bfqq = bic_to_bfqq(RQ_BIC(rq), 1); -+ /* -+ * Release the request's reference to the old bfqq -+ * and make sure one is taken to the shared queue. -+ */ -+ new_bfqq->allocated[rq_data_dir(rq)]++; -+ bfqq->allocated[rq_data_dir(rq)]--; -+ new_bfqq->ref++; -+ bfq_clear_bfqq_just_created(bfqq); -+ if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq) -+ bfq_merge_bfqqs(bfqd, RQ_BIC(rq), -+ bfqq, new_bfqq); -+ /* -+ * rq is about to be enqueued into new_bfqq, -+ * release rq reference on bfqq -+ */ -+ bfq_put_queue(bfqq); -+ rq->elv.priv[1] = new_bfqq; -+ bfqq = new_bfqq; -+ } -+ } -+ -+ bfq_add_request(rq); -+ -+ rq->fifo_time = ktime_get_ns() + bfqd->bfq_fifo_expire[rq_is_sync(rq)]; -+ list_add_tail(&rq->queuelist, &bfqq->fifo); -+ -+ bfq_rq_enqueued(bfqd, bfqq, rq); -+} -+ -+static void bfq_update_hw_tag(struct bfq_data *bfqd) -+{ -+ bfqd->max_rq_in_driver = max_t(int, bfqd->max_rq_in_driver, -+ bfqd->rq_in_driver); -+ -+ if (bfqd->hw_tag == 1) -+ return; -+ -+ /* -+ * This sample is valid if the number of outstanding requests -+ * is large enough to allow a queueing behavior. Note that the -+ * sum is not exact, as it's not taking into account deactivated -+ * requests. 
-+ */ -+ if (bfqd->rq_in_driver + bfqd->queued < BFQ_HW_QUEUE_THRESHOLD) -+ return; -+ -+ if (bfqd->hw_tag_samples++ < BFQ_HW_QUEUE_SAMPLES) -+ return; -+ -+ bfqd->hw_tag = bfqd->max_rq_in_driver > BFQ_HW_QUEUE_THRESHOLD; -+ bfqd->max_rq_in_driver = 0; -+ bfqd->hw_tag_samples = 0; -+} -+ -+static void bfq_completed_request(struct request_queue *q, struct request *rq) -+{ -+ struct bfq_queue *bfqq = RQ_BFQQ(rq); -+ struct bfq_data *bfqd = bfqq->bfqd; -+ u64 now_ns; -+ u32 delta_us; -+ -+ bfq_log_bfqq(bfqd, bfqq, "completed one req with %u sects left", -+ blk_rq_sectors(rq)); -+ -+ assert_spin_locked(bfqd->queue->queue_lock); -+ bfq_update_hw_tag(bfqd); -+ -+ BUG_ON(!bfqd->rq_in_driver); -+ BUG_ON(!bfqq->dispatched); -+ bfqd->rq_in_driver--; -+ bfqq->dispatched--; -+ bfqg_stats_update_completion(bfqq_group(bfqq), -+ rq_start_time_ns(rq), -+ rq_io_start_time_ns(rq), -+ rq->cmd_flags); -+ -+ if (!bfqq->dispatched && !bfq_bfqq_busy(bfqq)) { -+ BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list)); -+ /* -+ * Set budget_timeout (which we overload to store the -+ * time at which the queue remains with no backlog and -+ * no outstanding request; used by the weight-raising -+ * mechanism). -+ */ -+ bfqq->budget_timeout = jiffies; -+ -+ bfq_weights_tree_remove(bfqd, &bfqq->entity, -+ &bfqd->queue_weights_tree); -+ } -+ -+ now_ns = ktime_get_ns(); -+ -+ RQ_BIC(rq)->ttime.last_end_request = now_ns; -+ -+ /* -+ * Using us instead of ns, to get a reasonable precision in -+ * computing rate in next check. -+ */ -+ delta_us = div_u64(now_ns - bfqd->last_completion, NSEC_PER_USEC); -+ -+ bfq_log(bfqd, "rq_completed: delta %uus/%luus max_size %u rate %llu/%llu", -+ delta_us, BFQ_MIN_TT/NSEC_PER_USEC, bfqd->last_rq_max_size, -+ (USEC_PER_SEC* -+ (u64)((bfqd->last_rq_max_size<>BFQ_RATE_SHIFT, -+ (USEC_PER_SEC*(u64)(1UL<<(BFQ_RATE_SHIFT-10)))>>BFQ_RATE_SHIFT); -+ -+ /* -+ * If the request took rather long to complete, and, according -+ * to the maximum request size recorded, this completion latency -+ * implies that the request was certainly served at a very low -+ * rate (less than 1M sectors/sec), then the whole observation -+ * interval that lasts up to this time instant cannot be a -+ * valid time interval for computing a new peak rate. Invoke -+ * bfq_update_rate_reset to have the following three steps -+ * taken: -+ * - close the observation interval at the last (previous) -+ * request dispatch or completion -+ * - compute rate, if possible, for that observation interval -+ * - reset to zero samples, which will trigger a proper -+ * re-initialization of the observation interval on next -+ * dispatch -+ */ -+ if (delta_us > BFQ_MIN_TT/NSEC_PER_USEC && -+ (bfqd->last_rq_max_size<last_completion = now_ns; -+ -+ /* -+ * If we are waiting to discover whether the request pattern -+ * of the task associated with the queue is actually -+ * isochronous, and both requisites for this condition to hold -+ * are now satisfied, then compute soft_rt_next_start (see the -+ * comments on the function bfq_bfqq_softrt_next_start()). We -+ * schedule this delayed check when bfqq expires, if it still -+ * has in-flight requests. -+ */ -+ if (bfq_bfqq_softrt_update(bfqq) && bfqq->dispatched == 0 && -+ RB_EMPTY_ROOT(&bfqq->sort_list)) -+ bfqq->soft_rt_next_start = -+ bfq_bfqq_softrt_next_start(bfqd, bfqq); -+ -+ /* -+ * If this is the in-service queue, check if it needs to be expired, -+ * or if we want to idle in case it has no pending requests. 
-+ */ -+ if (bfqd->in_service_queue == bfqq) { -+ if (bfqq->dispatched == 0 && bfq_bfqq_must_idle(bfqq)) { -+ bfq_arm_slice_timer(bfqd); -+ goto out; -+ } else if (bfq_may_expire_for_budg_timeout(bfqq)) -+ bfq_bfqq_expire(bfqd, bfqq, false, -+ BFQ_BFQQ_BUDGET_TIMEOUT); -+ else if (RB_EMPTY_ROOT(&bfqq->sort_list) && -+ (bfqq->dispatched == 0 || -+ !bfq_bfqq_may_idle(bfqq))) -+ bfq_bfqq_expire(bfqd, bfqq, false, -+ BFQ_BFQQ_NO_MORE_REQUESTS); -+ } -+ -+ if (!bfqd->rq_in_driver) -+ bfq_schedule_dispatch(bfqd); -+ -+out: -+ return; -+} -+ -+static int __bfq_may_queue(struct bfq_queue *bfqq) -+{ -+ if (bfq_bfqq_wait_request(bfqq) && bfq_bfqq_must_alloc(bfqq)) { -+ bfq_clear_bfqq_must_alloc(bfqq); -+ return ELV_MQUEUE_MUST; -+ } -+ -+ return ELV_MQUEUE_MAY; -+} -+ -+static int bfq_may_queue(struct request_queue *q, unsigned int op) -+{ -+ struct bfq_data *bfqd = q->elevator->elevator_data; -+ struct task_struct *tsk = current; -+ struct bfq_io_cq *bic; -+ struct bfq_queue *bfqq; -+ -+ /* -+ * Don't force setup of a queue from here, as a call to may_queue -+ * does not necessarily imply that a request actually will be -+ * queued. So just lookup a possibly existing queue, or return -+ * 'may queue' if that fails. -+ */ -+ bic = bfq_bic_lookup(bfqd, tsk->io_context); -+ if (!bic) -+ return ELV_MQUEUE_MAY; -+ -+ bfqq = bic_to_bfqq(bic, op_is_sync(op)); -+ if (bfqq) -+ return __bfq_may_queue(bfqq); -+ -+ return ELV_MQUEUE_MAY; -+} -+ -+/* -+ * Queue lock held here. -+ */ -+static void bfq_put_request(struct request *rq) -+{ -+ struct bfq_queue *bfqq = RQ_BFQQ(rq); -+ -+ if (bfqq) { -+ const int rw = rq_data_dir(rq); -+ -+ BUG_ON(!bfqq->allocated[rw]); -+ bfqq->allocated[rw]--; -+ -+ rq->elv.priv[0] = NULL; -+ rq->elv.priv[1] = NULL; -+ -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "put_request %p, %d", -+ bfqq, bfqq->ref); -+ bfq_put_queue(bfqq); -+ } -+} -+ -+/* -+ * Returns NULL if a new bfqq should be allocated, or the old bfqq if this -+ * was the last process referring to that bfqq. -+ */ -+static struct bfq_queue * -+bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq) -+{ -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue"); -+ -+ put_io_context(bic->icq.ioc); -+ -+ if (bfqq_process_refs(bfqq) == 1) { -+ bfqq->pid = current->pid; -+ bfq_clear_bfqq_coop(bfqq); -+ bfq_clear_bfqq_split_coop(bfqq); -+ return bfqq; -+ } -+ -+ bic_set_bfqq(bic, NULL, 1); -+ -+ bfq_put_cooperator(bfqq); -+ -+ bfq_put_queue(bfqq); -+ return NULL; -+} -+ -+/* -+ * Allocate bfq data structures associated with this request. 
-+ */ -+static int bfq_set_request(struct request_queue *q, struct request *rq, -+ struct bio *bio, gfp_t gfp_mask) -+{ -+ struct bfq_data *bfqd = q->elevator->elevator_data; -+ struct bfq_io_cq *bic = icq_to_bic(rq->elv.icq); -+ const int rw = rq_data_dir(rq); -+ const int is_sync = rq_is_sync(rq); -+ struct bfq_queue *bfqq; -+ unsigned long flags; -+ bool bfqq_already_existing = false, split = false; -+ -+ spin_lock_irqsave(q->queue_lock, flags); -+ -+ if (!bic) -+ goto queue_fail; -+ -+ bfq_check_ioprio_change(bic, bio); -+ -+ bfq_bic_update_cgroup(bic, bio); -+ -+new_queue: -+ bfqq = bic_to_bfqq(bic, is_sync); -+ if (!bfqq || bfqq == &bfqd->oom_bfqq) { -+ if (bfqq) -+ bfq_put_queue(bfqq); -+ bfqq = bfq_get_queue(bfqd, bio, is_sync, bic); -+ BUG_ON(!hlist_unhashed(&bfqq->burst_list_node)); -+ -+ bic_set_bfqq(bic, bfqq, is_sync); -+ if (split && is_sync) { -+ bfq_log_bfqq(bfqd, bfqq, -+ "set_request: was_in_list %d " -+ "was_in_large_burst %d " -+ "large burst in progress %d", -+ bic->was_in_burst_list, -+ bic->saved_in_large_burst, -+ bfqd->large_burst); -+ -+ if ((bic->was_in_burst_list && bfqd->large_burst) || -+ bic->saved_in_large_burst) { -+ bfq_log_bfqq(bfqd, bfqq, -+ "set_request: marking in " -+ "large burst"); -+ bfq_mark_bfqq_in_large_burst(bfqq); -+ } else { -+ bfq_log_bfqq(bfqd, bfqq, -+ "set_request: clearing in " -+ "large burst"); -+ bfq_clear_bfqq_in_large_burst(bfqq); -+ if (bic->was_in_burst_list) -+ hlist_add_head(&bfqq->burst_list_node, -+ &bfqd->burst_list); -+ } -+ bfqq->split_time = jiffies; -+ } -+ } else { -+ /* If the queue was seeky for too long, break it apart. */ -+ if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq)) { -+ bfq_log_bfqq(bfqd, bfqq, "breaking apart bfqq"); -+ -+ /* Update bic before losing reference to bfqq */ -+ if (bfq_bfqq_in_large_burst(bfqq)) -+ bic->saved_in_large_burst = true; -+ -+ bfqq = bfq_split_bfqq(bic, bfqq); -+ split = true; -+ if (!bfqq) -+ goto new_queue; -+ else -+ bfqq_already_existing = true; -+ } -+ } -+ -+ bfqq->allocated[rw]++; -+ bfqq->ref++; -+ bfq_log_bfqq(bfqd, bfqq, "set_request: bfqq %p, %d", bfqq, bfqq->ref); -+ -+ rq->elv.priv[0] = bic; -+ rq->elv.priv[1] = bfqq; -+ -+ /* -+ * If a bfq_queue has only one process reference, it is owned -+ * by only one bfq_io_cq: we can set the bic field of the -+ * bfq_queue to the address of that structure. Also, if the -+ * queue has just been split, mark a flag so that the -+ * information is available to the other scheduler hooks. -+ */ -+ if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) { -+ bfqq->bic = bic; -+ if (split) { -+ /* -+ * If the queue has just been split from a shared -+ * queue, restore the idle window and the possible -+ * weight raising period. -+ */ -+ bfq_bfqq_resume_state(bfqq, bfqd, bic, -+ bfqq_already_existing); -+ } -+ } -+ -+ if (unlikely(bfq_bfqq_just_created(bfqq))) -+ bfq_handle_burst(bfqd, bfqq); -+ -+ spin_unlock_irqrestore(q->queue_lock, flags); -+ -+ return 0; -+ -+queue_fail: -+ bfq_schedule_dispatch(bfqd); -+ spin_unlock_irqrestore(q->queue_lock, flags); -+ -+ return 1; -+} -+ -+static void bfq_kick_queue(struct work_struct *work) -+{ -+ struct bfq_data *bfqd = -+ container_of(work, struct bfq_data, unplug_work); -+ struct request_queue *q = bfqd->queue; -+ -+ spin_lock_irq(q->queue_lock); -+ __blk_run_queue(q); -+ spin_unlock_irq(q->queue_lock); -+} -+ -+/* -+ * Handler of the expiration of the timer running if the in-service queue -+ * is idling inside its time slice. 
-+ */ -+static enum hrtimer_restart bfq_idle_slice_timer(struct hrtimer *timer) -+{ -+ struct bfq_data *bfqd = container_of(timer, struct bfq_data, -+ idle_slice_timer); -+ struct bfq_queue *bfqq; -+ unsigned long flags; -+ enum bfqq_expiration reason; -+ -+ spin_lock_irqsave(bfqd->queue->queue_lock, flags); -+ -+ bfqq = bfqd->in_service_queue; -+ /* -+ * Theoretical race here: the in-service queue can be NULL or -+ * different from the queue that was idling if the timer handler -+ * spins on the queue_lock and a new request arrives for the -+ * current queue and there is a full dispatch cycle that changes -+ * the in-service queue. This can hardly happen, but in the worst -+ * case we just expire a queue too early. -+ */ -+ if (bfqq) { -+ bfq_log_bfqq(bfqd, bfqq, "slice_timer expired"); -+ bfq_clear_bfqq_wait_request(bfqq); -+ -+ if (bfq_bfqq_budget_timeout(bfqq)) -+ /* -+ * Also here the queue can be safely expired -+ * for budget timeout without wasting -+ * guarantees -+ */ -+ reason = BFQ_BFQQ_BUDGET_TIMEOUT; -+ else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0) -+ /* -+ * The queue may not be empty upon timer expiration, -+ * because we may not disable the timer when the -+ * first request of the in-service queue arrives -+ * during disk idling. -+ */ -+ reason = BFQ_BFQQ_TOO_IDLE; -+ else -+ goto schedule_dispatch; -+ -+ bfq_bfqq_expire(bfqd, bfqq, true, reason); -+ } -+ -+schedule_dispatch: -+ bfq_schedule_dispatch(bfqd); -+ -+ spin_unlock_irqrestore(bfqd->queue->queue_lock, flags); -+ return HRTIMER_NORESTART; -+} -+ -+static void bfq_shutdown_timer_wq(struct bfq_data *bfqd) -+{ -+ hrtimer_cancel(&bfqd->idle_slice_timer); -+ cancel_work_sync(&bfqd->unplug_work); -+} -+ -+static void __bfq_put_async_bfqq(struct bfq_data *bfqd, -+ struct bfq_queue **bfqq_ptr) -+{ -+ struct bfq_group *root_group = bfqd->root_group; -+ struct bfq_queue *bfqq = *bfqq_ptr; -+ -+ bfq_log(bfqd, "put_async_bfqq: %p", bfqq); -+ if (bfqq) { -+ bfq_bfqq_move(bfqd, bfqq, root_group); -+ bfq_log_bfqq(bfqd, bfqq, "put_async_bfqq: putting %p, %d", -+ bfqq, bfqq->ref); -+ bfq_put_queue(bfqq); -+ *bfqq_ptr = NULL; -+ } -+} -+ -+/* -+ * Release all the bfqg references to its async queues. If we are -+ * deallocating the group these queues may still contain requests, so -+ * we reparent them to the root cgroup (i.e., the only one that will -+ * exist for sure until all the requests on a device are gone). 
-+ */ -+static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg) -+{ -+ int i, j; -+ -+ for (i = 0; i < 2; i++) -+ for (j = 0; j < IOPRIO_BE_NR; j++) -+ __bfq_put_async_bfqq(bfqd, &bfqg->async_bfqq[i][j]); -+ -+ __bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq); -+} -+ -+static void bfq_exit_queue(struct elevator_queue *e) -+{ -+ struct bfq_data *bfqd = e->elevator_data; -+ struct request_queue *q = bfqd->queue; -+ struct bfq_queue *bfqq, *n; -+ -+ bfq_shutdown_timer_wq(bfqd); -+ -+ spin_lock_irq(q->queue_lock); -+ -+ BUG_ON(bfqd->in_service_queue); -+ list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list) -+ bfq_deactivate_bfqq(bfqd, bfqq, false, false); -+ -+ spin_unlock_irq(q->queue_lock); -+ -+ bfq_shutdown_timer_wq(bfqd); -+ -+ BUG_ON(hrtimer_active(&bfqd->idle_slice_timer)); -+ -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+ blkcg_deactivate_policy(q, &blkcg_policy_bfq); -+#else -+ bfq_put_async_queues(bfqd, bfqd->root_group); -+ kfree(bfqd->root_group); -+#endif -+ -+ kfree(bfqd); -+} -+ -+static void bfq_init_root_group(struct bfq_group *root_group, -+ struct bfq_data *bfqd) -+{ -+ int i; -+ -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+ root_group->entity.parent = NULL; -+ root_group->my_entity = NULL; -+ root_group->bfqd = bfqd; -+#endif -+ root_group->rq_pos_tree = RB_ROOT; -+ for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) -+ root_group->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT; -+ root_group->sched_data.bfq_class_idle_last_service = jiffies; -+} -+ -+static int bfq_init_queue(struct request_queue *q, struct elevator_type *e) -+{ -+ struct bfq_data *bfqd; -+ struct elevator_queue *eq; -+ -+ eq = elevator_alloc(q, e); -+ if (!eq) -+ return -ENOMEM; -+ -+ bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node); -+ if (!bfqd) { -+ kobject_put(&eq->kobj); -+ return -ENOMEM; -+ } -+ eq->elevator_data = bfqd; -+ -+ /* -+ * Our fallback bfqq if bfq_find_alloc_queue() runs into OOM issues. -+ * Grab a permanent reference to it, so that the normal code flow -+ * will not attempt to free it. -+ */ -+ bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, NULL, 1, 0); -+ bfqd->oom_bfqq.ref++; -+ bfqd->oom_bfqq.new_ioprio = BFQ_DEFAULT_QUEUE_IOPRIO; -+ bfqd->oom_bfqq.new_ioprio_class = IOPRIO_CLASS_BE; -+ bfqd->oom_bfqq.entity.new_weight = -+ bfq_ioprio_to_weight(bfqd->oom_bfqq.new_ioprio); -+ -+ /* oom_bfqq does not participate to bursts */ -+ bfq_clear_bfqq_just_created(&bfqd->oom_bfqq); -+ /* -+ * Trigger weight initialization, according to ioprio, at the -+ * oom_bfqq's first activation. The oom_bfqq's ioprio and ioprio -+ * class won't be changed any more. 
-+ */ -+ bfqd->oom_bfqq.entity.prio_changed = 1; -+ -+ bfqd->queue = q; -+ -+ spin_lock_irq(q->queue_lock); -+ q->elevator = eq; -+ spin_unlock_irq(q->queue_lock); -+ -+ bfqd->root_group = bfq_create_group_hierarchy(bfqd, q->node); -+ if (!bfqd->root_group) -+ goto out_free; -+ bfq_init_root_group(bfqd->root_group, bfqd); -+ bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group); -+ -+ hrtimer_init(&bfqd->idle_slice_timer, CLOCK_MONOTONIC, -+ HRTIMER_MODE_REL); -+ bfqd->idle_slice_timer.function = bfq_idle_slice_timer; -+ -+ bfqd->queue_weights_tree = RB_ROOT; -+ bfqd->group_weights_tree = RB_ROOT; -+ -+ INIT_WORK(&bfqd->unplug_work, bfq_kick_queue); -+ -+ INIT_LIST_HEAD(&bfqd->active_list); -+ INIT_LIST_HEAD(&bfqd->idle_list); -+ INIT_HLIST_HEAD(&bfqd->burst_list); -+ -+ bfqd->hw_tag = -1; -+ -+ bfqd->bfq_max_budget = bfq_default_max_budget; -+ -+ bfqd->bfq_fifo_expire[0] = bfq_fifo_expire[0]; -+ bfqd->bfq_fifo_expire[1] = bfq_fifo_expire[1]; -+ bfqd->bfq_back_max = bfq_back_max; -+ bfqd->bfq_back_penalty = bfq_back_penalty; -+ bfqd->bfq_slice_idle = bfq_slice_idle; -+ bfqd->bfq_timeout = bfq_timeout; -+ -+ bfqd->bfq_requests_within_timer = 120; -+ -+ bfqd->bfq_large_burst_thresh = 8; -+ bfqd->bfq_burst_interval = msecs_to_jiffies(180); -+ -+ bfqd->low_latency = true; -+ -+ /* -+ * Trade-off between responsiveness and fairness. -+ */ -+ bfqd->bfq_wr_coeff = 30; -+ bfqd->bfq_wr_rt_max_time = msecs_to_jiffies(300); -+ bfqd->bfq_wr_max_time = 0; -+ bfqd->bfq_wr_min_idle_time = msecs_to_jiffies(2000); -+ bfqd->bfq_wr_min_inter_arr_async = msecs_to_jiffies(500); -+ bfqd->bfq_wr_max_softrt_rate = 7000; /* -+ * Approximate rate required -+ * to playback or record a -+ * high-definition compressed -+ * video. -+ */ -+ bfqd->wr_busy_queues = 0; -+ -+ /* -+ * Begin by assuming, optimistically, that the device is a -+ * high-speed one, and that its peak rate is equal to 2/3 of -+ * the highest reference rate. -+ */ -+ bfqd->RT_prod = R_fast[blk_queue_nonrot(bfqd->queue)] * -+ T_fast[blk_queue_nonrot(bfqd->queue)]; -+ bfqd->peak_rate = R_fast[blk_queue_nonrot(bfqd->queue)] * 2 / 3; -+ bfqd->device_speed = BFQ_BFQD_FAST; -+ -+ return 0; -+ -+out_free: -+ kfree(bfqd); -+ kobject_put(&eq->kobj); -+ return -ENOMEM; -+} -+ -+static void bfq_slab_kill(void) -+{ -+ kmem_cache_destroy(bfq_pool); -+} -+ -+static int __init bfq_slab_setup(void) -+{ -+ bfq_pool = KMEM_CACHE(bfq_queue, 0); -+ if (!bfq_pool) -+ return -ENOMEM; -+ return 0; -+} -+ -+static ssize_t bfq_var_show(unsigned int var, char *page) -+{ -+ return sprintf(page, "%u\n", var); -+} -+ -+static ssize_t bfq_var_store(unsigned long *var, const char *page, -+ size_t count) -+{ -+ unsigned long new_val; -+ int ret = kstrtoul(page, 10, &new_val); -+ -+ if (ret == 0) -+ *var = new_val; -+ -+ return count; -+} -+ -+static ssize_t bfq_wr_max_time_show(struct elevator_queue *e, char *page) -+{ -+ struct bfq_data *bfqd = e->elevator_data; -+ -+ return sprintf(page, "%d\n", bfqd->bfq_wr_max_time > 0 ? 
-+ jiffies_to_msecs(bfqd->bfq_wr_max_time) : -+ jiffies_to_msecs(bfq_wr_duration(bfqd))); -+} -+ -+static ssize_t bfq_weights_show(struct elevator_queue *e, char *page) -+{ -+ struct bfq_queue *bfqq; -+ struct bfq_data *bfqd = e->elevator_data; -+ ssize_t num_char = 0; -+ -+ num_char += sprintf(page + num_char, "Tot reqs queued %d\n\n", -+ bfqd->queued); -+ -+ spin_lock_irq(bfqd->queue->queue_lock); -+ -+ num_char += sprintf(page + num_char, "Active:\n"); -+ list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list) { -+ num_char += sprintf(page + num_char, -+ "pid%d: weight %hu, nr_queued %d %d, ", -+ bfqq->pid, -+ bfqq->entity.weight, -+ bfqq->queued[0], -+ bfqq->queued[1]); -+ num_char += sprintf(page + num_char, -+ "dur %d/%u\n", -+ jiffies_to_msecs( -+ jiffies - -+ bfqq->last_wr_start_finish), -+ jiffies_to_msecs(bfqq->wr_cur_max_time)); -+ } -+ -+ num_char += sprintf(page + num_char, "Idle:\n"); -+ list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list) { -+ num_char += sprintf(page + num_char, -+ "pid%d: weight %hu, dur %d/%u\n", -+ bfqq->pid, -+ bfqq->entity.weight, -+ jiffies_to_msecs(jiffies - -+ bfqq->last_wr_start_finish), -+ jiffies_to_msecs(bfqq->wr_cur_max_time)); -+ } -+ -+ spin_unlock_irq(bfqd->queue->queue_lock); -+ -+ return num_char; -+} -+ -+#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ -+static ssize_t __FUNC(struct elevator_queue *e, char *page) \ -+{ \ -+ struct bfq_data *bfqd = e->elevator_data; \ -+ u64 __data = __VAR; \ -+ if (__CONV == 1) \ -+ __data = jiffies_to_msecs(__data); \ -+ else if (__CONV == 2) \ -+ __data = div_u64(__data, NSEC_PER_MSEC); \ -+ return bfq_var_show(__data, (page)); \ -+} -+SHOW_FUNCTION(bfq_fifo_expire_sync_show, bfqd->bfq_fifo_expire[1], 2); -+SHOW_FUNCTION(bfq_fifo_expire_async_show, bfqd->bfq_fifo_expire[0], 2); -+SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0); -+SHOW_FUNCTION(bfq_back_seek_penalty_show, bfqd->bfq_back_penalty, 0); -+SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 2); -+SHOW_FUNCTION(bfq_max_budget_show, bfqd->bfq_user_max_budget, 0); -+SHOW_FUNCTION(bfq_timeout_sync_show, bfqd->bfq_timeout, 1); -+SHOW_FUNCTION(bfq_strict_guarantees_show, bfqd->strict_guarantees, 0); -+SHOW_FUNCTION(bfq_low_latency_show, bfqd->low_latency, 0); -+SHOW_FUNCTION(bfq_wr_coeff_show, bfqd->bfq_wr_coeff, 0); -+SHOW_FUNCTION(bfq_wr_rt_max_time_show, bfqd->bfq_wr_rt_max_time, 1); -+SHOW_FUNCTION(bfq_wr_min_idle_time_show, bfqd->bfq_wr_min_idle_time, 1); -+SHOW_FUNCTION(bfq_wr_min_inter_arr_async_show, bfqd->bfq_wr_min_inter_arr_async, -+ 1); -+SHOW_FUNCTION(bfq_wr_max_softrt_rate_show, bfqd->bfq_wr_max_softrt_rate, 0); -+#undef SHOW_FUNCTION -+ -+#define USEC_SHOW_FUNCTION(__FUNC, __VAR) \ -+static ssize_t __FUNC(struct elevator_queue *e, char *page) \ -+{ \ -+ struct bfq_data *bfqd = e->elevator_data; \ -+ u64 __data = __VAR; \ -+ __data = div_u64(__data, NSEC_PER_USEC); \ -+ return bfq_var_show(__data, (page)); \ -+} -+USEC_SHOW_FUNCTION(bfq_slice_idle_us_show, bfqd->bfq_slice_idle); -+#undef USEC_SHOW_FUNCTION -+ -+#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ -+static ssize_t \ -+__FUNC(struct elevator_queue *e, const char *page, size_t count) \ -+{ \ -+ struct bfq_data *bfqd = e->elevator_data; \ -+ unsigned long uninitialized_var(__data); \ -+ int ret = bfq_var_store(&__data, (page), count); \ -+ if (__data < (MIN)) \ -+ __data = (MIN); \ -+ else if (__data > (MAX)) \ -+ __data = (MAX); \ -+ if (__CONV == 1) \ -+ *(__PTR) = msecs_to_jiffies(__data); \ -+ else if (__CONV == 2) \ -+ *(__PTR) = (u64)__data 
* NSEC_PER_MSEC; \ -+ else \ -+ *(__PTR) = __data; \ -+ return ret; \ -+} -+STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1, -+ INT_MAX, 2); -+STORE_FUNCTION(bfq_fifo_expire_async_store, &bfqd->bfq_fifo_expire[0], 1, -+ INT_MAX, 2); -+STORE_FUNCTION(bfq_back_seek_max_store, &bfqd->bfq_back_max, 0, INT_MAX, 0); -+STORE_FUNCTION(bfq_back_seek_penalty_store, &bfqd->bfq_back_penalty, 1, -+ INT_MAX, 0); -+STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 2); -+STORE_FUNCTION(bfq_wr_coeff_store, &bfqd->bfq_wr_coeff, 1, INT_MAX, 0); -+STORE_FUNCTION(bfq_wr_max_time_store, &bfqd->bfq_wr_max_time, 0, INT_MAX, 1); -+STORE_FUNCTION(bfq_wr_rt_max_time_store, &bfqd->bfq_wr_rt_max_time, 0, INT_MAX, -+ 1); -+STORE_FUNCTION(bfq_wr_min_idle_time_store, &bfqd->bfq_wr_min_idle_time, 0, -+ INT_MAX, 1); -+STORE_FUNCTION(bfq_wr_min_inter_arr_async_store, -+ &bfqd->bfq_wr_min_inter_arr_async, 0, INT_MAX, 1); -+STORE_FUNCTION(bfq_wr_max_softrt_rate_store, &bfqd->bfq_wr_max_softrt_rate, 0, -+ INT_MAX, 0); -+#undef STORE_FUNCTION -+ -+#define USEC_STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \ -+static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)\ -+{ \ -+ struct bfq_data *bfqd = e->elevator_data; \ -+ unsigned long uninitialized_var(__data); \ -+ int ret = bfq_var_store(&__data, (page), count); \ -+ if (__data < (MIN)) \ -+ __data = (MIN); \ -+ else if (__data > (MAX)) \ -+ __data = (MAX); \ -+ *(__PTR) = (u64)__data * NSEC_PER_USEC; \ -+ return ret; \ -+} -+USEC_STORE_FUNCTION(bfq_slice_idle_us_store, &bfqd->bfq_slice_idle, 0, -+ UINT_MAX); -+#undef USEC_STORE_FUNCTION -+ -+/* do nothing for the moment */ -+static ssize_t bfq_weights_store(struct elevator_queue *e, -+ const char *page, size_t count) -+{ -+ return count; -+} -+ -+static ssize_t bfq_max_budget_store(struct elevator_queue *e, -+ const char *page, size_t count) -+{ -+ struct bfq_data *bfqd = e->elevator_data; -+ unsigned long uninitialized_var(__data); -+ int ret = bfq_var_store(&__data, (page), count); -+ -+ if (__data == 0) -+ bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd); -+ else { -+ if (__data > INT_MAX) -+ __data = INT_MAX; -+ bfqd->bfq_max_budget = __data; -+ } -+ -+ bfqd->bfq_user_max_budget = __data; -+ -+ return ret; -+} -+ -+/* -+ * Leaving this name to preserve name compatibility with cfq -+ * parameters, but this timeout is used for both sync and async. 
-+ */ -+static ssize_t bfq_timeout_sync_store(struct elevator_queue *e, -+ const char *page, size_t count) -+{ -+ struct bfq_data *bfqd = e->elevator_data; -+ unsigned long uninitialized_var(__data); -+ int ret = bfq_var_store(&__data, (page), count); -+ -+ if (__data < 1) -+ __data = 1; -+ else if (__data > INT_MAX) -+ __data = INT_MAX; -+ -+ bfqd->bfq_timeout = msecs_to_jiffies(__data); -+ if (bfqd->bfq_user_max_budget == 0) -+ bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd); -+ -+ return ret; -+} -+ -+static ssize_t bfq_strict_guarantees_store(struct elevator_queue *e, -+ const char *page, size_t count) -+{ -+ struct bfq_data *bfqd = e->elevator_data; -+ unsigned long uninitialized_var(__data); -+ int ret = bfq_var_store(&__data, (page), count); -+ -+ if (__data > 1) -+ __data = 1; -+ if (!bfqd->strict_guarantees && __data == 1 -+ && bfqd->bfq_slice_idle < 8 * NSEC_PER_MSEC) -+ bfqd->bfq_slice_idle = 8 * NSEC_PER_MSEC; -+ -+ bfqd->strict_guarantees = __data; -+ -+ return ret; -+} -+ -+static ssize_t bfq_low_latency_store(struct elevator_queue *e, -+ const char *page, size_t count) -+{ -+ struct bfq_data *bfqd = e->elevator_data; -+ unsigned long uninitialized_var(__data); -+ int ret = bfq_var_store(&__data, (page), count); -+ -+ if (__data > 1) -+ __data = 1; -+ if (__data == 0 && bfqd->low_latency != 0) -+ bfq_end_wr(bfqd); -+ bfqd->low_latency = __data; -+ -+ return ret; -+} -+ -+#define BFQ_ATTR(name) \ -+ __ATTR(name, S_IRUGO|S_IWUSR, bfq_##name##_show, bfq_##name##_store) -+ -+static struct elv_fs_entry bfq_attrs[] = { -+ BFQ_ATTR(fifo_expire_sync), -+ BFQ_ATTR(fifo_expire_async), -+ BFQ_ATTR(back_seek_max), -+ BFQ_ATTR(back_seek_penalty), -+ BFQ_ATTR(slice_idle), -+ BFQ_ATTR(slice_idle_us), -+ BFQ_ATTR(max_budget), -+ BFQ_ATTR(timeout_sync), -+ BFQ_ATTR(strict_guarantees), -+ BFQ_ATTR(low_latency), -+ BFQ_ATTR(wr_coeff), -+ BFQ_ATTR(wr_max_time), -+ BFQ_ATTR(wr_rt_max_time), -+ BFQ_ATTR(wr_min_idle_time), -+ BFQ_ATTR(wr_min_inter_arr_async), -+ BFQ_ATTR(wr_max_softrt_rate), -+ BFQ_ATTR(weights), -+ __ATTR_NULL -+}; -+ -+static struct elevator_type iosched_bfq = { -+ .ops.sq = { -+ .elevator_merge_fn = bfq_merge, -+ .elevator_merged_fn = bfq_merged_request, -+ .elevator_merge_req_fn = bfq_merged_requests, -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+ .elevator_bio_merged_fn = bfq_bio_merged, -+#endif -+ .elevator_allow_bio_merge_fn = bfq_allow_bio_merge, -+ .elevator_allow_rq_merge_fn = bfq_allow_rq_merge, -+ .elevator_dispatch_fn = bfq_dispatch_requests, -+ .elevator_add_req_fn = bfq_insert_request, -+ .elevator_activate_req_fn = bfq_activate_request, -+ .elevator_deactivate_req_fn = bfq_deactivate_request, -+ .elevator_completed_req_fn = bfq_completed_request, -+ .elevator_former_req_fn = elv_rb_former_request, -+ .elevator_latter_req_fn = elv_rb_latter_request, -+ .elevator_init_icq_fn = bfq_init_icq, -+ .elevator_exit_icq_fn = bfq_exit_icq, -+ .elevator_set_req_fn = bfq_set_request, -+ .elevator_put_req_fn = bfq_put_request, -+ .elevator_may_queue_fn = bfq_may_queue, -+ .elevator_init_fn = bfq_init_queue, -+ .elevator_exit_fn = bfq_exit_queue, -+ }, -+ .icq_size = sizeof(struct bfq_io_cq), -+ .icq_align = __alignof__(struct bfq_io_cq), -+ .elevator_attrs = bfq_attrs, -+ .elevator_name = "bfq-sq", -+ .elevator_owner = THIS_MODULE, -+}; -+ -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+static struct blkcg_policy blkcg_policy_bfq = { -+ .dfl_cftypes = bfq_blkg_files, -+ .legacy_cftypes = bfq_blkcg_legacy_files, -+ -+ .cpd_alloc_fn = bfq_cpd_alloc, -+ .cpd_init_fn = bfq_cpd_init, -+ 
.cpd_bind_fn = bfq_cpd_init, -+ .cpd_free_fn = bfq_cpd_free, -+ -+ .pd_alloc_fn = bfq_pd_alloc, -+ .pd_init_fn = bfq_pd_init, -+ .pd_offline_fn = bfq_pd_offline, -+ .pd_free_fn = bfq_pd_free, -+ .pd_reset_stats_fn = bfq_pd_reset_stats, -+}; -+#endif -+ -+static int __init bfq_init(void) -+{ -+ int ret; -+ char msg[60] = "BFQ I/O-scheduler: v8r12"; -+ -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+ ret = blkcg_policy_register(&blkcg_policy_bfq); -+ if (ret) -+ return ret; -+#endif -+ -+ ret = -ENOMEM; -+ if (bfq_slab_setup()) -+ goto err_pol_unreg; -+ -+ /* -+ * Times to load large popular applications for the typical -+ * systems installed on the reference devices (see the -+ * comments before the definitions of the next two -+ * arrays). Actually, we use slightly slower values, as the -+ * estimated peak rate tends to be smaller than the actual -+ * peak rate. The reason for this last fact is that estimates -+ * are computed over much shorter time intervals than the long -+ * intervals typically used for benchmarking. Why? First, to -+ * adapt more quickly to variations. Second, because an I/O -+ * scheduler cannot rely on a peak-rate-evaluation workload to -+ * be run for a long time. -+ */ -+ T_slow[0] = msecs_to_jiffies(3500); /* actually 4 sec */ -+ T_slow[1] = msecs_to_jiffies(6000); /* actually 6.5 sec */ -+ T_fast[0] = msecs_to_jiffies(7000); /* actually 8 sec */ -+ T_fast[1] = msecs_to_jiffies(2500); /* actually 3 sec */ -+ -+ /* -+ * Thresholds that determine the switch between speed classes -+ * (see the comments before the definition of the array -+ * device_speed_thresh). These thresholds are biased towards -+ * transitions to the fast class. This is safer than the -+ * opposite bias. In fact, a wrong transition to the slow -+ * class results in short weight-raising periods, because the -+ * speed of the device then tends to be higher that the -+ * reference peak rate. On the opposite end, a wrong -+ * transition to the fast class tends to increase -+ * weight-raising periods, because of the opposite reason. -+ */ -+ device_speed_thresh[0] = (4 * R_slow[0]) / 3; -+ device_speed_thresh[1] = (4 * R_slow[1]) / 3; -+ -+ ret = elv_register(&iosched_bfq); -+ if (ret) -+ goto err_pol_unreg; -+ -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+ strcat(msg, " (with cgroups support)"); -+#endif -+ pr_info("%s", msg); -+ -+ return 0; -+ -+err_pol_unreg: -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+ blkcg_policy_unregister(&blkcg_policy_bfq); -+#endif -+ return ret; -+} -+ -+static void __exit bfq_exit(void) -+{ -+ elv_unregister(&iosched_bfq); -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+ blkcg_policy_unregister(&blkcg_policy_bfq); -+#endif -+ bfq_slab_kill(); -+} -+ -+module_init(bfq_init); -+module_exit(bfq_exit); -+ -+MODULE_AUTHOR("Arianna Avanzini, Fabio Checconi, Paolo Valente"); -+MODULE_LICENSE("GPL"); -diff --git a/block/bfq.h b/block/bfq.h -new file mode 100644 -index 000000000000..f5751ea59d98 ---- /dev/null -+++ b/block/bfq.h -@@ -0,0 +1,948 @@ -+/* -+ * BFQ v8r12 for 4.11.0: data structures and common functions prototypes. 
-+ * -+ * Based on ideas and code from CFQ: -+ * Copyright (C) 2003 Jens Axboe -+ * -+ * Copyright (C) 2008 Fabio Checconi -+ * Paolo Valente -+ * -+ * Copyright (C) 2015 Paolo Valente -+ * -+ * Copyright (C) 2017 Paolo Valente -+ */ -+ -+#ifndef _BFQ_H -+#define _BFQ_H -+ -+#include -+#include -+#include -+ -+#define BFQ_IOPRIO_CLASSES 3 -+#define BFQ_CL_IDLE_TIMEOUT (HZ/5) -+ -+#define BFQ_MIN_WEIGHT 1 -+#define BFQ_MAX_WEIGHT 1000 -+#define BFQ_WEIGHT_CONVERSION_COEFF 10 -+ -+#define BFQ_DEFAULT_QUEUE_IOPRIO 4 -+ -+#define BFQ_WEIGHT_LEGACY_DFL 100 -+#define BFQ_DEFAULT_GRP_IOPRIO 0 -+#define BFQ_DEFAULT_GRP_CLASS IOPRIO_CLASS_BE -+ -+/* -+ * Soft real-time applications are extremely more latency sensitive -+ * than interactive ones. Over-raise the weight of the former to -+ * privilege them against the latter. -+ */ -+#define BFQ_SOFTRT_WEIGHT_FACTOR 100 -+ -+struct bfq_entity; -+ -+/** -+ * struct bfq_service_tree - per ioprio_class service tree. -+ * -+ * Each service tree represents a B-WF2Q+ scheduler on its own. Each -+ * ioprio_class has its own independent scheduler, and so its own -+ * bfq_service_tree. All the fields are protected by the queue lock -+ * of the containing bfqd. -+ */ -+struct bfq_service_tree { -+ /* tree for active entities (i.e., those backlogged) */ -+ struct rb_root active; -+ /* tree for idle entities (i.e., not backlogged, with V <= F_i)*/ -+ struct rb_root idle; -+ -+ struct bfq_entity *first_idle; /* idle entity with minimum F_i */ -+ struct bfq_entity *last_idle; /* idle entity with maximum F_i */ -+ -+ u64 vtime; /* scheduler virtual time */ -+ /* scheduler weight sum; active and idle entities contribute to it */ -+ unsigned long wsum; -+}; -+ -+/** -+ * struct bfq_sched_data - multi-class scheduler. -+ * -+ * bfq_sched_data is the basic scheduler queue. It supports three -+ * ioprio_classes, and can be used either as a toplevel queue or as an -+ * intermediate queue on a hierarchical setup. @next_in_service -+ * points to the active entity of the sched_data service trees that -+ * will be scheduled next. It is used to reduce the number of steps -+ * needed for each hierarchical-schedule update. -+ * -+ * The supported ioprio_classes are the same as in CFQ, in descending -+ * priority order, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE. -+ * Requests from higher priority queues are served before all the -+ * requests from lower priority queues; among requests of the same -+ * queue requests are served according to B-WF2Q+. -+ * All the fields are protected by the queue lock of the containing bfqd. -+ */ -+struct bfq_sched_data { -+ struct bfq_entity *in_service_entity; /* entity in service */ -+ /* head-of-the-line entity in the scheduler (see comments above) */ -+ struct bfq_entity *next_in_service; -+ /* array of service trees, one per ioprio_class */ -+ struct bfq_service_tree service_tree[BFQ_IOPRIO_CLASSES]; -+ /* last time CLASS_IDLE was served */ -+ unsigned long bfq_class_idle_last_service; -+ -+}; -+ -+/** -+ * struct bfq_weight_counter - counter of the number of all active entities -+ * with a given weight. -+ */ -+struct bfq_weight_counter { -+ unsigned int weight; /* weight of the entities this counter refers to */ -+ unsigned int num_active; /* nr of active entities with this weight */ -+ /* -+ * Weights tree member (see bfq_data's @queue_weights_tree and -+ * @group_weights_tree) -+ */ -+ struct rb_node weights_node; -+}; -+ -+/** -+ * struct bfq_entity - schedulable entity. 
-+ * -+ * A bfq_entity is used to represent either a bfq_queue (leaf node in the -+ * cgroup hierarchy) or a bfq_group into the upper level scheduler. Each -+ * entity belongs to the sched_data of the parent group in the cgroup -+ * hierarchy. Non-leaf entities have also their own sched_data, stored -+ * in @my_sched_data. -+ * -+ * Each entity stores independently its priority values; this would -+ * allow different weights on different devices, but this -+ * functionality is not exported to userspace by now. Priorities and -+ * weights are updated lazily, first storing the new values into the -+ * new_* fields, then setting the @prio_changed flag. As soon as -+ * there is a transition in the entity state that allows the priority -+ * update to take place the effective and the requested priority -+ * values are synchronized. -+ * -+ * Unless cgroups are used, the weight value is calculated from the -+ * ioprio to export the same interface as CFQ. When dealing with -+ * ``well-behaved'' queues (i.e., queues that do not spend too much -+ * time to consume their budget and have true sequential behavior, and -+ * when there are no external factors breaking anticipation) the -+ * relative weights at each level of the cgroups hierarchy should be -+ * guaranteed. All the fields are protected by the queue lock of the -+ * containing bfqd. -+ */ -+struct bfq_entity { -+ struct rb_node rb_node; /* service_tree member */ -+ /* pointer to the weight counter associated with this entity */ -+ struct bfq_weight_counter *weight_counter; -+ -+ /* -+ * Flag, true if the entity is on a tree (either the active or -+ * the idle one of its service_tree) or is in service. -+ */ -+ bool on_st; -+ -+ u64 finish; /* B-WF2Q+ finish timestamp (aka F_i) */ -+ u64 start; /* B-WF2Q+ start timestamp (aka S_i) */ -+ -+ /* tree the entity is enqueued into; %NULL if not on a tree */ -+ struct rb_root *tree; -+ -+ /* -+ * minimum start time of the (active) subtree rooted at this -+ * entity; used for O(log N) lookups into active trees -+ */ -+ u64 min_start; -+ -+ /* amount of service received during the last service slot */ -+ int service; -+ -+ /* budget, used also to calculate F_i: F_i = S_i + @budget / @weight */ -+ int budget; -+ -+ unsigned int weight; /* weight of the queue */ -+ unsigned int new_weight; /* next weight if a change is in progress */ -+ -+ /* original weight, used to implement weight boosting */ -+ unsigned int orig_weight; -+ -+ /* parent entity, for hierarchical scheduling */ -+ struct bfq_entity *parent; -+ -+ /* -+ * For non-leaf nodes in the hierarchy, the associated -+ * scheduler queue, %NULL on leaf nodes. -+ */ -+ struct bfq_sched_data *my_sched_data; -+ /* the scheduler queue this entity belongs to */ -+ struct bfq_sched_data *sched_data; -+ -+ /* flag, set to request a weight, ioprio or ioprio_class change */ -+ int prio_changed; -+}; -+ -+struct bfq_group; -+ -+/** -+ * struct bfq_queue - leaf schedulable entity. -+ * -+ * A bfq_queue is a leaf request queue; it can be associated with an -+ * io_context or more, if it is async or shared between cooperating -+ * processes. @cgroup holds a reference to the cgroup, to be sure that it -+ * does not disappear while a bfqq still references it (mostly to avoid -+ * races between request issuing and task migration followed by cgroup -+ * destruction). -+ * All the fields are protected by the queue lock of the containing bfqd. 
-+ */ -+struct bfq_queue { -+ /* reference counter */ -+ int ref; -+ /* parent bfq_data */ -+ struct bfq_data *bfqd; -+ -+ /* current ioprio and ioprio class */ -+ unsigned short ioprio, ioprio_class; -+ /* next ioprio and ioprio class if a change is in progress */ -+ unsigned short new_ioprio, new_ioprio_class; -+ -+ /* -+ * Shared bfq_queue if queue is cooperating with one or more -+ * other queues. -+ */ -+ struct bfq_queue *new_bfqq; -+ /* request-position tree member (see bfq_group's @rq_pos_tree) */ -+ struct rb_node pos_node; -+ /* request-position tree root (see bfq_group's @rq_pos_tree) */ -+ struct rb_root *pos_root; -+ -+ /* sorted list of pending requests */ -+ struct rb_root sort_list; -+ /* if fifo isn't expired, next request to serve */ -+ struct request *next_rq; -+ /* number of sync and async requests queued */ -+ int queued[2]; -+ /* number of sync and async requests currently allocated */ -+ int allocated[2]; -+ /* number of pending metadata requests */ -+ int meta_pending; -+ /* fifo list of requests in sort_list */ -+ struct list_head fifo; -+ -+ /* entity representing this queue in the scheduler */ -+ struct bfq_entity entity; -+ -+ /* maximum budget allowed from the feedback mechanism */ -+ int max_budget; -+ /* budget expiration (in jiffies) */ -+ unsigned long budget_timeout; -+ -+ /* number of requests on the dispatch list or inside driver */ -+ int dispatched; -+ -+ unsigned int flags; /* status flags.*/ -+ -+ /* node for active/idle bfqq list inside parent bfqd */ -+ struct list_head bfqq_list; -+ -+ /* bit vector: a 1 for each seeky requests in history */ -+ u32 seek_history; -+ -+ /* node for the device's burst list */ -+ struct hlist_node burst_list_node; -+ -+ /* position of the last request enqueued */ -+ sector_t last_request_pos; -+ -+ /* Number of consecutive pairs of request completion and -+ * arrival, such that the queue becomes idle after the -+ * completion, but the next request arrives within an idle -+ * time slice; used only if the queue's IO_bound flag has been -+ * cleared. -+ */ -+ unsigned int requests_within_timer; -+ -+ /* pid of the process owning the queue, used for logging purposes */ -+ pid_t pid; -+ -+ /* -+ * Pointer to the bfq_io_cq owning the bfq_queue, set to %NULL -+ * if the queue is shared. -+ */ -+ struct bfq_io_cq *bic; -+ -+ /* current maximum weight-raising time for this queue */ -+ unsigned long wr_cur_max_time; -+ /* -+ * Minimum time instant such that, only if a new request is -+ * enqueued after this time instant in an idle @bfq_queue with -+ * no outstanding requests, then the task associated with the -+ * queue it is deemed as soft real-time (see the comments on -+ * the function bfq_bfqq_softrt_next_start()) -+ */ -+ unsigned long soft_rt_next_start; -+ /* -+ * Start time of the current weight-raising period if -+ * the @bfq-queue is being weight-raised, otherwise -+ * finish time of the last weight-raising period. -+ */ -+ unsigned long last_wr_start_finish; -+ /* factor by which the weight of this queue is multiplied */ -+ unsigned int wr_coeff; -+ /* -+ * Time of the last transition of the @bfq_queue from idle to -+ * backlogged. -+ */ -+ unsigned long last_idle_bklogged; -+ /* -+ * Cumulative service received from the @bfq_queue since the -+ * last transition from idle to backlogged. 
-+ */ -+ unsigned long service_from_backlogged; -+ /* -+ * Value of wr start time when switching to soft rt -+ */ -+ unsigned long wr_start_at_switch_to_srt; -+ -+ unsigned long split_time; /* time of last split */ -+}; -+ -+/** -+ * struct bfq_ttime - per process thinktime stats. -+ */ -+struct bfq_ttime { -+ u64 last_end_request; /* completion time of last request */ -+ -+ u64 ttime_total; /* total process thinktime */ -+ unsigned long ttime_samples; /* number of thinktime samples */ -+ u64 ttime_mean; /* average process thinktime */ -+ -+}; -+ -+/** -+ * struct bfq_io_cq - per (request_queue, io_context) structure. -+ */ -+struct bfq_io_cq { -+ /* associated io_cq structure */ -+ struct io_cq icq; /* must be the first member */ -+ /* array of two process queues, the sync and the async */ -+ struct bfq_queue *bfqq[2]; -+ /* associated @bfq_ttime struct */ -+ struct bfq_ttime ttime; -+ /* per (request_queue, blkcg) ioprio */ -+ int ioprio; -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+ uint64_t blkcg_serial_nr; /* the current blkcg serial */ -+#endif -+ -+ /* -+ * Snapshot of the idle window before merging; taken to -+ * remember this value while the queue is merged, so as to be -+ * able to restore it in case of split. -+ */ -+ bool saved_idle_window; -+ /* -+ * Same purpose as the previous two fields for the I/O bound -+ * classification of a queue. -+ */ -+ bool saved_IO_bound; -+ -+ /* -+ * Same purpose as the previous fields for the value of the -+ * field keeping the queue's belonging to a large burst -+ */ -+ bool saved_in_large_burst; -+ /* -+ * True if the queue belonged to a burst list before its merge -+ * with another cooperating queue. -+ */ -+ bool was_in_burst_list; -+ -+ /* -+ * Similar to previous fields: save wr information. -+ */ -+ unsigned long saved_wr_coeff; -+ unsigned long saved_last_wr_start_finish; -+ unsigned long saved_wr_start_at_switch_to_srt; -+ unsigned int saved_wr_cur_max_time; -+}; -+ -+enum bfq_device_speed { -+ BFQ_BFQD_FAST, -+ BFQ_BFQD_SLOW, -+}; -+ -+/** -+ * struct bfq_data - per-device data structure. -+ * -+ * All the fields are protected by the @queue lock. -+ */ -+struct bfq_data { -+ /* request queue for the device */ -+ struct request_queue *queue; -+ -+ /* root bfq_group for the device */ -+ struct bfq_group *root_group; -+ -+ /* -+ * rbtree of weight counters of @bfq_queues, sorted by -+ * weight. Used to keep track of whether all @bfq_queues have -+ * the same weight. The tree contains one counter for each -+ * distinct weight associated to some active and not -+ * weight-raised @bfq_queue (see the comments to the functions -+ * bfq_weights_tree_[add|remove] for further details). -+ */ -+ struct rb_root queue_weights_tree; -+ /* -+ * rbtree of non-queue @bfq_entity weight counters, sorted by -+ * weight. Used to keep track of whether all @bfq_groups have -+ * the same weight. The tree contains one counter for each -+ * distinct weight associated to some active @bfq_group (see -+ * the comments to the functions bfq_weights_tree_[add|remove] -+ * for further details). -+ */ -+ struct rb_root group_weights_tree; -+ -+ /* -+ * Number of bfq_queues containing requests (including the -+ * queue in service, even if it is idling). 
-+ */ -+ int busy_queues; -+ /* number of weight-raised busy @bfq_queues */ -+ int wr_busy_queues; -+ /* number of queued requests */ -+ int queued; -+ /* number of requests dispatched and waiting for completion */ -+ int rq_in_driver; -+ -+ /* -+ * Maximum number of requests in driver in the last -+ * @hw_tag_samples completed requests. -+ */ -+ int max_rq_in_driver; -+ /* number of samples used to calculate hw_tag */ -+ int hw_tag_samples; -+ /* flag set to one if the driver is showing a queueing behavior */ -+ int hw_tag; -+ -+ /* number of budgets assigned */ -+ int budgets_assigned; -+ -+ /* -+ * Timer set when idling (waiting) for the next request from -+ * the queue in service. -+ */ -+ struct hrtimer idle_slice_timer; -+ /* delayed work to restart dispatching on the request queue */ -+ struct work_struct unplug_work; -+ -+ /* bfq_queue in service */ -+ struct bfq_queue *in_service_queue; -+ /* bfq_io_cq (bic) associated with the @in_service_queue */ -+ struct bfq_io_cq *in_service_bic; -+ -+ /* on-disk position of the last served request */ -+ sector_t last_position; -+ -+ /* time of last request completion (ns) */ -+ u64 last_completion; -+ -+ /* time of first rq dispatch in current observation interval (ns) */ -+ u64 first_dispatch; -+ /* time of last rq dispatch in current observation interval (ns) */ -+ u64 last_dispatch; -+ -+ /* beginning of the last budget */ -+ ktime_t last_budget_start; -+ /* beginning of the last idle slice */ -+ ktime_t last_idling_start; -+ -+ /* number of samples in current observation interval */ -+ int peak_rate_samples; -+ /* num of samples of seq dispatches in current observation interval */ -+ u32 sequential_samples; -+ /* total num of sectors transferred in current observation interval */ -+ u64 tot_sectors_dispatched; -+ /* max rq size seen during current observation interval (sectors) */ -+ u32 last_rq_max_size; -+ /* time elapsed from first dispatch in current observ. interval (us) */ -+ u64 delta_from_first; -+ /* current estimate of device peak rate */ -+ u32 peak_rate; -+ -+ /* maximum budget allotted to a bfq_queue before rescheduling */ -+ int bfq_max_budget; -+ -+ /* list of all the bfq_queues active on the device */ -+ struct list_head active_list; -+ /* list of all the bfq_queues idle on the device */ -+ struct list_head idle_list; -+ -+ /* -+ * Timeout for async/sync requests; when it fires, requests -+ * are served in fifo order. -+ */ -+ u64 bfq_fifo_expire[2]; -+ /* weight of backward seeks wrt forward ones */ -+ unsigned int bfq_back_penalty; -+ /* maximum allowed backward seek */ -+ unsigned int bfq_back_max; -+ /* maximum idling time */ -+ u32 bfq_slice_idle; -+ -+ /* user-configured max budget value (0 for auto-tuning) */ -+ int bfq_user_max_budget; -+ /* -+ * Timeout for bfq_queues to consume their budget; used to -+ * prevent seeky queues from imposing long latencies to -+ * sequential or quasi-sequential ones (this also implies that -+ * seeky queues cannot receive guarantees in the service -+ * domain; after a timeout they are charged for the time they -+ * have been in service, to preserve fairness among them, but -+ * without service-domain guarantees). -+ */ -+ unsigned int bfq_timeout; -+ -+ /* -+ * Number of consecutive requests that must be issued within -+ * the idle time slice to set again idling to a queue which -+ * was marked as non-I/O-bound (see the definition of the -+ * IO_bound flag for further details). 
-+ */ -+ unsigned int bfq_requests_within_timer; -+ -+ /* -+ * Force device idling whenever needed to provide accurate -+ * service guarantees, without caring about throughput -+ * issues. CAVEAT: this may even increase latencies, in case -+ * of useless idling for processes that did stop doing I/O. -+ */ -+ bool strict_guarantees; -+ -+ /* -+ * Last time at which a queue entered the current burst of -+ * queues being activated shortly after each other; for more -+ * details about this and the following parameters related to -+ * a burst of activations, see the comments on the function -+ * bfq_handle_burst. -+ */ -+ unsigned long last_ins_in_burst; -+ /* -+ * Reference time interval used to decide whether a queue has -+ * been activated shortly after @last_ins_in_burst. -+ */ -+ unsigned long bfq_burst_interval; -+ /* number of queues in the current burst of queue activations */ -+ int burst_size; -+ -+ /* common parent entity for the queues in the burst */ -+ struct bfq_entity *burst_parent_entity; -+ /* Maximum burst size above which the current queue-activation -+ * burst is deemed as 'large'. -+ */ -+ unsigned long bfq_large_burst_thresh; -+ /* true if a large queue-activation burst is in progress */ -+ bool large_burst; -+ /* -+ * Head of the burst list (as for the above fields, more -+ * details in the comments on the function bfq_handle_burst). -+ */ -+ struct hlist_head burst_list; -+ -+ /* if set to true, low-latency heuristics are enabled */ -+ bool low_latency; -+ /* -+ * Maximum factor by which the weight of a weight-raised queue -+ * is multiplied. -+ */ -+ unsigned int bfq_wr_coeff; -+ /* maximum duration of a weight-raising period (jiffies) */ -+ unsigned int bfq_wr_max_time; -+ -+ /* Maximum weight-raising duration for soft real-time processes */ -+ unsigned int bfq_wr_rt_max_time; -+ /* -+ * Minimum idle period after which weight-raising may be -+ * reactivated for a queue (in jiffies). -+ */ -+ unsigned int bfq_wr_min_idle_time; -+ /* -+ * Minimum period between request arrivals after which -+ * weight-raising may be reactivated for an already busy async -+ * queue (in jiffies). -+ */ -+ unsigned long bfq_wr_min_inter_arr_async; -+ -+ /* Max service-rate for a soft real-time queue, in sectors/sec */ -+ unsigned int bfq_wr_max_softrt_rate; -+ /* -+ * Cached value of the product R*T, used for computing the -+ * maximum duration of weight raising automatically. -+ */ -+ u64 RT_prod; -+ /* device-speed class for the low-latency heuristic */ -+ enum bfq_device_speed device_speed; -+ -+ /* fallback dummy bfqq for extreme OOM conditions */ -+ struct bfq_queue oom_bfqq; -+}; -+ -+enum bfqq_state_flags { -+ BFQ_BFQQ_FLAG_just_created = 0, /* queue just allocated */ -+ BFQ_BFQQ_FLAG_busy, /* has requests or is in service */ -+ BFQ_BFQQ_FLAG_wait_request, /* waiting for a request */ -+ BFQ_BFQQ_FLAG_non_blocking_wait_rq, /* -+ * waiting for a request -+ * without idling the device -+ */ -+ BFQ_BFQQ_FLAG_must_alloc, /* must be allowed rq alloc */ -+ BFQ_BFQQ_FLAG_fifo_expire, /* FIFO checked in this slice */ -+ BFQ_BFQQ_FLAG_idle_window, /* slice idling enabled */ -+ BFQ_BFQQ_FLAG_sync, /* synchronous queue */ -+ BFQ_BFQQ_FLAG_IO_bound, /* -+ * bfqq has timed-out at least once -+ * having consumed at most 2/10 of -+ * its budget -+ */ -+ BFQ_BFQQ_FLAG_in_large_burst, /* -+ * bfqq activated in a large burst, -+ * see comments to bfq_handle_burst. 
-+ */ -+ BFQ_BFQQ_FLAG_softrt_update, /* -+ * may need softrt-next-start -+ * update -+ */ -+ BFQ_BFQQ_FLAG_coop, /* bfqq is shared */ -+ BFQ_BFQQ_FLAG_split_coop /* shared bfqq will be split */ -+}; -+ -+#define BFQ_BFQQ_FNS(name) \ -+static void bfq_mark_bfqq_##name(struct bfq_queue *bfqq) \ -+{ \ -+ (bfqq)->flags |= (1 << BFQ_BFQQ_FLAG_##name); \ -+} \ -+static void bfq_clear_bfqq_##name(struct bfq_queue *bfqq) \ -+{ \ -+ (bfqq)->flags &= ~(1 << BFQ_BFQQ_FLAG_##name); \ -+} \ -+static int bfq_bfqq_##name(const struct bfq_queue *bfqq) \ -+{ \ -+ return ((bfqq)->flags & (1 << BFQ_BFQQ_FLAG_##name)) != 0; \ -+} -+ -+BFQ_BFQQ_FNS(just_created); -+BFQ_BFQQ_FNS(busy); -+BFQ_BFQQ_FNS(wait_request); -+BFQ_BFQQ_FNS(non_blocking_wait_rq); -+BFQ_BFQQ_FNS(must_alloc); -+BFQ_BFQQ_FNS(fifo_expire); -+BFQ_BFQQ_FNS(idle_window); -+BFQ_BFQQ_FNS(sync); -+BFQ_BFQQ_FNS(IO_bound); -+BFQ_BFQQ_FNS(in_large_burst); -+BFQ_BFQQ_FNS(coop); -+BFQ_BFQQ_FNS(split_coop); -+BFQ_BFQQ_FNS(softrt_update); -+#undef BFQ_BFQQ_FNS -+ -+/* Logging facilities. */ -+#ifdef CONFIG_BFQ_REDIRECT_TO_CONSOLE -+ -+static const char *checked_dev_name(const struct device *dev) -+{ -+ static const char nodev[] = "nodev"; -+ -+ if (dev) -+ return dev_name(dev); -+ -+ return nodev; -+} -+ -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+static struct bfq_group *bfqq_group(struct bfq_queue *bfqq); -+static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg); -+ -+#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \ -+ char __pbuf[128]; \ -+ \ -+ assert_spin_locked((bfqd)->queue->queue_lock); \ -+ blkg_path(bfqg_to_blkg(bfqq_group(bfqq)), __pbuf, sizeof(__pbuf)); \ -+ pr_crit("%s bfq%d%c %s " fmt "\n", \ -+ checked_dev_name((bfqd)->queue->backing_dev_info->dev), \ -+ (bfqq)->pid, \ -+ bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \ -+ __pbuf, ##args); \ -+} while (0) -+ -+#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do { \ -+ char __pbuf[128]; \ -+ \ -+ blkg_path(bfqg_to_blkg(bfqg), __pbuf, sizeof(__pbuf)); \ -+ pr_crit("%s %s " fmt "\n", \ -+ checked_dev_name((bfqd)->queue->backing_dev_info->dev), \ -+ __pbuf, ##args); \ -+} while (0) -+ -+#else /* CONFIG_BFQ_SQ_GROUP_IOSCHED */ -+ -+#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \ -+ pr_crit("%s bfq%d%c " fmt "\n", \ -+ checked_dev_name((bfqd)->queue->backing_dev_info->dev), \ -+ (bfqq)->pid, bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \ -+ ##args) -+#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do {} while (0) -+ -+#endif /* CONFIG_BFQ_SQ_GROUP_IOSCHED */ -+ -+#define bfq_log(bfqd, fmt, args...) \ -+ pr_crit("%s bfq " fmt "\n", \ -+ checked_dev_name((bfqd)->queue->backing_dev_info->dev), \ -+ ##args) -+ -+#else /* CONFIG_BFQ_REDIRECT_TO_CONSOLE */ -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+static struct bfq_group *bfqq_group(struct bfq_queue *bfqq); -+static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg); -+ -+#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \ -+ char __pbuf[128]; \ -+ \ -+ assert_spin_locked((bfqd)->queue->queue_lock); \ -+ blkg_path(bfqg_to_blkg(bfqq_group(bfqq)), __pbuf, sizeof(__pbuf)); \ -+ blk_add_trace_msg((bfqd)->queue, "bfq%d%c %s " fmt, \ -+ (bfqq)->pid, \ -+ bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \ -+ __pbuf, ##args); \ -+} while (0) -+ -+#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do { \ -+ char __pbuf[128]; \ -+ \ -+ blkg_path(bfqg_to_blkg(bfqg), __pbuf, sizeof(__pbuf)); \ -+ blk_add_trace_msg((bfqd)->queue, "%s " fmt, __pbuf, ##args); \ -+} while (0) -+ -+#else /* CONFIG_BFQ_SQ_GROUP_IOSCHED */ -+ -+#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) 
\ -+ blk_add_trace_msg((bfqd)->queue, "bfq%d%c " fmt, (bfqq)->pid, \ -+ bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \ -+ ##args) -+#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do {} while (0) -+ -+#endif /* CONFIG_BFQ_SQ_GROUP_IOSCHED */ -+ -+#define bfq_log(bfqd, fmt, args...) \ -+ blk_add_trace_msg((bfqd)->queue, "bfq " fmt, ##args) -+#endif /* CONFIG_BFQ_REDIRECT_TO_CONSOLE */ -+ -+/* Expiration reasons. */ -+enum bfqq_expiration { -+ BFQ_BFQQ_TOO_IDLE = 0, /* -+ * queue has been idling for -+ * too long -+ */ -+ BFQ_BFQQ_BUDGET_TIMEOUT, /* budget took too long to be used */ -+ BFQ_BFQQ_BUDGET_EXHAUSTED, /* budget consumed */ -+ BFQ_BFQQ_NO_MORE_REQUESTS, /* the queue has no more requests */ -+ BFQ_BFQQ_PREEMPTED /* preemption in progress */ -+}; -+ -+ -+struct bfqg_stats { -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+ /* number of ios merged */ -+ struct blkg_rwstat merged; -+ /* total time spent on device in ns, may not be accurate w/ queueing */ -+ struct blkg_rwstat service_time; -+ /* total time spent waiting in scheduler queue in ns */ -+ struct blkg_rwstat wait_time; -+ /* number of IOs queued up */ -+ struct blkg_rwstat queued; -+ /* total disk time and nr sectors dispatched by this group */ -+ struct blkg_stat time; -+ /* sum of number of ios queued across all samples */ -+ struct blkg_stat avg_queue_size_sum; -+ /* count of samples taken for average */ -+ struct blkg_stat avg_queue_size_samples; -+ /* how many times this group has been removed from service tree */ -+ struct blkg_stat dequeue; -+ /* total time spent waiting for it to be assigned a timeslice. */ -+ struct blkg_stat group_wait_time; -+ /* time spent idling for this blkcg_gq */ -+ struct blkg_stat idle_time; -+ /* total time with empty current active q with other requests queued */ -+ struct blkg_stat empty_time; -+ /* fields after this shouldn't be cleared on stat reset */ -+ uint64_t start_group_wait_time; -+ uint64_t start_idle_time; -+ uint64_t start_empty_time; -+ uint16_t flags; -+#endif -+}; -+ -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+/* -+ * struct bfq_group_data - per-blkcg storage for the blkio subsystem. -+ * -+ * @ps: @blkcg_policy_storage that this structure inherits -+ * @weight: weight of the bfq_group -+ */ -+struct bfq_group_data { -+ /* must be the first member */ -+ struct blkcg_policy_data pd; -+ -+ unsigned int weight; -+}; -+ -+/** -+ * struct bfq_group - per (device, cgroup) data structure. -+ * @entity: schedulable entity to insert into the parent group sched_data. -+ * @sched_data: own sched_data, to contain child entities (they may be -+ * both bfq_queues and bfq_groups). -+ * @bfqd: the bfq_data for the device this group acts upon. -+ * @async_bfqq: array of async queues for all the tasks belonging to -+ * the group, one queue per ioprio value per ioprio_class, -+ * except for the idle class that has only one queue. -+ * @async_idle_bfqq: async queue for the idle class (ioprio is ignored). -+ * @my_entity: pointer to @entity, %NULL for the toplevel group; used -+ * to avoid too many special cases during group creation/ -+ * migration. -+ * @active_entities: number of active entities belonging to the group; -+ * unused for the root group. Used to know whether there -+ * are groups with more than one active @bfq_entity -+ * (see the comments to the function -+ * bfq_bfqq_may_idle()). -+ * @rq_pos_tree: rbtree sorted by next_request position, used when -+ * determining if two or more queues have interleaving -+ * requests (see bfq_find_close_cooperator()). 
-+ * -+ * Each (device, cgroup) pair has its own bfq_group, i.e., for each cgroup -+ * there is a set of bfq_groups, each one collecting the lower-level -+ * entities belonging to the group that are acting on the same device. -+ * -+ * Locking works as follows: -+ * o @bfqd is protected by the queue lock, RCU is used to access it -+ * from the readers. -+ * o All the other fields are protected by the @bfqd queue lock. -+ */ -+struct bfq_group { -+ /* must be the first member */ -+ struct blkg_policy_data pd; -+ -+ struct bfq_entity entity; -+ struct bfq_sched_data sched_data; -+ -+ void *bfqd; -+ -+ struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR]; -+ struct bfq_queue *async_idle_bfqq; -+ -+ struct bfq_entity *my_entity; -+ -+ int active_entities; -+ -+ struct rb_root rq_pos_tree; -+ -+ struct bfqg_stats stats; -+}; -+ -+#else -+struct bfq_group { -+ struct bfq_sched_data sched_data; -+ -+ struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR]; -+ struct bfq_queue *async_idle_bfqq; -+ -+ struct rb_root rq_pos_tree; -+}; -+#endif -+ -+static struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity); -+ -+static unsigned int bfq_class_idx(struct bfq_entity *entity) -+{ -+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); -+ -+ return bfqq ? bfqq->ioprio_class - 1 : -+ BFQ_DEFAULT_GRP_CLASS - 1; -+} -+ -+static struct bfq_service_tree * -+bfq_entity_service_tree(struct bfq_entity *entity) -+{ -+ struct bfq_sched_data *sched_data = entity->sched_data; -+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); -+ unsigned int idx = bfq_class_idx(entity); -+ -+ BUG_ON(idx >= BFQ_IOPRIO_CLASSES); -+ BUG_ON(sched_data == NULL); -+ -+ if (bfqq) -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "entity_service_tree %p %d", -+ sched_data->service_tree + idx, idx); -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+ else { -+ struct bfq_group *bfqg = -+ container_of(entity, struct bfq_group, entity); -+ -+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg, -+ "entity_service_tree %p %d", -+ sched_data->service_tree + idx, idx); -+ } -+#endif -+ return sched_data->service_tree + idx; -+} -+ -+static struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync) -+{ -+ return bic->bfqq[is_sync]; -+} -+ -+static void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, -+ bool is_sync) -+{ -+ bic->bfqq[is_sync] = bfqq; -+} -+ -+static struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic) -+{ -+ return bic->icq.q->elevator->elevator_data; -+} -+ -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+ -+static struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq) -+{ -+ struct bfq_entity *group_entity = bfqq->entity.parent; -+ -+ if (!group_entity) -+ group_entity = &bfqq->bfqd->root_group->entity; -+ -+ return container_of(group_entity, struct bfq_group, entity); -+} -+ -+#else -+ -+static struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq) -+{ -+ return bfqq->bfqd->root_group; -+} -+ -+#endif -+ -+static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio); -+static void bfq_put_queue(struct bfq_queue *bfqq); -+static void bfq_dispatch_insert(struct request_queue *q, struct request *rq); -+static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd, -+ struct bio *bio, bool is_sync, -+ struct bfq_io_cq *bic); -+static void bfq_end_wr_async_queues(struct bfq_data *bfqd, -+ struct bfq_group *bfqg); -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg); -+#endif -+static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq); -+ -+#endif /* 
_BFQ_H */ -diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h -index 8da66379f7ea..bf000c58644b 100644 ---- a/include/linux/blkdev.h -+++ b/include/linux/blkdev.h -@@ -54,7 +54,7 @@ struct blk_stat_callback; - * Maximum number of blkcg policies allowed to be registered concurrently. - * Defined here to simplify include dependency. - */ --#define BLKCG_MAX_POLS 3 -+#define BLKCG_MAX_POLS 4 - - typedef void (rq_end_io_fn)(struct request *, blk_status_t); - - -From 9916fed6c89c61a2b26053be04501784570bbec8 Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Thu, 20 Jul 2017 10:46:39 +0200 -Subject: [PATCH 02/51] Add extra checks related to entity scheduling - -- extra checks related to ioprioi-class changes -- specific check on st->idle in __bfq_requeue_entity - -Signed-off-by: Paolo Valente ---- - block/bfq-sched.c | 9 ++++++++- - 1 file changed, 8 insertions(+), 1 deletion(-) - -diff --git a/block/bfq-sched.c b/block/bfq-sched.c -index ac8991bca9fa..5ddf9af4261e 100644 ---- a/block/bfq-sched.c -+++ b/block/bfq-sched.c -@@ -812,6 +812,7 @@ __bfq_entity_update_weight_prio(struct bfq_service_tree *old_st, - } - #endif - -+ BUG_ON(entity->tree && update_class_too); - BUG_ON(old_st->wsum < entity->weight); - old_st->wsum -= entity->weight; - -@@ -883,8 +884,10 @@ __bfq_entity_update_weight_prio(struct bfq_service_tree *old_st, - - new_st->wsum += entity->weight; - -- if (new_st != old_st) -+ if (new_st != old_st) { -+ BUG_ON(!update_class_too); - entity->start = new_st->vtime; -+ } - } - - return new_st; -@@ -993,6 +996,7 @@ static void bfq_update_fin_time_enqueue(struct bfq_entity *entity, - * tree, then it is safe to invoke next function with the last - * parameter set (see the comments on the function). - */ -+ BUG_ON(entity->tree); - st = __bfq_entity_update_weight_prio(st, entity, true); - bfq_calc_finish(entity, entity->budget); - -@@ -1113,9 +1117,11 @@ static void __bfq_activate_entity(struct bfq_entity *entity, - * check for that. - */ - bfq_idle_extract(st, entity); -+ BUG_ON(entity->tree); - entity->start = bfq_gt(min_vstart, entity->finish) ? - min_vstart : entity->finish; - } else { -+ BUG_ON(entity->tree); - /* - * The finish time of the entity may be invalid, and - * it is in the past for sure, otherwise the queue -@@ -1203,6 +1209,7 @@ static void __bfq_requeue_entity(struct bfq_entity *entity) - */ - bfq_calc_finish(entity, entity->service); - entity->start = entity->finish; -+ BUG_ON(entity->tree && entity->tree == &st->idle); - BUG_ON(entity->tree && entity->tree != &st->active); - /* - * In addition, if the entity had more than one child - -From 8f5b2c25dcbe31dda524e85b921b3aa1fe11d111 Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Fri, 21 Jul 2017 12:08:57 +0200 -Subject: [PATCH 03/51] block, bfq: reset in_service_entity if it becomes idle - -BFQ implements hierarchical scheduling by representing each group of -queues with a generic parent entity. For each parent entity, BFQ -maintains an in_service_entity pointer: if one of the child entities -happens to be in service, in_service_entity points to it. The -resetting of these pointers happens only on queue expirations: when -the in-service queue is expired, i.e., stops to be the queue in -service, BFQ resets all in_service_entity pointers along the -parent-entity path from this queue to the root entity. 
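
To make the resetting scheme just described concrete, here is a minimal standalone C sketch; the toy_* names and the two-level hierarchy are illustrative assumptions, not the kernel's actual types or code.

#include <assert.h>
#include <stddef.h>

struct toy_sched_data;

/* Toy model, not kernel code: each entity belongs to the sched_data of
 * its parent group; a NULL parent marks the top of the hierarchy. */
struct toy_entity {
	struct toy_entity *parent;
	struct toy_sched_data *sched_data;
};

struct toy_sched_data {
	struct toy_entity *in_service_entity; /* child currently in service */
};

/* Clear in_service_entity along the path from the expired queue's
 * entity up to the root, mirroring the expiration-time resetting
 * scheme described in the commit message above. */
static void reset_in_service_upwards(struct toy_entity *entity)
{
	for (; entity; entity = entity->parent)
		entity->sched_data->in_service_entity = NULL;
}

int main(void)
{
	struct toy_sched_data root_sd = { NULL }, group_sd = { NULL };
	struct toy_entity group = { NULL, &root_sd };    /* child of the root */
	struct toy_entity queue = { &group, &group_sd }; /* leaf queue */

	root_sd.in_service_entity = &group;
	group_sd.in_service_entity = &queue;

	reset_in_service_upwards(&queue); /* the in-service queue expires */

	assert(!group_sd.in_service_entity && !root_sd.in_service_entity);
	return 0;
}
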
- -Functions handling the scheduling of entities assume, naturally, that -in-service entities are active, i.e., have pending I/O requests (or, -as a special case, even if they have no pending requests, they are -expected to receive a new request very soon, with the scheduler idling -the storage device while waiting for such an event). Unfortunately, -the above resetting scheme of the in_service_entity pointers may cause -this assumption to be violated. For example, the in-service queue may -happen to remain without requests because of a request merge. In this -case the queue does become idle, and all related data structures are -updated accordingly. But in_service_entity still points to the queue -in the parent entity. This inconsistency may even propagate to -higher-level parent entities, if they happen to become idle as well, -as a consequence of the leaf queue becoming idle. For this queue and -parent entities, scheduling functions have an undefined behaviour, -and, as reported, may easily lead to kernel crashes or hangs. - -This commit addresses this issue by simply resetting the -in_service_entity field also when it is detected to point to an entity -becoming idle (regardless of why the entity becomes idle). - -Reported-by: Laurentiu Nicola -Signed-off-by: Paolo Valente -Tested-by: Laurentiu Nicola ---- - block/bfq-sched.c | 4 +++- - 1 file changed, 3 insertions(+), 1 deletion(-) - -diff --git a/block/bfq-sched.c b/block/bfq-sched.c -index 5ddf9af4261e..a07a06eb5c72 100644 ---- a/block/bfq-sched.c -+++ b/block/bfq-sched.c -@@ -1336,8 +1336,10 @@ static bool __bfq_deactivate_entity(struct bfq_entity *entity, - - BUG_ON(is_in_service && entity->tree && entity->tree != &st->active); - -- if (is_in_service) -+ if (is_in_service) { - bfq_calc_finish(entity, entity->service); -+ sd->in_service_entity = NULL; -+ } - - if (entity->tree == &st->active) - bfq_active_extract(st, entity); - -From 600ea668e2d340c95724bcf981d88812d6900342 Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Fri, 28 Jul 2017 21:09:51 +0200 -Subject: [PATCH 04/51] block, bfq: consider also in_service_entity to state - whether an entity is active - -Groups of BFQ queues are represented by generic entities in BFQ. When -a queue belonging to a parent entity is deactivated, the parent entity -may need to be deactivated too, in case the deactivated queue was the -only active queue for the parent entity. This deactivation may need to -be propagated upwards if the entity belongs, in its turn, to a further -higher-level entity, and so on. In particular, the upward propagation -of deactivation stops at the first parent entity that remains active -even if one of its child entities has been deactivated. - -To decide whether the last non-deactivation condition holds for a -parent entity, BFQ checks whether the field next_in_service is still -not NULL for the parent entity, after the deactivation of one of its -child entity. If it is not NULL, then there are certainly other active -entities in the parent entity, and deactivations can stop. - -Unfortunately, this check misses a corner case: if in_service_entity -is not NULL, then next_in_service may happen to be NULL, although the -parent entity is evidently active. This happens if: 1) the entity -pointed by in_service_entity is the only active entity in the parent -entity, and 2) according to the definition of next_in_service, the -in_service_entity cannot be considered as next_in_service. See the -comments on the definition of next_in_service for details on this -second point. 
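
The corner case above can be stated compactly as a predicate on the parent's scheduler data. The following standalone C sketch (toy_* names are illustrative stand-ins, not the kernel's identifiers) contrasts the old check with the extended one that the hunk further down introduces.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

struct toy_entity { int unused; }; /* stand-in; members do not matter here */

struct toy_sched_data {
	struct toy_entity *in_service_entity; /* child currently in service */
	struct toy_entity *next_in_service;   /* restrictive "next" candidate */
};

/* Old check: the parent is deemed still active only if next_in_service
 * is set; this misses the case in which the only active child is the
 * one in service, and hence not eligible as next_in_service. */
static bool still_active_old(const struct toy_sched_data *sd)
{
	return sd->next_in_service != NULL;
}

/* Extended check: upward deactivation stops if either field is set. */
static bool still_active_new(const struct toy_sched_data *sd)
{
	return sd->next_in_service || sd->in_service_entity;
}

int main(void)
{
	struct toy_entity child = { 0 };
	/* Corner case: the only active child is in service, so it cannot
	 * be next_in_service; next_in_service is NULL although the parent
	 * is evidently still active. */
	struct toy_sched_data sd = { &child, NULL };

	assert(!still_active_old(&sd)); /* old check: stops too early */
	assert(still_active_new(&sd));  /* extended check: keeps the parent */
	return 0;
}
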
- -Hitting the above corner case causes crashes. - -To address this issue, this commit: -1) Extends the above check on only next_in_service to controlling both -next_in_service and in_service_entity (if any of them is not NULL, -then no further deactivation is performed) -2) Improves the (important) comments on how next_in_service is defined -and updated; in particular it fixes a few rather obscure paragraphs - -Reported-by: Eric Wheeler -Reported-by: Rick Yiu -Reported-by: Tom X Nguyen -Signed-off-by: Paolo Valente -Tested-by: Eric Wheeler -Tested-by: Rick Yiu -Tested-by: Laurentiu Nicola -Tested-by: Tom X Nguyen ---- - block/bfq-sched.c | 140 ++++++++++++++++++++++++++++++------------------------ - block/bfq.h | 23 +++++++-- - 2 files changed, 95 insertions(+), 68 deletions(-) - -diff --git a/block/bfq-sched.c b/block/bfq-sched.c -index a07a06eb5c72..5c0f9290a79c 100644 ---- a/block/bfq-sched.c -+++ b/block/bfq-sched.c -@@ -196,21 +196,23 @@ static bool bfq_update_parent_budget(struct bfq_entity *next_in_service) - - /* - * This function tells whether entity stops being a candidate for next -- * service, according to the following logic. -+ * service, according to the restrictive definition of the field -+ * next_in_service. In particular, this function is invoked for an -+ * entity that is about to be set in service. - * -- * This function is invoked for an entity that is about to be set in -- * service. If such an entity is a queue, then the entity is no longer -- * a candidate for next service (i.e, a candidate entity to serve -- * after the in-service entity is expired). The function then returns -- * true. -+ * If entity is a queue, then the entity is no longer a candidate for -+ * next service according to the that definition, because entity is -+ * about to become the in-service queue. This function then returns -+ * true if entity is a queue. - * -- * In contrast, the entity could stil be a candidate for next service -- * if it is not a queue, and has more than one child. In fact, even if -- * one of its children is about to be set in service, other children -- * may still be the next to serve. As a consequence, a non-queue -- * entity is not a candidate for next-service only if it has only one -- * child. And only if this condition holds, then the function returns -- * true for a non-queue entity. -+ * In contrast, entity could still be a candidate for next service if -+ * it is not a queue, and has more than one active child. In fact, -+ * even if one of its children is about to be set in service, other -+ * active children may still be the next to serve, for the parent -+ * entity, even according to the above definition. As a consequence, a -+ * non-queue entity is not a candidate for next-service only if it has -+ * only one active child. And only if this condition holds, then this -+ * function returns true for a non-queue entity. - */ - static bool bfq_no_longer_next_in_service(struct bfq_entity *entity) - { -@@ -223,6 +225,18 @@ static bool bfq_no_longer_next_in_service(struct bfq_entity *entity) - - BUG_ON(bfqg == ((struct bfq_data *)(bfqg->bfqd))->root_group); - BUG_ON(bfqg->active_entities == 0); -+ /* -+ * The field active_entities does not always contain the -+ * actual number of active children entities: it happens to -+ * not account for the in-service entity in case the latter is -+ * removed from its active tree (which may get done after -+ * invoking the function bfq_no_longer_next_in_service in -+ * bfq_get_next_queue). 
Fortunately, here, i.e., while -+ * bfq_no_longer_next_in_service is not yet completed in -+ * bfq_get_next_queue, bfq_active_extract has not yet been -+ * invoked, and thus active_entities still coincides with the -+ * actual number of active entities. -+ */ - if (bfqg->active_entities == 1) - return true; - -@@ -1089,7 +1103,7 @@ static void bfq_update_fin_time_enqueue(struct bfq_entity *entity, - * one of its children receives a new request. - * - * Basically, this function updates the timestamps of entity and -- * inserts entity into its active tree, ater possible extracting it -+ * inserts entity into its active tree, ater possibly extracting it - * from its idle tree. - */ - static void __bfq_activate_entity(struct bfq_entity *entity, -@@ -1213,7 +1227,7 @@ static void __bfq_requeue_entity(struct bfq_entity *entity) - BUG_ON(entity->tree && entity->tree != &st->active); - /* - * In addition, if the entity had more than one child -- * when set in service, then was not extracted from -+ * when set in service, then it was not extracted from - * the active tree. This implies that the position of - * the entity in the active tree may need to be - * changed now, because we have just updated the start -@@ -1221,9 +1235,8 @@ static void __bfq_requeue_entity(struct bfq_entity *entity) - * time in a moment (the requeueing is then, more - * precisely, a repositioning in this case). To - * implement this repositioning, we: 1) dequeue the -- * entity here, 2) update the finish time and -- * requeue the entity according to the new -- * timestamps below. -+ * entity here, 2) update the finish time and requeue -+ * the entity according to the new timestamps below. - */ - if (entity->tree) - bfq_active_extract(st, entity); -@@ -1270,9 +1283,9 @@ static void __bfq_activate_requeue_entity(struct bfq_entity *entity, - - - /** -- * bfq_activate_entity - activate or requeue an entity representing a bfq_queue, -- * and activate, requeue or reposition all ancestors -- * for which such an update becomes necessary. -+ * bfq_activate_requeue_entity - activate or requeue an entity representing a bfq_queue, -+ * and activate, requeue or reposition all ancestors -+ * for which such an update becomes necessary. - * @entity: the entity to activate. - * @non_blocking_wait_rq: true if this entity was waiting for a request - * @requeue: true if this is a requeue, which implies that bfqq is -@@ -1308,9 +1321,9 @@ static void bfq_activate_requeue_entity(struct bfq_entity *entity, - * @ins_into_idle_tree: if false, the entity will not be put into the - * idle tree. - * -- * Deactivates an entity, independently from its previous state. Must -+ * Deactivates an entity, independently of its previous state. Must - * be invoked only if entity is on a service tree. Extracts the entity -- * from that tree, and if necessary and allowed, puts it on the idle -+ * from that tree, and if necessary and allowed, puts it into the idle - * tree. - */ - static bool __bfq_deactivate_entity(struct bfq_entity *entity, -@@ -1359,7 +1372,7 @@ static bool __bfq_deactivate_entity(struct bfq_entity *entity, - /** - * bfq_deactivate_entity - deactivate an entity representing a bfq_queue. - * @entity: the entity to deactivate. 
-- * @ins_into_idle_tree: true if the entity can be put on the idle tree -+ * @ins_into_idle_tree: true if the entity can be put into the idle tree - */ - static void bfq_deactivate_entity(struct bfq_entity *entity, - bool ins_into_idle_tree, -@@ -1406,16 +1419,29 @@ static void bfq_deactivate_entity(struct bfq_entity *entity, - */ - bfq_update_next_in_service(sd, NULL); - -- if (sd->next_in_service) { -+ if (sd->next_in_service || sd->in_service_entity) { - /* -- * The parent entity is still backlogged, -- * because next_in_service is not NULL. So, no -- * further upwards deactivation must be -- * performed. Yet, next_in_service has -- * changed. Then the schedule does need to be -- * updated upwards. -+ * The parent entity is still active, because -+ * either next_in_service or in_service_entity -+ * is not NULL. So, no further upwards -+ * deactivation must be performed. Yet, -+ * next_in_service has changed. Then the -+ * schedule does need to be updated upwards. -+ * -+ * NOTE If in_service_entity is not NULL, then -+ * next_in_service may happen to be NULL, -+ * although the parent entity is evidently -+ * active. This happens if 1) the entity -+ * pointed by in_service_entity is the only -+ * active entity in the parent entity, and 2) -+ * according to the definition of -+ * next_in_service, the in_service_entity -+ * cannot be considered as -+ * next_in_service. See the comments on the -+ * definition of next_in_service for details. - */ - BUG_ON(sd->next_in_service == entity); -+ BUG_ON(sd->in_service_entity == entity); - break; - } - -@@ -1806,45 +1832,33 @@ static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd) - - /* - * If entity is no longer a candidate for next -- * service, then we extract it from its active tree, -- * for the following reason. To further boost the -- * throughput in some special case, BFQ needs to know -- * which is the next candidate entity to serve, while -- * there is already an entity in service. In this -- * respect, to make it easy to compute/update the next -- * candidate entity to serve after the current -- * candidate has been set in service, there is a case -- * where it is necessary to extract the current -- * candidate from its service tree. Such a case is -- * when the entity just set in service cannot be also -- * a candidate for next service. Details about when -- * this conditions holds are reported in the comments -- * on the function bfq_no_longer_next_in_service() -- * invoked below. -+ * service, then it must be extracted from its active -+ * tree, so as to make sure that it won't be -+ * considered when computing next_in_service. See the -+ * comments on the function -+ * bfq_no_longer_next_in_service() for details. - */ - if (bfq_no_longer_next_in_service(entity)) - bfq_active_extract(bfq_entity_service_tree(entity), - entity); - - /* -- * For the same reason why we may have just extracted -- * entity from its active tree, we may need to update -- * next_in_service for the sched_data of entity too, -- * regardless of whether entity has been extracted. -- * In fact, even if entity has not been extracted, a -- * descendant entity may get extracted. Such an event -- * would cause a change in next_in_service for the -- * level of the descendant entity, and thus possibly -- * back to upper levels. -+ * Even if entity is not to be extracted according to -+ * the above check, a descendant entity may get -+ * extracted in one of the next iterations of this -+ * loop. 
Such an event could cause a change in -+ * next_in_service for the level of the descendant -+ * entity, and thus possibly back to this level. - * -- * We cannot perform the resulting needed update -- * before the end of this loop, because, to know which -- * is the correct next-to-serve candidate entity for -- * each level, we need first to find the leaf entity -- * to set in service. In fact, only after we know -- * which is the next-to-serve leaf entity, we can -- * discover whether the parent entity of the leaf -- * entity becomes the next-to-serve, and so on. -+ * However, we cannot perform the resulting needed -+ * update of next_in_service for this level before the -+ * end of the whole loop, because, to know which is -+ * the correct next-to-serve candidate entity for each -+ * level, we need first to find the leaf entity to set -+ * in service. In fact, only after we know which is -+ * the next-to-serve leaf entity, we can discover -+ * whether the parent entity of the leaf entity -+ * becomes the next-to-serve, and so on. - */ - - /* Log some information */ -diff --git a/block/bfq.h b/block/bfq.h -index f5751ea59d98..ebd9688b9f61 100644 ---- a/block/bfq.h -+++ b/block/bfq.h -@@ -68,17 +68,30 @@ struct bfq_service_tree { - * - * bfq_sched_data is the basic scheduler queue. It supports three - * ioprio_classes, and can be used either as a toplevel queue or as an -- * intermediate queue on a hierarchical setup. @next_in_service -- * points to the active entity of the sched_data service trees that -- * will be scheduled next. It is used to reduce the number of steps -- * needed for each hierarchical-schedule update. -+ * intermediate queue in a hierarchical setup. - * - * The supported ioprio_classes are the same as in CFQ, in descending - * priority order, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE. - * Requests from higher priority queues are served before all the - * requests from lower priority queues; among requests of the same - * queue requests are served according to B-WF2Q+. -- * All the fields are protected by the queue lock of the containing bfqd. -+ * -+ * The schedule is implemented by the service trees, plus the field -+ * @next_in_service, which points to the entity on the active trees -+ * that will be served next, if 1) no changes in the schedule occurs -+ * before the current in-service entity is expired, 2) the in-service -+ * queue becomes idle when it expires, and 3) if the entity pointed by -+ * in_service_entity is not a queue, then the in-service child entity -+ * of the entity pointed by in_service_entity becomes idle on -+ * expiration. This peculiar definition allows for the following -+ * optimization, not yet exploited: while a given entity is still in -+ * service, we already know which is the best candidate for next -+ * service among the other active entitities in the same parent -+ * entity. We can then quickly compare the timestamps of the -+ * in-service entity with those of such best candidate. -+ * -+ * All the fields are protected by the queue lock of the containing -+ * bfqd. 
- */ - struct bfq_sched_data { - struct bfq_entity *in_service_entity; /* entity in service */ - -From 6b5effd10bc6711a862e7cbd7cd2dd0146defa01 Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Thu, 4 May 2017 10:53:43 +0200 -Subject: [PATCH 05/51] block, bfq: improve and refactor throughput-boosting - logic - -When a queue associated with a process remains empty, there are cases -where throughput gets boosted if the device is idled to await the -arrival of a new I/O request for that queue. Currently, BFQ assumes -that one of these cases is when the device has no internal queueing -(regardless of the properties of the I/O being served). Unfortunately, -this condition has proved to be too general. So, this commit refines it -as "the device has no internal queueing and is rotational". - -This refinement provides a significant throughput boost with random -I/O, on flash-based storage without internal queueing. For example, on -a HiKey board, throughput increases by up to 125%, growing, e.g., from -6.9MB/s to 15.6MB/s with two or three random readers in parallel. - -This commit also refactors the code related to device idling, for the -following reason. Finding the change that provides the above large -improvement has been slightly more difficult than it had to be, -because the logic that decides whether to idle the device is still -scattered across three functions. Almost all of the logic is in the -function bfq_bfqq_may_idle, but (1) part of the decision is made in -bfq_update_idle_window, and (2) the function bfq_bfqq_must_idle may -switch off idling regardless of the output of bfq_bfqq_may_idle. In -addition, both bfq_update_idle_window and bfq_bfqq_must_idle make -their decisions as a function of parameters that are used, for similar -purposes, also in bfq_bfqq_may_idle. This commit addresses this issue -by moving all the logic into bfq_bfqq_may_idle. 
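
The refined throughput-boosting condition described above, and implemented by the hunks below, can be summarized as a single boolean predicate. The following standalone C sketch restates it for readability; the parameter names (nonrot, has_internal_queueing, seq_and_io_bound) are illustrative stand-ins for the corresponding kernel-side checks (blk_queue_nonrot(), hw_tag, and the sequential/I/O-bound/short-think-time test), not the patch's actual code.

#include <assert.h>
#include <stdbool.h>

/*
 * Idling is considered to boost throughput if
 *  (a) the device is rotational and has no internal queueing, or
 *  (b) the device is rotational and the queue's I/O is sequential and
 *      I/O-bound, or
 *  (c) the device has no internal queueing and the queue's I/O is
 *      sequential and I/O-bound.
 */
static bool idling_boosts_throughput(bool nonrot, bool has_internal_queueing,
				     bool seq_and_io_bound)
{
	bool rot_without_queueing = !nonrot && !has_internal_queueing;

	return rot_without_queueing ||
	       ((!nonrot || !has_internal_queueing) && seq_and_io_bound);
}

int main(void)
{
	/* Random I/O on a non-rotational device without internal queueing:
	 * none of (a)-(c) holds, so idling is no longer deemed beneficial;
	 * this is the case where the commit reports large gains. */
	assert(!idling_boosts_throughput(true, false, false));
	/* Sequential, I/O-bound I/O on the same device falls under (c). */
	assert(idling_boosts_throughput(true, false, true));
	return 0;
}
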
- -Signed-off-by: Paolo Valente -Signed-off-by: Luca Miccio ---- - block/bfq-sq-iosched.c | 141 +++++++++++++++++++++++++++---------------------- - block/bfq.h | 12 ++--- - 2 files changed, 83 insertions(+), 70 deletions(-) - -diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c -index 65e7c7e77f3c..30d019fc67e0 100644 ---- a/block/bfq-sq-iosched.c -+++ b/block/bfq-sq-iosched.c -@@ -684,10 +684,10 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd, - unsigned int old_wr_coeff; - bool busy = bfq_already_existing && bfq_bfqq_busy(bfqq); - -- if (bic->saved_idle_window) -- bfq_mark_bfqq_idle_window(bfqq); -+ if (bic->saved_has_short_ttime) -+ bfq_mark_bfqq_has_short_ttime(bfqq); - else -- bfq_clear_bfqq_idle_window(bfqq); -+ bfq_clear_bfqq_has_short_ttime(bfqq); - - if (bic->saved_IO_bound) - bfq_mark_bfqq_IO_bound(bfqq); -@@ -2047,7 +2047,7 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq) - if (!bic) - return; - -- bic->saved_idle_window = bfq_bfqq_idle_window(bfqq); -+ bic->saved_has_short_ttime = bfq_bfqq_has_short_ttime(bfqq); - bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq); - bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq); - bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node); -@@ -3214,9 +3214,9 @@ static void bfq_bfqq_expire(struct bfq_data *bfqd, - } - - bfq_log_bfqq(bfqd, bfqq, -- "expire (%d, slow %d, num_disp %d, idle_win %d, weight %d)", -+ "expire (%d, slow %d, num_disp %d, short_ttime %d, weight %d)", - reason, slow, bfqq->dispatched, -- bfq_bfqq_idle_window(bfqq), entity->weight); -+ bfq_bfqq_has_short_ttime(bfqq), entity->weight); - - /* - * Increase, decrease or leave budget unchanged according to -@@ -3298,7 +3298,10 @@ static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq) - static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq) - { - struct bfq_data *bfqd = bfqq->bfqd; -- bool idling_boosts_thr, idling_boosts_thr_without_issues, -+ bool rot_without_queueing = -+ !blk_queue_nonrot(bfqd->queue) && !bfqd->hw_tag, -+ bfqq_sequential_and_IO_bound, -+ idling_boosts_thr, idling_boosts_thr_without_issues, - idling_needed_for_service_guarantees, - asymmetric_scenario; - -@@ -3306,27 +3309,44 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq) - return true; - - /* -+ * Idling is performed only if slice_idle > 0. In addition, we -+ * do not idle if -+ * (a) bfqq is async -+ * (b) bfqq is in the idle io prio class: in this case we do -+ * not idle because we want to minimize the bandwidth that -+ * queues in this class can steal to higher-priority queues -+ */ -+ if (bfqd->bfq_slice_idle == 0 || !bfq_bfqq_sync(bfqq) || -+ bfq_class_idle(bfqq)) -+ return false; -+ -+ bfqq_sequential_and_IO_bound = !BFQQ_SEEKY(bfqq) && -+ bfq_bfqq_IO_bound(bfqq) && bfq_bfqq_has_short_ttime(bfqq); -+ /* - * The next variable takes into account the cases where idling - * boosts the throughput. - * - * The value of the variable is computed considering, first, that - * idling is virtually always beneficial for the throughput if: -- * (a) the device is not NCQ-capable, or -- * (b) regardless of the presence of NCQ, the device is rotational -- * and the request pattern for bfqq is I/O-bound and sequential. 
-+ * (a) the device is not NCQ-capable and rotational, or -+ * (b) regardless of the presence of NCQ, the device is rotational and -+ * the request pattern for bfqq is I/O-bound and sequential, or -+ * (c) regardless of whether it is rotational, the device is -+ * not NCQ-capable and the request pattern for bfqq is -+ * I/O-bound and sequential. - * - * Secondly, and in contrast to the above item (b), idling an - * NCQ-capable flash-based device would not boost the - * throughput even with sequential I/O; rather it would lower - * the throughput in proportion to how fast the device - * is. Accordingly, the next variable is true if any of the -- * above conditions (a) and (b) is true, and, in particular, -- * happens to be false if bfqd is an NCQ-capable flash-based -- * device. -+ * above conditions (a), (b) or (c) is true, and, in -+ * particular, happens to be false if bfqd is an NCQ-capable -+ * flash-based device. - */ -- idling_boosts_thr = !bfqd->hw_tag || -- (!blk_queue_nonrot(bfqd->queue) && bfq_bfqq_IO_bound(bfqq) && -- bfq_bfqq_idle_window(bfqq)); -+ idling_boosts_thr = rot_without_queueing || -+ ((!blk_queue_nonrot(bfqd->queue) || !bfqd->hw_tag) && -+ bfqq_sequential_and_IO_bound); - - /* - * The value of the next variable, -@@ -3497,12 +3517,10 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq) - asymmetric_scenario && !bfq_bfqq_in_large_burst(bfqq); - - /* -- * We have now all the components we need to compute the return -- * value of the function, which is true only if both the following -- * conditions hold: -- * 1) bfqq is sync, because idling make sense only for sync queues; -- * 2) idling either boosts the throughput (without issues), or -- * is necessary to preserve service guarantees. -+ * We have now all the components we need to compute the -+ * return value of the function, which is true only if idling -+ * either boosts the throughput (without issues), or is -+ * necessary to preserve service guarantees. 
- */ - bfq_log_bfqq(bfqd, bfqq, "may_idle: sync %d idling_boosts_thr %d", - bfq_bfqq_sync(bfqq), idling_boosts_thr); -@@ -3514,9 +3532,8 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq) - bfq_bfqq_IO_bound(bfqq), - idling_needed_for_service_guarantees); - -- return bfq_bfqq_sync(bfqq) && -- (idling_boosts_thr_without_issues || -- idling_needed_for_service_guarantees); -+ return idling_boosts_thr_without_issues || -+ idling_needed_for_service_guarantees; - } - - /* -@@ -3532,10 +3549,7 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq) - */ - static bool bfq_bfqq_must_idle(struct bfq_queue *bfqq) - { -- struct bfq_data *bfqd = bfqq->bfqd; -- -- return RB_EMPTY_ROOT(&bfqq->sort_list) && bfqd->bfq_slice_idle != 0 && -- bfq_bfqq_may_idle(bfqq); -+ return RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_bfqq_may_idle(bfqq); - } - - /* -@@ -3994,7 +4008,6 @@ static void bfq_set_next_ioprio_data(struct bfq_queue *bfqq, - case IOPRIO_CLASS_IDLE: - bfqq->new_ioprio_class = IOPRIO_CLASS_IDLE; - bfqq->new_ioprio = 7; -- bfq_clear_bfqq_idle_window(bfqq); - break; - } - -@@ -4058,8 +4071,14 @@ static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, - bfq_set_next_ioprio_data(bfqq, bic); - - if (is_sync) { -+ /* -+ * No need to mark as has_short_ttime if in -+ * idle_class, because no device idling is performed -+ * for queues in idle class -+ */ - if (!bfq_class_idle(bfqq)) -- bfq_mark_bfqq_idle_window(bfqq); -+ /* tentatively mark as has_short_ttime */ -+ bfq_mark_bfqq_has_short_ttime(bfqq); - bfq_mark_bfqq_sync(bfqq); - bfq_mark_bfqq_just_created(bfqq); - } else -@@ -4195,18 +4214,19 @@ bfq_update_io_seektime(struct bfq_data *bfqd, struct bfq_queue *bfqq, - blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT); - } - --/* -- * Disable idle window if the process thinks too long or seeks so much that -- * it doesn't matter. -- */ --static void bfq_update_idle_window(struct bfq_data *bfqd, -- struct bfq_queue *bfqq, -- struct bfq_io_cq *bic) -+static void bfq_update_has_short_ttime(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq, -+ struct bfq_io_cq *bic) - { -- int enable_idle; -+ bool has_short_ttime = true; - -- /* Don't idle for async or idle io prio class. */ -- if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq)) -+ /* -+ * No need to update has_short_ttime if bfqq is async or in -+ * idle io prio class, or if bfq_slice_idle is zero, because -+ * no device idling is performed for bfqq in this case. -+ */ -+ if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq) || -+ bfqd->bfq_slice_idle == 0) - return; - - /* Idle window just restored, statistics are meaningless. */ -@@ -4214,27 +4234,22 @@ static void bfq_update_idle_window(struct bfq_data *bfqd, - bfqd->bfq_wr_min_idle_time)) - return; - -- enable_idle = bfq_bfqq_idle_window(bfqq); -- -+ /* Think time is infinite if no process is linked to -+ * bfqq. 
Otherwise check average think time to -+ * decide whether to mark as has_short_ttime -+ */ - if (atomic_read(&bic->icq.ioc->active_ref) == 0 || -- bfqd->bfq_slice_idle == 0 || -- (bfqd->hw_tag && BFQQ_SEEKY(bfqq) && -- bfqq->wr_coeff == 1)) -- enable_idle = 0; -- else if (bfq_sample_valid(bic->ttime.ttime_samples)) { -- if (bic->ttime.ttime_mean > bfqd->bfq_slice_idle && -- bfqq->wr_coeff == 1) -- enable_idle = 0; -- else -- enable_idle = 1; -- } -- bfq_log_bfqq(bfqd, bfqq, "update_idle_window: enable_idle %d", -- enable_idle); -+ (bfq_sample_valid(bic->ttime.ttime_samples) && -+ bic->ttime.ttime_mean > bfqd->bfq_slice_idle)) -+ has_short_ttime = false; -+ -+ bfq_log_bfqq(bfqd, bfqq, "update_has_short_ttime: has_short_ttime %d", -+ has_short_ttime); - -- if (enable_idle) -- bfq_mark_bfqq_idle_window(bfqq); -+ if (has_short_ttime) -+ bfq_mark_bfqq_has_short_ttime(bfqq); - else -- bfq_clear_bfqq_idle_window(bfqq); -+ bfq_clear_bfqq_has_short_ttime(bfqq); - } - - /* -@@ -4250,14 +4265,12 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq, - bfqq->meta_pending++; - - bfq_update_io_thinktime(bfqd, bic); -+ bfq_update_has_short_ttime(bfqd, bfqq, bic); - bfq_update_io_seektime(bfqd, bfqq, rq); -- if (bfqq->entity.service > bfq_max_budget(bfqd) / 8 || -- !BFQQ_SEEKY(bfqq)) -- bfq_update_idle_window(bfqd, bfqq, bic); - - bfq_log_bfqq(bfqd, bfqq, -- "rq_enqueued: idle_window=%d (seeky %d)", -- bfq_bfqq_idle_window(bfqq), BFQQ_SEEKY(bfqq)); -+ "rq_enqueued: has_short_ttime=%d (seeky %d)", -+ bfq_bfqq_has_short_ttime(bfqq), BFQQ_SEEKY(bfqq)); - - bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq); - -diff --git a/block/bfq.h b/block/bfq.h -index ebd9688b9f61..34fc4697fd89 100644 ---- a/block/bfq.h -+++ b/block/bfq.h -@@ -349,11 +349,11 @@ struct bfq_io_cq { - #endif - - /* -- * Snapshot of the idle window before merging; taken to -- * remember this value while the queue is merged, so as to be -- * able to restore it in case of split. -+ * Snapshot of the has_short_time flag before merging; taken -+ * to remember its value while the queue is merged, so as to -+ * be able to restore it in case of split. - */ -- bool saved_idle_window; -+ bool saved_has_short_ttime; - /* - * Same purpose as the previous two fields for the I/O bound - * classification of a queue. -@@ -610,7 +610,7 @@ enum bfqq_state_flags { - */ - BFQ_BFQQ_FLAG_must_alloc, /* must be allowed rq alloc */ - BFQ_BFQQ_FLAG_fifo_expire, /* FIFO checked in this slice */ -- BFQ_BFQQ_FLAG_idle_window, /* slice idling enabled */ -+ BFQ_BFQQ_FLAG_has_short_ttime, /* queue has a short think time */ - BFQ_BFQQ_FLAG_sync, /* synchronous queue */ - BFQ_BFQQ_FLAG_IO_bound, /* - * bfqq has timed-out at least once -@@ -649,7 +649,7 @@ BFQ_BFQQ_FNS(wait_request); - BFQ_BFQQ_FNS(non_blocking_wait_rq); - BFQ_BFQQ_FNS(must_alloc); - BFQ_BFQQ_FNS(fifo_expire); --BFQ_BFQQ_FNS(idle_window); -+BFQ_BFQQ_FNS(has_short_ttime); - BFQ_BFQQ_FNS(sync); - BFQ_BFQQ_FNS(IO_bound); - BFQ_BFQQ_FNS(in_large_burst); - -From b5e746fa99d961a5642cffb27c19a77e8b638007 Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Mon, 19 Dec 2016 16:59:33 +0100 -Subject: [PATCH 06/51] FIRST BFQ-MQ COMMIT: Copy bfq-sq-iosched.c as - bfq-mq-iosched.c - -This commit introduces bfq-mq-iosched.c, the main source file that -will contain the code of bfq for blk-mq. I name tentatively -bfq-mq this version of bfq. - -For the moment, the file bfq-mq-iosched.c is just a copy of -bfq-sq-iosched.c, i.e, of the main source file of bfq for blk. 
- -Signed-off-by: Paolo Valente ---- - block/bfq-mq-iosched.c | 5392 ++++++++++++++++++++++++++++++++++++++++++++++++ - 1 file changed, 5392 insertions(+) - create mode 100644 block/bfq-mq-iosched.c - -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -new file mode 100644 -index 000000000000..30d019fc67e0 ---- /dev/null -+++ b/block/bfq-mq-iosched.c -@@ -0,0 +1,5392 @@ -+/* -+ * Budget Fair Queueing (BFQ) I/O scheduler. -+ * -+ * Based on ideas and code from CFQ: -+ * Copyright (C) 2003 Jens Axboe -+ * -+ * Copyright (C) 2008 Fabio Checconi -+ * Paolo Valente -+ * -+ * Copyright (C) 2015 Paolo Valente -+ * -+ * Copyright (C) 2017 Paolo Valente -+ * -+ * Licensed under the GPL-2 as detailed in the accompanying COPYING.BFQ -+ * file. -+ * -+ * BFQ is a proportional-share I/O scheduler, with some extra -+ * low-latency capabilities. BFQ also supports full hierarchical -+ * scheduling through cgroups. Next paragraphs provide an introduction -+ * on BFQ inner workings. Details on BFQ benefits and usage can be -+ * found in Documentation/block/bfq-iosched.txt. -+ * -+ * BFQ is a proportional-share storage-I/O scheduling algorithm based -+ * on the slice-by-slice service scheme of CFQ. But BFQ assigns -+ * budgets, measured in number of sectors, to processes instead of -+ * time slices. The device is not granted to the in-service process -+ * for a given time slice, but until it has exhausted its assigned -+ * budget. This change from the time to the service domain enables BFQ -+ * to distribute the device throughput among processes as desired, -+ * without any distortion due to throughput fluctuations, or to device -+ * internal queueing. BFQ uses an ad hoc internal scheduler, called -+ * B-WF2Q+, to schedule processes according to their budgets. More -+ * precisely, BFQ schedules queues associated with processes. Thanks to -+ * the accurate policy of B-WF2Q+, BFQ can afford to assign high -+ * budgets to I/O-bound processes issuing sequential requests (to -+ * boost the throughput), and yet guarantee a low latency to -+ * interactive and soft real-time applications. -+ * -+ * NOTE: if the main or only goal, with a given device, is to achieve -+ * the maximum-possible throughput at all times, then do switch off -+ * all low-latency heuristics for that device, by setting low_latency -+ * to 0. -+ * -+ * BFQ is described in [1], where also a reference to the initial, more -+ * theoretical paper on BFQ can be found. The interested reader can find -+ * in the latter paper full details on the main algorithm, as well as -+ * formulas of the guarantees and formal proofs of all the properties. -+ * With respect to the version of BFQ presented in these papers, this -+ * implementation adds a few more heuristics, such as the one that -+ * guarantees a low latency to soft real-time applications, and a -+ * hierarchical extension based on H-WF2Q+. -+ * -+ * B-WF2Q+ is based on WF2Q+, that is described in [2], together with -+ * H-WF2Q+, while the augmented tree used to implement B-WF2Q+ with O(log N) -+ * complexity derives from the one introduced with EEVDF in [3]. -+ * -+ * [1] P. Valente, A. Avanzini, "Evolution of the BFQ Storage I/O -+ * Scheduler", Proceedings of the First Workshop on Mobile System -+ * Technologies (MST-2015), May 2015. -+ * http://algogroup.unimore.it/people/paolo/disk_sched/mst-2015.pdf -+ * -+ * http://algogroup.unimo.it/people/paolo/disk_sched/bf1-v1-suite-results.pdf -+ * -+ * [2] Jon C.R. Bennett and H. 
Zhang, ``Hierarchical Packet Fair Queueing -+ * Algorithms,'' IEEE/ACM Transactions on Networking, 5(5):675-689, -+ * Oct 1997. -+ * -+ * http://www.cs.cmu.edu/~hzhang/papers/TON-97-Oct.ps.gz -+ * -+ * [3] I. Stoica and H. Abdel-Wahab, ``Earliest Eligible Virtual Deadline -+ * First: A Flexible and Accurate Mechanism for Proportional Share -+ * Resource Allocation,'' technical report. -+ * -+ * http://www.cs.berkeley.edu/~istoica/papers/eevdf-tr-95.pdf -+ */ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include "blk.h" -+#include "bfq.h" -+ -+/* Expiration time of sync (0) and async (1) requests, in ns. */ -+static const u64 bfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 }; -+ -+/* Maximum backwards seek, in KiB. */ -+static const int bfq_back_max = (16 * 1024); -+ -+/* Penalty of a backwards seek, in number of sectors. */ -+static const int bfq_back_penalty = 2; -+ -+/* Idling period duration, in ns. */ -+static u32 bfq_slice_idle = (NSEC_PER_SEC / 125); -+ -+/* Minimum number of assigned budgets for which stats are safe to compute. */ -+static const int bfq_stats_min_budgets = 194; -+ -+/* Default maximum budget values, in sectors and number of requests. */ -+static const int bfq_default_max_budget = (16 * 1024); -+ -+/* -+ * Async to sync throughput distribution is controlled as follows: -+ * when an async request is served, the entity is charged the number -+ * of sectors of the request, multiplied by the factor below -+ */ -+static const int bfq_async_charge_factor = 10; -+ -+/* Default timeout values, in jiffies, approximating CFQ defaults. */ -+static const int bfq_timeout = (HZ / 8); -+ -+static struct kmem_cache *bfq_pool; -+ -+/* Below this threshold (in ns), we consider thinktime immediate. */ -+#define BFQ_MIN_TT (2 * NSEC_PER_MSEC) -+ -+/* hw_tag detection: parallel requests threshold and min samples needed. */ -+#define BFQ_HW_QUEUE_THRESHOLD 4 -+#define BFQ_HW_QUEUE_SAMPLES 32 -+ -+#define BFQQ_SEEK_THR (sector_t)(8 * 100) -+#define BFQQ_SECT_THR_NONROT (sector_t)(2 * 32) -+#define BFQQ_CLOSE_THR (sector_t)(8 * 1024) -+#define BFQQ_SEEKY(bfqq) (hweight32(bfqq->seek_history) > 32/8) -+ -+/* Min number of samples required to perform peak-rate update */ -+#define BFQ_RATE_MIN_SAMPLES 32 -+/* Min observation time interval required to perform a peak-rate update (ns) */ -+#define BFQ_RATE_MIN_INTERVAL (300*NSEC_PER_MSEC) -+/* Target observation time interval for a peak-rate update (ns) */ -+#define BFQ_RATE_REF_INTERVAL NSEC_PER_SEC -+ -+/* Shift used for peak rate fixed precision calculations. */ -+#define BFQ_RATE_SHIFT 16 -+ -+/* -+ * By default, BFQ computes the duration of the weight raising for -+ * interactive applications automatically, using the following formula: -+ * duration = (R / r) * T, where r is the peak rate of the device, and -+ * R and T are two reference parameters. -+ * In particular, R is the peak rate of the reference device (see below), -+ * and T is a reference time: given the systems that are likely to be -+ * installed on the reference device according to its speed class, T is -+ * about the maximum time needed, under BFQ and while reading two files in -+ * parallel, to load typical large applications on these systems. -+ * In practice, the slower/faster the device at hand is, the more/less it -+ * takes to load applications with respect to the reference device. -+ * Accordingly, the longer/shorter BFQ grants weight raising to interactive -+ * applications. 
-+ * -+ * BFQ uses four different reference pairs (R, T), depending on: -+ * . whether the device is rotational or non-rotational; -+ * . whether the device is slow, such as old or portable HDDs, as well as -+ * SD cards, or fast, such as newer HDDs and SSDs. -+ * -+ * The device's speed class is dynamically (re)detected in -+ * bfq_update_peak_rate() every time the estimated peak rate is updated. -+ * -+ * In the following definitions, R_slow[0]/R_fast[0] and -+ * T_slow[0]/T_fast[0] are the reference values for a slow/fast -+ * rotational device, whereas R_slow[1]/R_fast[1] and -+ * T_slow[1]/T_fast[1] are the reference values for a slow/fast -+ * non-rotational device. Finally, device_speed_thresh are the -+ * thresholds used to switch between speed classes. The reference -+ * rates are not the actual peak rates of the devices used as a -+ * reference, but slightly lower values. The reason for using these -+ * slightly lower values is that the peak-rate estimator tends to -+ * yield slightly lower values than the actual peak rate (it can yield -+ * the actual peak rate only if there is only one process doing I/O, -+ * and the process does sequential I/O). -+ * -+ * Both the reference peak rates and the thresholds are measured in -+ * sectors/usec, left-shifted by BFQ_RATE_SHIFT. -+ */ -+static int R_slow[2] = {1000, 10700}; -+static int R_fast[2] = {14000, 33000}; -+/* -+ * To improve readability, a conversion function is used to initialize the -+ * following arrays, which entails that they can be initialized only in a -+ * function. -+ */ -+static int T_slow[2]; -+static int T_fast[2]; -+static int device_speed_thresh[2]; -+ -+#define BFQ_SERVICE_TREE_INIT ((struct bfq_service_tree) \ -+ { RB_ROOT, RB_ROOT, NULL, NULL, 0, 0 }) -+ -+#define RQ_BIC(rq) ((struct bfq_io_cq *) (rq)->elv.priv[0]) -+#define RQ_BFQQ(rq) ((rq)->elv.priv[1]) -+ -+static void bfq_schedule_dispatch(struct bfq_data *bfqd); -+ -+#include "bfq-ioc.c" -+#include "bfq-sched.c" -+#include "bfq-cgroup-included.c" -+ -+#define bfq_class_idle(bfqq) ((bfqq)->ioprio_class == IOPRIO_CLASS_IDLE) -+#define bfq_class_rt(bfqq) ((bfqq)->ioprio_class == IOPRIO_CLASS_RT) -+ -+#define bfq_sample_valid(samples) ((samples) > 80) -+ -+/* -+ * Scheduler run of queue, if there are requests pending and no one in the -+ * driver that will restart queueing. -+ */ -+static void bfq_schedule_dispatch(struct bfq_data *bfqd) -+{ -+ if (bfqd->queued != 0) { -+ bfq_log(bfqd, "schedule dispatch"); -+ kblockd_schedule_work(&bfqd->unplug_work); -+ } -+} -+ -+/* -+ * Lifted from AS - choose which of rq1 and rq2 that is best served now. -+ * We choose the request that is closesr to the head right now. Distance -+ * behind the head is penalized and only allowed to a certain extent. -+ */ -+static struct request *bfq_choose_req(struct bfq_data *bfqd, -+ struct request *rq1, -+ struct request *rq2, -+ sector_t last) -+{ -+ sector_t s1, s2, d1 = 0, d2 = 0; -+ unsigned long back_max; -+#define BFQ_RQ1_WRAP 0x01 /* request 1 wraps */ -+#define BFQ_RQ2_WRAP 0x02 /* request 2 wraps */ -+ unsigned int wrap = 0; /* bit mask: requests behind the disk head? 
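As a rough, stand-alone illustration of the duration = (R / r) * T rule described in the comment above (the peak-rate and reference values below are made up, not the ones used by BFQ), the computation and its 3-13 s clamp could be sketched as:

/* Illustrative sketch only: recompute the interactive weight-raising
 * duration as (R / r) * T, clamped to [3000, 13000] ms, for a made-up
 * peak rate. Constants are hypothetical, not the ones in this patch. */
#include <stdio.h>
#include <stdint.h>

#define MS_MIN 3000
#define MS_MAX 13000

static unsigned int wr_duration_ms(uint64_t ref_rate, uint64_t ref_time_ms,
				   uint64_t peak_rate)
{
	uint64_t dur = ref_rate * ref_time_ms / peak_rate; /* (R / r) * T */

	if (dur > MS_MAX)
		dur = MS_MAX;
	else if (dur < MS_MIN)
		dur = MS_MIN;
	return (unsigned int)dur;
}

int main(void)
{
	/* hypothetical reference pair and estimated peak rate */
	uint64_t R = 10700, T = 8000, r = 21400;

	printf("wr duration: %u ms\n", wr_duration_ms(R, T, r));
	return 0;
}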
*/ -+ -+ if (!rq1 || rq1 == rq2) -+ return rq2; -+ if (!rq2) -+ return rq1; -+ -+ if (rq_is_sync(rq1) && !rq_is_sync(rq2)) -+ return rq1; -+ else if (rq_is_sync(rq2) && !rq_is_sync(rq1)) -+ return rq2; -+ if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META)) -+ return rq1; -+ else if ((rq2->cmd_flags & REQ_META) && !(rq1->cmd_flags & REQ_META)) -+ return rq2; -+ -+ s1 = blk_rq_pos(rq1); -+ s2 = blk_rq_pos(rq2); -+ -+ /* -+ * By definition, 1KiB is 2 sectors. -+ */ -+ back_max = bfqd->bfq_back_max * 2; -+ -+ /* -+ * Strict one way elevator _except_ in the case where we allow -+ * short backward seeks which are biased as twice the cost of a -+ * similar forward seek. -+ */ -+ if (s1 >= last) -+ d1 = s1 - last; -+ else if (s1 + back_max >= last) -+ d1 = (last - s1) * bfqd->bfq_back_penalty; -+ else -+ wrap |= BFQ_RQ1_WRAP; -+ -+ if (s2 >= last) -+ d2 = s2 - last; -+ else if (s2 + back_max >= last) -+ d2 = (last - s2) * bfqd->bfq_back_penalty; -+ else -+ wrap |= BFQ_RQ2_WRAP; -+ -+ /* Found required data */ -+ -+ /* -+ * By doing switch() on the bit mask "wrap" we avoid having to -+ * check two variables for all permutations: --> faster! -+ */ -+ switch (wrap) { -+ case 0: /* common case for CFQ: rq1 and rq2 not wrapped */ -+ if (d1 < d2) -+ return rq1; -+ else if (d2 < d1) -+ return rq2; -+ -+ if (s1 >= s2) -+ return rq1; -+ else -+ return rq2; -+ -+ case BFQ_RQ2_WRAP: -+ return rq1; -+ case BFQ_RQ1_WRAP: -+ return rq2; -+ case (BFQ_RQ1_WRAP|BFQ_RQ2_WRAP): /* both rqs wrapped */ -+ default: -+ /* -+ * Since both rqs are wrapped, -+ * start with the one that's further behind head -+ * (--> only *one* back seek required), -+ * since back seek takes more time than forward. -+ */ -+ if (s1 <= s2) -+ return rq1; -+ else -+ return rq2; -+ } -+} -+ -+static struct bfq_queue * -+bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root, -+ sector_t sector, struct rb_node **ret_parent, -+ struct rb_node ***rb_link) -+{ -+ struct rb_node **p, *parent; -+ struct bfq_queue *bfqq = NULL; -+ -+ parent = NULL; -+ p = &root->rb_node; -+ while (*p) { -+ struct rb_node **n; -+ -+ parent = *p; -+ bfqq = rb_entry(parent, struct bfq_queue, pos_node); -+ -+ /* -+ * Sort strictly based on sector. Smallest to the left, -+ * largest to the right. -+ */ -+ if (sector > blk_rq_pos(bfqq->next_rq)) -+ n = &(*p)->rb_right; -+ else if (sector < blk_rq_pos(bfqq->next_rq)) -+ n = &(*p)->rb_left; -+ else -+ break; -+ p = n; -+ bfqq = NULL; -+ } -+ -+ *ret_parent = parent; -+ if (rb_link) -+ *rb_link = p; -+ -+ bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %d", -+ (unsigned long long) sector, -+ bfqq ? bfqq->pid : 0); -+ -+ return bfqq; -+} -+ -+static void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq) -+{ -+ struct rb_node **p, *parent; -+ struct bfq_queue *__bfqq; -+ -+ if (bfqq->pos_root) { -+ rb_erase(&bfqq->pos_node, bfqq->pos_root); -+ bfqq->pos_root = NULL; -+ } -+ -+ if (bfq_class_idle(bfqq)) -+ return; -+ if (!bfqq->next_rq) -+ return; -+ -+ bfqq->pos_root = &bfq_bfqq_to_bfqg(bfqq)->rq_pos_tree; -+ __bfqq = bfq_rq_pos_tree_lookup(bfqd, bfqq->pos_root, -+ blk_rq_pos(bfqq->next_rq), &parent, &p); -+ if (!__bfqq) { -+ rb_link_node(&bfqq->pos_node, parent, p); -+ rb_insert_color(&bfqq->pos_node, bfqq->pos_root); -+ } else -+ bfqq->pos_root = NULL; -+} -+ -+/* -+ * Tell whether there are active queues or groups with differentiated weights. 
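The distance comparison performed by bfq_choose_req() above can be pictured, outside the kernel, with a small helper that applies the back_max window and the backward-seek penalty; sector numbers below are hypothetical and the wrap bitmask handling is left out:

/* Illustrative sketch: pick the request "closer" to the current head
 * position, where backward seeks are allowed only within back_max
 * sectors and are charged penalty times their distance. */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static bool effective_dist(uint64_t s, uint64_t last, uint64_t back_max,
			   uint64_t penalty, uint64_t *d)
{
	if (s >= last)
		*d = s - last;			/* forward seek */
	else if (s + back_max >= last)
		*d = (last - s) * penalty;	/* short backward seek, penalized */
	else
		return false;			/* "wraps": too far behind the head */
	return true;
}

int main(void)
{
	uint64_t last = 10000, back_max = 16 * 1024 * 2, penalty = 2;
	uint64_t d1, d2;
	bool ok1 = effective_dist(10240, last, back_max, penalty, &d1);
	bool ok2 = effective_dist(9984, last, back_max, penalty, &d2);

	if (ok1 && ok2)
		printf("choose %s\n", d1 <= d2 ? "rq1" : "rq2");
	return 0;
}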
-+ */ -+static bool bfq_differentiated_weights(struct bfq_data *bfqd) -+{ -+ /* -+ * For weights to differ, at least one of the trees must contain -+ * at least two nodes. -+ */ -+ return (!RB_EMPTY_ROOT(&bfqd->queue_weights_tree) && -+ (bfqd->queue_weights_tree.rb_node->rb_left || -+ bfqd->queue_weights_tree.rb_node->rb_right) -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+ ) || -+ (!RB_EMPTY_ROOT(&bfqd->group_weights_tree) && -+ (bfqd->group_weights_tree.rb_node->rb_left || -+ bfqd->group_weights_tree.rb_node->rb_right) -+#endif -+ ); -+} -+ -+/* -+ * The following function returns true if every queue must receive the -+ * same share of the throughput (this condition is used when deciding -+ * whether idling may be disabled, see the comments in the function -+ * bfq_bfqq_may_idle()). -+ * -+ * Such a scenario occurs when: -+ * 1) all active queues have the same weight, -+ * 2) all active groups at the same level in the groups tree have the same -+ * weight, -+ * 3) all active groups at the same level in the groups tree have the same -+ * number of children. -+ * -+ * Unfortunately, keeping the necessary state for evaluating exactly the -+ * above symmetry conditions would be quite complex and time-consuming. -+ * Therefore this function evaluates, instead, the following stronger -+ * sub-conditions, for which it is much easier to maintain the needed -+ * state: -+ * 1) all active queues have the same weight, -+ * 2) all active groups have the same weight, -+ * 3) all active groups have at most one active child each. -+ * In particular, the last two conditions are always true if hierarchical -+ * support and the cgroups interface are not enabled, thus no state needs -+ * to be maintained in this case. -+ */ -+static bool bfq_symmetric_scenario(struct bfq_data *bfqd) -+{ -+ return !bfq_differentiated_weights(bfqd); -+} -+ -+/* -+ * If the weight-counter tree passed as input contains no counter for -+ * the weight of the input entity, then add that counter; otherwise just -+ * increment the existing counter. -+ * -+ * Note that weight-counter trees contain few nodes in mostly symmetric -+ * scenarios. For example, if all queues have the same weight, then the -+ * weight-counter tree for the queues may contain at most one node. -+ * This holds even if low_latency is on, because weight-raised queues -+ * are not inserted in the tree. -+ * In most scenarios, the rate at which nodes are created/destroyed -+ * should be low too. -+ */ -+static void bfq_weights_tree_add(struct bfq_data *bfqd, -+ struct bfq_entity *entity, -+ struct rb_root *root) -+{ -+ struct rb_node **new = &(root->rb_node), *parent = NULL; -+ -+ /* -+ * Do not insert if the entity is already associated with a -+ * counter, which happens if: -+ * 1) the entity is associated with a queue, -+ * 2) a request arrival has caused the queue to become both -+ * non-weight-raised, and hence change its weight, and -+ * backlogged; in this respect, each of the two events -+ * causes an invocation of this function, -+ * 3) this is the invocation of this function caused by the -+ * second event. This second invocation is actually useless, -+ * and we handle this fact by exiting immediately. More -+ * efficient or clearer solutions might possibly be adopted. 
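The symmetry test above ultimately asks whether more than one distinct weight is active; with a plain array standing in for the weight-counter rbtrees, a toy version (hypothetical weights) looks like:

/* Illustrative sketch: the scenario is treated as symmetric when all
 * active queues share one weight; a plain array stands in here for the
 * weight-counter rbtree used by the scheduler. */
#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

static bool weights_differentiated(const int *weights, size_t n)
{
	for (size_t i = 1; i < n; i++)
		if (weights[i] != weights[0])
			return true;	/* at least two distinct weights */
	return false;
}

int main(void)
{
	int active_weights[] = { 100, 100, 300 };	/* hypothetical */
	size_t n = sizeof(active_weights) / sizeof(active_weights[0]);

	printf("symmetric: %s\n",
	       weights_differentiated(active_weights, n) ? "no" : "yes");
	return 0;
}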
-+ */ -+ if (entity->weight_counter) -+ return; -+ -+ while (*new) { -+ struct bfq_weight_counter *__counter = container_of(*new, -+ struct bfq_weight_counter, -+ weights_node); -+ parent = *new; -+ -+ if (entity->weight == __counter->weight) { -+ entity->weight_counter = __counter; -+ goto inc_counter; -+ } -+ if (entity->weight < __counter->weight) -+ new = &((*new)->rb_left); -+ else -+ new = &((*new)->rb_right); -+ } -+ -+ entity->weight_counter = kzalloc(sizeof(struct bfq_weight_counter), -+ GFP_ATOMIC); -+ -+ /* -+ * In the unlucky event of an allocation failure, we just -+ * exit. This will cause the weight of entity to not be -+ * considered in bfq_differentiated_weights, which, in its -+ * turn, causes the scenario to be deemed wrongly symmetric in -+ * case entity's weight would have been the only weight making -+ * the scenario asymmetric. On the bright side, no unbalance -+ * will however occur when entity becomes inactive again (the -+ * invocation of this function is triggered by an activation -+ * of entity). In fact, bfq_weights_tree_remove does nothing -+ * if !entity->weight_counter. -+ */ -+ if (unlikely(!entity->weight_counter)) -+ return; -+ -+ entity->weight_counter->weight = entity->weight; -+ rb_link_node(&entity->weight_counter->weights_node, parent, new); -+ rb_insert_color(&entity->weight_counter->weights_node, root); -+ -+inc_counter: -+ entity->weight_counter->num_active++; -+} -+ -+/* -+ * Decrement the weight counter associated with the entity, and, if the -+ * counter reaches 0, remove the counter from the tree. -+ * See the comments to the function bfq_weights_tree_add() for considerations -+ * about overhead. -+ */ -+static void bfq_weights_tree_remove(struct bfq_data *bfqd, -+ struct bfq_entity *entity, -+ struct rb_root *root) -+{ -+ if (!entity->weight_counter) -+ return; -+ -+ BUG_ON(RB_EMPTY_ROOT(root)); -+ BUG_ON(entity->weight_counter->weight != entity->weight); -+ -+ BUG_ON(!entity->weight_counter->num_active); -+ entity->weight_counter->num_active--; -+ if (entity->weight_counter->num_active > 0) -+ goto reset_entity_pointer; -+ -+ rb_erase(&entity->weight_counter->weights_node, root); -+ kfree(entity->weight_counter); -+ -+reset_entity_pointer: -+ entity->weight_counter = NULL; -+} -+ -+/* -+ * Return expired entry, or NULL to just start from scratch in rbtree. -+ */ -+static struct request *bfq_check_fifo(struct bfq_queue *bfqq, -+ struct request *last) -+{ -+ struct request *rq; -+ -+ if (bfq_bfqq_fifo_expire(bfqq)) -+ return NULL; -+ -+ bfq_mark_bfqq_fifo_expire(bfqq); -+ -+ rq = rq_entry_fifo(bfqq->fifo.next); -+ -+ if (rq == last || ktime_get_ns() < rq->fifo_time) -+ return NULL; -+ -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "check_fifo: returned %p", rq); -+ BUG_ON(RB_EMPTY_NODE(&rq->rb_node)); -+ return rq; -+} -+ -+static struct request *bfq_find_next_rq(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq, -+ struct request *last) -+{ -+ struct rb_node *rbnext = rb_next(&last->rb_node); -+ struct rb_node *rbprev = rb_prev(&last->rb_node); -+ struct request *next, *prev = NULL; -+ -+ BUG_ON(list_empty(&bfqq->fifo)); -+ -+ /* Follow expired path, else get first next available. 
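The "expired path" mentioned above is a plain FIFO-deadline check: the oldest queued request is served out of sector order once it has waited past its fifo_time. A minimal sketch with invented times:

/* Illustrative sketch: serve the head of the FIFO out of sector order
 * if it has exceeded its expiration time, otherwise return NULL and let
 * the caller pick the sector-closest request. Times are hypothetical. */
#include <stdio.h>
#include <stdint.h>

struct toy_rq {
	uint64_t fifo_time_ns;	/* when this request expires */
	long sector;
};

static const struct toy_rq *check_fifo(const struct toy_rq *oldest,
				       uint64_t now_ns)
{
	if (now_ns < oldest->fifo_time_ns)
		return NULL;		/* not expired: no override */
	return oldest;			/* expired: dispatch it first */
}

int main(void)
{
	struct toy_rq oldest = { .fifo_time_ns = 250000000ULL, .sector = 42 };
	const struct toy_rq *rq = check_fifo(&oldest, 300000000ULL);

	printf("fifo override: %s\n", rq ? "yes" : "no");
	return 0;
}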
*/ -+ next = bfq_check_fifo(bfqq, last); -+ if (next) { -+ BUG_ON(next == last); -+ return next; -+ } -+ -+ BUG_ON(RB_EMPTY_NODE(&last->rb_node)); -+ -+ if (rbprev) -+ prev = rb_entry_rq(rbprev); -+ -+ if (rbnext) -+ next = rb_entry_rq(rbnext); -+ else { -+ rbnext = rb_first(&bfqq->sort_list); -+ if (rbnext && rbnext != &last->rb_node) -+ next = rb_entry_rq(rbnext); -+ } -+ -+ return bfq_choose_req(bfqd, next, prev, blk_rq_pos(last)); -+} -+ -+/* see the definition of bfq_async_charge_factor for details */ -+static unsigned long bfq_serv_to_charge(struct request *rq, -+ struct bfq_queue *bfqq) -+{ -+ if (bfq_bfqq_sync(bfqq) || bfqq->wr_coeff > 1) -+ return blk_rq_sectors(rq); -+ -+ /* -+ * If there are no weight-raised queues, then amplify service -+ * by just the async charge factor; otherwise amplify service -+ * by twice the async charge factor, to further reduce latency -+ * for weight-raised queues. -+ */ -+ if (bfqq->bfqd->wr_busy_queues == 0) -+ return blk_rq_sectors(rq) * bfq_async_charge_factor; -+ -+ return blk_rq_sectors(rq) * 2 * bfq_async_charge_factor; -+} -+ -+/** -+ * bfq_updated_next_req - update the queue after a new next_rq selection. -+ * @bfqd: the device data the queue belongs to. -+ * @bfqq: the queue to update. -+ * -+ * If the first request of a queue changes we make sure that the queue -+ * has enough budget to serve at least its first request (if the -+ * request has grown). We do this because if the queue has not enough -+ * budget for its first request, it has to go through two dispatch -+ * rounds to actually get it dispatched. -+ */ -+static void bfq_updated_next_req(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq) -+{ -+ struct bfq_entity *entity = &bfqq->entity; -+ struct bfq_service_tree *st = bfq_entity_service_tree(entity); -+ struct request *next_rq = bfqq->next_rq; -+ unsigned long new_budget; -+ -+ if (!next_rq) -+ return; -+ -+ if (bfqq == bfqd->in_service_queue) -+ /* -+ * In order not to break guarantees, budgets cannot be -+ * changed after an entity has been selected. -+ */ -+ return; -+ -+ BUG_ON(entity->tree != &st->active); -+ BUG_ON(entity == entity->sched_data->in_service_entity); -+ -+ new_budget = max_t(unsigned long, bfqq->max_budget, -+ bfq_serv_to_charge(next_rq, bfqq)); -+ if (entity->budget != new_budget) { -+ entity->budget = new_budget; -+ bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu", -+ new_budget); -+ bfq_requeue_bfqq(bfqd, bfqq); -+ } -+} -+ -+static unsigned int bfq_wr_duration(struct bfq_data *bfqd) -+{ -+ u64 dur; -+ -+ if (bfqd->bfq_wr_max_time > 0) -+ return bfqd->bfq_wr_max_time; -+ -+ dur = bfqd->RT_prod; -+ do_div(dur, bfqd->peak_rate); -+ -+ /* -+ * Limit duration between 3 and 13 seconds. Tests show that -+ * higher values than 13 seconds often yield the opposite of -+ * the desired result, i.e., worsen responsiveness by letting -+ * non-interactive and non-soft-real-time applications -+ * preserve weight raising for a too long time interval. -+ * -+ * On the other end, lower values than 3 seconds make it -+ * difficult for most interactive tasks to complete their jobs -+ * before weight-raising finishes. 
-+ */ -+ if (dur > msecs_to_jiffies(13000)) -+ dur = msecs_to_jiffies(13000); -+ else if (dur < msecs_to_jiffies(3000)) -+ dur = msecs_to_jiffies(3000); -+ -+ return dur; -+} -+ -+static void -+bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd, -+ struct bfq_io_cq *bic, bool bfq_already_existing) -+{ -+ unsigned int old_wr_coeff; -+ bool busy = bfq_already_existing && bfq_bfqq_busy(bfqq); -+ -+ if (bic->saved_has_short_ttime) -+ bfq_mark_bfqq_has_short_ttime(bfqq); -+ else -+ bfq_clear_bfqq_has_short_ttime(bfqq); -+ -+ if (bic->saved_IO_bound) -+ bfq_mark_bfqq_IO_bound(bfqq); -+ else -+ bfq_clear_bfqq_IO_bound(bfqq); -+ -+ if (unlikely(busy)) -+ old_wr_coeff = bfqq->wr_coeff; -+ -+ bfqq->wr_coeff = bic->saved_wr_coeff; -+ bfqq->wr_start_at_switch_to_srt = bic->saved_wr_start_at_switch_to_srt; -+ BUG_ON(time_is_after_jiffies(bfqq->wr_start_at_switch_to_srt)); -+ bfqq->last_wr_start_finish = bic->saved_last_wr_start_finish; -+ bfqq->wr_cur_max_time = bic->saved_wr_cur_max_time; -+ BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish)); -+ -+ if (bfqq->wr_coeff > 1 && (bfq_bfqq_in_large_burst(bfqq) || -+ time_is_before_jiffies(bfqq->last_wr_start_finish + -+ bfqq->wr_cur_max_time))) { -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "resume state: switching off wr (%lu + %lu < %lu)", -+ bfqq->last_wr_start_finish, bfqq->wr_cur_max_time, -+ jiffies); -+ -+ bfqq->wr_coeff = 1; -+ } -+ -+ /* make sure weight will be updated, however we got here */ -+ bfqq->entity.prio_changed = 1; -+ -+ if (likely(!busy)) -+ return; -+ -+ if (old_wr_coeff == 1 && bfqq->wr_coeff > 1) { -+ bfqd->wr_busy_queues++; -+ BUG_ON(bfqd->wr_busy_queues > bfqd->busy_queues); -+ } else if (old_wr_coeff > 1 && bfqq->wr_coeff == 1) { -+ bfqd->wr_busy_queues--; -+ BUG_ON(bfqd->wr_busy_queues < 0); -+ } -+} -+ -+static int bfqq_process_refs(struct bfq_queue *bfqq) -+{ -+ int process_refs, io_refs; -+ -+ lockdep_assert_held(bfqq->bfqd->queue->queue_lock); -+ -+ io_refs = bfqq->allocated[READ] + bfqq->allocated[WRITE]; -+ process_refs = bfqq->ref - io_refs - bfqq->entity.on_st; -+ BUG_ON(process_refs < 0); -+ return process_refs; -+} -+ -+/* Empty burst list and add just bfqq (see comments to bfq_handle_burst) */ -+static void bfq_reset_burst_list(struct bfq_data *bfqd, struct bfq_queue *bfqq) -+{ -+ struct bfq_queue *item; -+ struct hlist_node *n; -+ -+ hlist_for_each_entry_safe(item, n, &bfqd->burst_list, burst_list_node) -+ hlist_del_init(&item->burst_list_node); -+ hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list); -+ bfqd->burst_size = 1; -+ bfqd->burst_parent_entity = bfqq->entity.parent; -+} -+ -+/* Add bfqq to the list of queues in current burst (see bfq_handle_burst) */ -+static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq) -+{ -+ /* Increment burst size to take into account also bfqq */ -+ bfqd->burst_size++; -+ -+ bfq_log_bfqq(bfqd, bfqq, "add_to_burst %d", bfqd->burst_size); -+ -+ BUG_ON(bfqd->burst_size > bfqd->bfq_large_burst_thresh); -+ -+ if (bfqd->burst_size == bfqd->bfq_large_burst_thresh) { -+ struct bfq_queue *pos, *bfqq_item; -+ struct hlist_node *n; -+ -+ /* -+ * Enough queues have been activated shortly after each -+ * other to consider this burst as large. -+ */ -+ bfqd->large_burst = true; -+ bfq_log_bfqq(bfqd, bfqq, "add_to_burst: large burst started"); -+ -+ /* -+ * We can now mark all queues in the burst list as -+ * belonging to a large burst. 
-+ */ -+ hlist_for_each_entry(bfqq_item, &bfqd->burst_list, -+ burst_list_node) { -+ bfq_mark_bfqq_in_large_burst(bfqq_item); -+ bfq_log_bfqq(bfqd, bfqq_item, "marked in large burst"); -+ } -+ bfq_mark_bfqq_in_large_burst(bfqq); -+ bfq_log_bfqq(bfqd, bfqq, "marked in large burst"); -+ -+ /* -+ * From now on, and until the current burst finishes, any -+ * new queue being activated shortly after the last queue -+ * was inserted in the burst can be immediately marked as -+ * belonging to a large burst. So the burst list is not -+ * needed any more. Remove it. -+ */ -+ hlist_for_each_entry_safe(pos, n, &bfqd->burst_list, -+ burst_list_node) -+ hlist_del_init(&pos->burst_list_node); -+ } else /* -+ * Burst not yet large: add bfqq to the burst list. Do -+ * not increment the ref counter for bfqq, because bfqq -+ * is removed from the burst list before freeing bfqq -+ * in put_queue. -+ */ -+ hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list); -+} -+ -+/* -+ * If many queues belonging to the same group happen to be created -+ * shortly after each other, then the processes associated with these -+ * queues have typically a common goal. In particular, bursts of queue -+ * creations are usually caused by services or applications that spawn -+ * many parallel threads/processes. Examples are systemd during boot, -+ * or git grep. To help these processes get their job done as soon as -+ * possible, it is usually better to not grant either weight-raising -+ * or device idling to their queues. -+ * -+ * In this comment we describe, firstly, the reasons why this fact -+ * holds, and, secondly, the next function, which implements the main -+ * steps needed to properly mark these queues so that they can then be -+ * treated in a different way. -+ * -+ * The above services or applications benefit mostly from a high -+ * throughput: the quicker the requests of the activated queues are -+ * cumulatively served, the sooner the target job of these queues gets -+ * completed. As a consequence, weight-raising any of these queues, -+ * which also implies idling the device for it, is almost always -+ * counterproductive. In most cases it just lowers throughput. -+ * -+ * On the other hand, a burst of queue creations may be caused also by -+ * the start of an application that does not consist of a lot of -+ * parallel I/O-bound threads. In fact, with a complex application, -+ * several short processes may need to be executed to start-up the -+ * application. In this respect, to start an application as quickly as -+ * possible, the best thing to do is in any case to privilege the I/O -+ * related to the application with respect to all other -+ * I/O. Therefore, the best strategy to start as quickly as possible -+ * an application that causes a burst of queue creations is to -+ * weight-raise all the queues created during the burst. This is the -+ * exact opposite of the best strategy for the other type of bursts. -+ * -+ * In the end, to take the best action for each of the two cases, the -+ * two types of bursts need to be distinguished. Fortunately, this -+ * seems relatively easy, by looking at the sizes of the bursts. In -+ * particular, we found a threshold such that only bursts with a -+ * larger size than that threshold are apparently caused by -+ * services or commands such as systemd or git grep. For brevity, -+ * hereafter we call just 'large' these bursts. BFQ *does not* -+ * weight-raise queues whose creation occurs in a large burst. 
In -+ * addition, for each of these queues BFQ performs or does not perform -+ * idling depending on which choice boosts the throughput more. The -+ * exact choice depends on the device and request pattern at -+ * hand. -+ * -+ * Unfortunately, false positives may occur while an interactive task -+ * is starting (e.g., an application is being started). The -+ * consequence is that the queues associated with the task do not -+ * enjoy weight raising as expected. Fortunately these false positives -+ * are very rare. They typically occur if some service happens to -+ * start doing I/O exactly when the interactive task starts. -+ * -+ * Turning back to the next function, it implements all the steps -+ * needed to detect the occurrence of a large burst and to properly -+ * mark all the queues belonging to it (so that they can then be -+ * treated in a different way). This goal is achieved by maintaining a -+ * "burst list" that holds, temporarily, the queues that belong to the -+ * burst in progress. The list is then used to mark these queues as -+ * belonging to a large burst if the burst does become large. The main -+ * steps are the following. -+ * -+ * . when the very first queue is created, the queue is inserted into the -+ * list (as it could be the first queue in a possible burst) -+ * -+ * . if the current burst has not yet become large, and a queue Q that does -+ * not yet belong to the burst is activated shortly after the last time -+ * at which a new queue entered the burst list, then the function appends -+ * Q to the burst list -+ * -+ * . if, as a consequence of the previous step, the burst size reaches -+ * the large-burst threshold, then -+ * -+ * . all the queues in the burst list are marked as belonging to a -+ * large burst -+ * -+ * . the burst list is deleted; in fact, the burst list already served -+ * its purpose (keeping temporarily track of the queues in a burst, -+ * so as to be able to mark them as belonging to a large burst in the -+ * previous sub-step), and now is not needed any more -+ * -+ * . the device enters a large-burst mode -+ * -+ * . if a queue Q that does not belong to the burst is created while -+ * the device is in large-burst mode and shortly after the last time -+ * at which a queue either entered the burst list or was marked as -+ * belonging to the current large burst, then Q is immediately marked -+ * as belonging to a large burst. -+ * -+ * . if a queue Q that does not belong to the burst is created a while -+ * later, i.e., not shortly after, than the last time at which a queue -+ * either entered the burst list or was marked as belonging to the -+ * current large burst, then the current burst is deemed as finished and: -+ * -+ * . the large-burst mode is reset if set -+ * -+ * . the burst list is emptied -+ * -+ * . Q is inserted in the burst list, as Q may be the first queue -+ * in a possible new burst (then the burst list contains just Q -+ * after this step). -+ */ -+static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq) -+{ -+ /* -+ * If bfqq is already in the burst list or is part of a large -+ * burst, or finally has just been split, then there is -+ * nothing else to do. 
-+ */ -+ if (!hlist_unhashed(&bfqq->burst_list_node) || -+ bfq_bfqq_in_large_burst(bfqq) || -+ time_is_after_eq_jiffies(bfqq->split_time + -+ msecs_to_jiffies(10))) -+ return; -+ -+ /* -+ * If bfqq's creation happens late enough, or bfqq belongs to -+ * a different group than the burst group, then the current -+ * burst is finished, and related data structures must be -+ * reset. -+ * -+ * In this respect, consider the special case where bfqq is -+ * the very first queue created after BFQ is selected for this -+ * device. In this case, last_ins_in_burst and -+ * burst_parent_entity are not yet significant when we get -+ * here. But it is easy to verify that, whether or not the -+ * following condition is true, bfqq will end up being -+ * inserted into the burst list. In particular the list will -+ * happen to contain only bfqq. And this is exactly what has -+ * to happen, as bfqq may be the first queue of the first -+ * burst. -+ */ -+ if (time_is_before_jiffies(bfqd->last_ins_in_burst + -+ bfqd->bfq_burst_interval) || -+ bfqq->entity.parent != bfqd->burst_parent_entity) { -+ bfqd->large_burst = false; -+ bfq_reset_burst_list(bfqd, bfqq); -+ bfq_log_bfqq(bfqd, bfqq, -+ "handle_burst: late activation or different group"); -+ goto end; -+ } -+ -+ /* -+ * If we get here, then bfqq is being activated shortly after the -+ * last queue. So, if the current burst is also large, we can mark -+ * bfqq as belonging to this large burst immediately. -+ */ -+ if (bfqd->large_burst) { -+ bfq_log_bfqq(bfqd, bfqq, "handle_burst: marked in burst"); -+ bfq_mark_bfqq_in_large_burst(bfqq); -+ goto end; -+ } -+ -+ /* -+ * If we get here, then a large-burst state has not yet been -+ * reached, but bfqq is being activated shortly after the last -+ * queue. Then we add bfqq to the burst. -+ */ -+ bfq_add_to_burst(bfqd, bfqq); -+end: -+ /* -+ * At this point, bfqq either has been added to the current -+ * burst or has caused the current burst to terminate and a -+ * possible new burst to start. In particular, in the second -+ * case, bfqq has become the first queue in the possible new -+ * burst. In both cases last_ins_in_burst needs to be moved -+ * forward. -+ */ -+ bfqd->last_ins_in_burst = jiffies; -+ -+} -+ -+static int bfq_bfqq_budget_left(struct bfq_queue *bfqq) -+{ -+ struct bfq_entity *entity = &bfqq->entity; -+ -+ return entity->budget - entity->service; -+} -+ -+/* -+ * If enough samples have been computed, return the current max budget -+ * stored in bfqd, which is dynamically updated according to the -+ * estimated disk peak rate; otherwise return the default max budget -+ */ -+static int bfq_max_budget(struct bfq_data *bfqd) -+{ -+ if (bfqd->budgets_assigned < bfq_stats_min_budgets) -+ return bfq_default_max_budget; -+ else -+ return bfqd->bfq_max_budget; -+} -+ -+/* -+ * Return min budget, which is a fraction of the current or default -+ * max budget (trying with 1/32) -+ */ -+static int bfq_min_budget(struct bfq_data *bfqd) -+{ -+ if (bfqd->budgets_assigned < bfq_stats_min_budgets) -+ return bfq_default_max_budget / 32; -+ else -+ return bfqd->bfq_max_budget / 32; -+} -+ -+static void bfq_bfqq_expire(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq, -+ bool compensate, -+ enum bfqq_expiration reason); -+ -+/* -+ * The next function, invoked after the input queue bfqq switches from -+ * idle to busy, updates the budget of bfqq. The function also tells -+ * whether the in-service queue should be expired, by returning -+ * true. 
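Stripped of the hlist and cgroup details, the burst policy described above reduces to a timestamp plus a counter; the following toy model (threshold and interval values are hypothetical) shows how a creation arriving too late starts a new burst, while a dense run of creations gets flagged as a large burst:

/* Illustrative sketch of the burst-detection policy: a queue created
 * shortly after the previous one grows the current burst; once the
 * burst reaches the threshold it is flagged as "large" and its queues
 * stop being weight-raised. Numbers are hypothetical. */
#include <stdio.h>
#include <stdbool.h>

#define BURST_INTERVAL_MS	180	/* max gap between creations */
#define LARGE_BURST_THRESH	11	/* burst size deemed "large" */

struct burst_state {
	unsigned long last_ins_ms;	/* last insertion time */
	int size;			/* queues in the current burst */
	bool large;			/* current burst marked large? */
};

static bool queue_created(struct burst_state *b, unsigned long now_ms)
{
	if (now_ms - b->last_ins_ms > BURST_INTERVAL_MS) {
		/* too late: start a new burst containing just this queue */
		b->size = 1;
		b->large = false;
	} else if (!b->large && ++b->size >= LARGE_BURST_THRESH) {
		b->large = true;	/* mark every queue in the burst */
	}
	b->last_ins_ms = now_ms;
	return b->large;		/* true: do not weight-raise */
}

int main(void)
{
	struct burst_state b = { 0, 0, false };
	unsigned long t = 0;

	for (int i = 0; i < 12; i++) {
		printf("queue %2d at %4lums -> large burst: %d\n",
		       i, t, queue_created(&b, t));
		t += 50;
	}
	return 0;
}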
The purpose of expiring the in-service queue is to give bfqq -+ * the chance to possibly preempt the in-service queue, and the reason -+ * for preempting the in-service queue is to achieve one of the two -+ * goals below. -+ * -+ * 1. Guarantee to bfqq its reserved bandwidth even if bfqq has -+ * expired because it has remained idle. In particular, bfqq may have -+ * expired for one of the following two reasons: -+ * -+ * - BFQ_BFQQ_NO_MORE_REQUEST bfqq did not enjoy any device idling and -+ * did not make it to issue a new request before its last request -+ * was served; -+ * -+ * - BFQ_BFQQ_TOO_IDLE bfqq did enjoy device idling, but did not issue -+ * a new request before the expiration of the idling-time. -+ * -+ * Even if bfqq has expired for one of the above reasons, the process -+ * associated with the queue may be however issuing requests greedily, -+ * and thus be sensitive to the bandwidth it receives (bfqq may have -+ * remained idle for other reasons: CPU high load, bfqq not enjoying -+ * idling, I/O throttling somewhere in the path from the process to -+ * the I/O scheduler, ...). But if, after every expiration for one of -+ * the above two reasons, bfqq has to wait for the service of at least -+ * one full budget of another queue before being served again, then -+ * bfqq is likely to get a much lower bandwidth or resource time than -+ * its reserved ones. To address this issue, two countermeasures need -+ * to be taken. -+ * -+ * First, the budget and the timestamps of bfqq need to be updated in -+ * a special way on bfqq reactivation: they need to be updated as if -+ * bfqq did not remain idle and did not expire. In fact, if they are -+ * computed as if bfqq expired and remained idle until reactivation, -+ * then the process associated with bfqq is treated as if, instead of -+ * being greedy, it stopped issuing requests when bfqq remained idle, -+ * and restarts issuing requests only on this reactivation. In other -+ * words, the scheduler does not help the process recover the "service -+ * hole" between bfqq expiration and reactivation. As a consequence, -+ * the process receives a lower bandwidth than its reserved one. In -+ * contrast, to recover this hole, the budget must be updated as if -+ * bfqq was not expired at all before this reactivation, i.e., it must -+ * be set to the value of the remaining budget when bfqq was -+ * expired. Along the same line, timestamps need to be assigned the -+ * value they had the last time bfqq was selected for service, i.e., -+ * before last expiration. Thus timestamps need to be back-shifted -+ * with respect to their normal computation (see [1] for more details -+ * on this tricky aspect). -+ * -+ * Secondly, to allow the process to recover the hole, the in-service -+ * queue must be expired too, to give bfqq the chance to preempt it -+ * immediately. In fact, if bfqq has to wait for a full budget of the -+ * in-service queue to be completed, then it may become impossible to -+ * let the process recover the hole, even if the back-shifted -+ * timestamps of bfqq are lower than those of the in-service queue. If -+ * this happens for most or all of the holes, then the process may not -+ * receive its reserved bandwidth. In this respect, it is worth noting -+ * that, being the service of outstanding requests unpreemptible, a -+ * little fraction of the holes may however be unrecoverable, thereby -+ * causing a little loss of bandwidth. -+ * -+ * The last important point is detecting whether bfqq does need this -+ * bandwidth recovery. 
In this respect, the next function deems the -+ * process associated with bfqq greedy, and thus allows it to recover -+ * the hole, if: 1) the process is waiting for the arrival of a new -+ * request (which implies that bfqq expired for one of the above two -+ * reasons), and 2) such a request has arrived soon. The first -+ * condition is controlled through the flag non_blocking_wait_rq, -+ * while the second through the flag arrived_in_time. If both -+ * conditions hold, then the function computes the budget in the -+ * above-described special way, and signals that the in-service queue -+ * should be expired. Timestamp back-shifting is done later in -+ * __bfq_activate_entity. -+ * -+ * 2. Reduce latency. Even if timestamps are not backshifted to let -+ * the process associated with bfqq recover a service hole, bfqq may -+ * however happen to have, after being (re)activated, a lower finish -+ * timestamp than the in-service queue. That is, the next budget of -+ * bfqq may have to be completed before the one of the in-service -+ * queue. If this is the case, then preempting the in-service queue -+ * allows this goal to be achieved, apart from the unpreemptible, -+ * outstanding requests mentioned above. -+ * -+ * Unfortunately, regardless of which of the above two goals one wants -+ * to achieve, service trees need first to be updated to know whether -+ * the in-service queue must be preempted. To have service trees -+ * correctly updated, the in-service queue must be expired and -+ * rescheduled, and bfqq must be scheduled too. This is one of the -+ * most costly operations (in future versions, the scheduling -+ * mechanism may be re-designed in such a way to make it possible to -+ * know whether preemption is needed without needing to update service -+ * trees). In addition, queue preemptions almost always cause random -+ * I/O, and thus loss of throughput. Because of these facts, the next -+ * function adopts the following simple scheme to avoid both costly -+ * operations and too frequent preemptions: it requests the expiration -+ * of the in-service queue (unconditionally) only for queues that need -+ * to recover a hole, or that either are weight-raised or deserve to -+ * be weight-raised. -+ */ -+static bool bfq_bfqq_update_budg_for_activation(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq, -+ bool arrived_in_time, -+ bool wr_or_deserves_wr) -+{ -+ struct bfq_entity *entity = &bfqq->entity; -+ -+ if (bfq_bfqq_non_blocking_wait_rq(bfqq) && arrived_in_time) { -+ /* -+ * We do not clear the flag non_blocking_wait_rq here, as -+ * the latter is used in bfq_activate_bfqq to signal -+ * that timestamps need to be back-shifted (and is -+ * cleared right after). -+ */ -+ -+ /* -+ * In next assignment we rely on that either -+ * entity->service or entity->budget are not updated -+ * on expiration if bfqq is empty (see -+ * __bfq_bfqq_recalc_budget). Thus both quantities -+ * remain unchanged after such an expiration, and the -+ * following statement therefore assigns to -+ * entity->budget the remaining budget on such an -+ * expiration. For clarity, entity->service is not -+ * updated on expiration in any case, and, in normal -+ * operation, is reset only when bfqq is selected for -+ * service (see bfq_get_next_queue). 
-+ */ -+ BUG_ON(bfqq->max_budget < 0); -+ entity->budget = min_t(unsigned long, -+ bfq_bfqq_budget_left(bfqq), -+ bfqq->max_budget); -+ -+ BUG_ON(entity->budget < 0); -+ return true; -+ } -+ -+ BUG_ON(bfqq->max_budget < 0); -+ entity->budget = max_t(unsigned long, bfqq->max_budget, -+ bfq_serv_to_charge(bfqq->next_rq, bfqq)); -+ BUG_ON(entity->budget < 0); -+ -+ bfq_clear_bfqq_non_blocking_wait_rq(bfqq); -+ return wr_or_deserves_wr; -+} -+ -+static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq, -+ unsigned int old_wr_coeff, -+ bool wr_or_deserves_wr, -+ bool interactive, -+ bool in_burst, -+ bool soft_rt) -+{ -+ if (old_wr_coeff == 1 && wr_or_deserves_wr) { -+ /* start a weight-raising period */ -+ if (interactive) { -+ bfqq->wr_coeff = bfqd->bfq_wr_coeff; -+ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); -+ } else { -+ bfqq->wr_start_at_switch_to_srt = jiffies; -+ bfqq->wr_coeff = bfqd->bfq_wr_coeff * -+ BFQ_SOFTRT_WEIGHT_FACTOR; -+ bfqq->wr_cur_max_time = -+ bfqd->bfq_wr_rt_max_time; -+ } -+ /* -+ * If needed, further reduce budget to make sure it is -+ * close to bfqq's backlog, so as to reduce the -+ * scheduling-error component due to a too large -+ * budget. Do not care about throughput consequences, -+ * but only about latency. Finally, do not assign a -+ * too small budget either, to avoid increasing -+ * latency by causing too frequent expirations. -+ */ -+ bfqq->entity.budget = min_t(unsigned long, -+ bfqq->entity.budget, -+ 2 * bfq_min_budget(bfqd)); -+ -+ bfq_log_bfqq(bfqd, bfqq, -+ "wrais starting at %lu, rais_max_time %u", -+ jiffies, -+ jiffies_to_msecs(bfqq->wr_cur_max_time)); -+ } else if (old_wr_coeff > 1) { -+ if (interactive) { /* update wr coeff and duration */ -+ bfqq->wr_coeff = bfqd->bfq_wr_coeff; -+ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); -+ } else if (in_burst) { -+ bfqq->wr_coeff = 1; -+ bfq_log_bfqq(bfqd, bfqq, -+ "wrais ending at %lu, rais_max_time %u", -+ jiffies, -+ jiffies_to_msecs(bfqq-> -+ wr_cur_max_time)); -+ } else if (soft_rt) { -+ /* -+ * The application is now or still meeting the -+ * requirements for being deemed soft rt. We -+ * can then correctly and safely (re)charge -+ * the weight-raising duration for the -+ * application with the weight-raising -+ * duration for soft rt applications. -+ * -+ * In particular, doing this recharge now, i.e., -+ * before the weight-raising period for the -+ * application finishes, reduces the probability -+ * of the following negative scenario: -+ * 1) the weight of a soft rt application is -+ * raised at startup (as for any newly -+ * created application), -+ * 2) since the application is not interactive, -+ * at a certain time weight-raising is -+ * stopped for the application, -+ * 3) at that time the application happens to -+ * still have pending requests, and hence -+ * is destined to not have a chance to be -+ * deemed soft rt before these requests are -+ * completed (see the comments to the -+ * function bfq_bfqq_softrt_next_start() -+ * for details on soft rt detection), -+ * 4) these pending requests experience a high -+ * latency because the application is not -+ * weight-raised while they are pending. 
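The two budget assignments made above can be condensed into a small stand-alone helper: keep the residual budget when the queue was waiting and the request arrived in time, otherwise provision at least the charge of the next request. Field names and values below are invented for the example:

/* Illustrative sketch of the budget update on idle-to-busy switch:
 * a queue that was waiting for a request which arrived "in time"
 * keeps its residual budget (capped to max_budget); any other queue
 * gets at least what its next request will be charged. */
#include <stdio.h>
#include <stdbool.h>

struct toy_queue {
	long budget;		/* currently assigned budget, in sectors */
	long service;		/* sectors already consumed from it */
	long max_budget;	/* per-queue cap */
};

static long min_l(long a, long b) { return a < b ? a : b; }
static long max_l(long a, long b) { return a > b ? a : b; }

static void budget_on_activation(struct toy_queue *q, long next_rq_charge,
				 bool was_waiting, bool arrived_in_time)
{
	if (was_waiting && arrived_in_time)
		q->budget = min_l(q->budget - q->service, q->max_budget);
	else
		q->budget = max_l(q->max_budget, next_rq_charge);
}

int main(void)
{
	struct toy_queue q = { .budget = 4096, .service = 1024,
			       .max_budget = 16384 };

	budget_on_activation(&q, 256, true, true);
	printf("budget after in-time reactivation: %ld sectors\n", q.budget);
	return 0;
}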
-+ */ -+ if (bfqq->wr_cur_max_time != -+ bfqd->bfq_wr_rt_max_time) { -+ bfqq->wr_start_at_switch_to_srt = -+ bfqq->last_wr_start_finish; -+ BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish)); -+ -+ bfqq->wr_cur_max_time = -+ bfqd->bfq_wr_rt_max_time; -+ bfqq->wr_coeff = bfqd->bfq_wr_coeff * -+ BFQ_SOFTRT_WEIGHT_FACTOR; -+ bfq_log_bfqq(bfqd, bfqq, -+ "switching to soft_rt wr"); -+ } else -+ bfq_log_bfqq(bfqd, bfqq, -+ "moving forward soft_rt wr duration"); -+ bfqq->last_wr_start_finish = jiffies; -+ } -+ } -+} -+ -+static bool bfq_bfqq_idle_for_long_time(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq) -+{ -+ return bfqq->dispatched == 0 && -+ time_is_before_jiffies( -+ bfqq->budget_timeout + -+ bfqd->bfq_wr_min_idle_time); -+} -+ -+static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq, -+ int old_wr_coeff, -+ struct request *rq, -+ bool *interactive) -+{ -+ bool soft_rt, in_burst, wr_or_deserves_wr, -+ bfqq_wants_to_preempt, -+ idle_for_long_time = bfq_bfqq_idle_for_long_time(bfqd, bfqq), -+ /* -+ * See the comments on -+ * bfq_bfqq_update_budg_for_activation for -+ * details on the usage of the next variable. -+ */ -+ arrived_in_time = ktime_get_ns() <= -+ RQ_BIC(rq)->ttime.last_end_request + -+ bfqd->bfq_slice_idle * 3; -+ -+ bfq_log_bfqq(bfqd, bfqq, -+ "bfq_add_request non-busy: " -+ "jiffies %lu, in_time %d, idle_long %d busyw %d " -+ "wr_coeff %u", -+ jiffies, arrived_in_time, -+ idle_for_long_time, -+ bfq_bfqq_non_blocking_wait_rq(bfqq), -+ old_wr_coeff); -+ -+ BUG_ON(bfqq->entity.budget < bfqq->entity.service); -+ -+ BUG_ON(bfqq == bfqd->in_service_queue); -+ bfqg_stats_update_io_add(bfqq_group(RQ_BFQQ(rq)), bfqq, rq->cmd_flags); -+ -+ /* -+ * bfqq deserves to be weight-raised if: -+ * - it is sync, -+ * - it does not belong to a large burst, -+ * - it has been idle for enough time or is soft real-time, -+ * - is linked to a bfq_io_cq (it is not shared in any sense) -+ */ -+ in_burst = bfq_bfqq_in_large_burst(bfqq); -+ soft_rt = bfqd->bfq_wr_max_softrt_rate > 0 && -+ !in_burst && -+ time_is_before_jiffies(bfqq->soft_rt_next_start); -+ *interactive = -+ !in_burst && -+ idle_for_long_time; -+ wr_or_deserves_wr = bfqd->low_latency && -+ (bfqq->wr_coeff > 1 || -+ (bfq_bfqq_sync(bfqq) && -+ bfqq->bic && (*interactive || soft_rt))); -+ -+ bfq_log_bfqq(bfqd, bfqq, -+ "bfq_add_request: " -+ "in_burst %d, " -+ "soft_rt %d (next %lu), inter %d, bic %p", -+ bfq_bfqq_in_large_burst(bfqq), soft_rt, -+ bfqq->soft_rt_next_start, -+ *interactive, -+ bfqq->bic); -+ -+ /* -+ * Using the last flag, update budget and check whether bfqq -+ * may want to preempt the in-service queue. -+ */ -+ bfqq_wants_to_preempt = -+ bfq_bfqq_update_budg_for_activation(bfqd, bfqq, -+ arrived_in_time, -+ wr_or_deserves_wr); -+ -+ /* -+ * If bfqq happened to be activated in a burst, but has been -+ * idle for much more than an interactive queue, then we -+ * assume that, in the overall I/O initiated in the burst, the -+ * I/O associated with bfqq is finished. So bfqq does not need -+ * to be treated as a queue belonging to a burst -+ * anymore. Accordingly, we reset bfqq's in_large_burst flag -+ * if set, and remove bfqq from the burst list if it's -+ * there. We do not decrement burst_size, because the fact -+ * that bfqq does not need to belong to the burst list any -+ * more does not invalidate the fact that bfqq was created in -+ * a burst. 
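Ignoring the recharge subtleties for queues that are already raised, the coefficient and duration choices made above boil down to the following sketch (the factor and duration constants are hypothetical, not BFQ's tunables):

/* Illustrative sketch: which weight-raising coefficient and duration a
 * newly (re)activated queue would get, depending on whether it looks
 * interactive or soft real-time. Numbers are hypothetical. */
#include <stdio.h>
#include <stdbool.h>

#define WR_COEFF		30	/* base raising factor */
#define SOFTRT_WEIGHT_FACTOR	100
#define WR_RT_MAX_TIME_MS	300	/* soft-rt raising duration */

struct wr_choice {
	unsigned int coeff;
	unsigned int duration_ms;
};

static struct wr_choice start_weight_raising(bool interactive, bool soft_rt,
					     unsigned int interactive_dur_ms)
{
	struct wr_choice c = { .coeff = 1, .duration_ms = 0 };

	if (interactive) {
		c.coeff = WR_COEFF;
		c.duration_ms = interactive_dur_ms;
	} else if (soft_rt) {
		c.coeff = WR_COEFF * SOFTRT_WEIGHT_FACTOR;
		c.duration_ms = WR_RT_MAX_TIME_MS;
	}
	return c;
}

int main(void)
{
	struct wr_choice c = start_weight_raising(false, true, 4000);

	printf("coeff %u for %u ms\n", c.coeff, c.duration_ms);
	return 0;
}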
-+ */ -+ if (likely(!bfq_bfqq_just_created(bfqq)) && -+ idle_for_long_time && -+ time_is_before_jiffies( -+ bfqq->budget_timeout + -+ msecs_to_jiffies(10000))) { -+ hlist_del_init(&bfqq->burst_list_node); -+ bfq_clear_bfqq_in_large_burst(bfqq); -+ } -+ -+ bfq_clear_bfqq_just_created(bfqq); -+ -+ if (!bfq_bfqq_IO_bound(bfqq)) { -+ if (arrived_in_time) { -+ bfqq->requests_within_timer++; -+ if (bfqq->requests_within_timer >= -+ bfqd->bfq_requests_within_timer) -+ bfq_mark_bfqq_IO_bound(bfqq); -+ } else -+ bfqq->requests_within_timer = 0; -+ bfq_log_bfqq(bfqd, bfqq, "requests in time %d", -+ bfqq->requests_within_timer); -+ } -+ -+ if (bfqd->low_latency) { -+ if (unlikely(time_is_after_jiffies(bfqq->split_time))) -+ /* wraparound */ -+ bfqq->split_time = -+ jiffies - bfqd->bfq_wr_min_idle_time - 1; -+ -+ if (time_is_before_jiffies(bfqq->split_time + -+ bfqd->bfq_wr_min_idle_time)) { -+ bfq_update_bfqq_wr_on_rq_arrival(bfqd, bfqq, -+ old_wr_coeff, -+ wr_or_deserves_wr, -+ *interactive, -+ in_burst, -+ soft_rt); -+ -+ if (old_wr_coeff != bfqq->wr_coeff) -+ bfqq->entity.prio_changed = 1; -+ } -+ } -+ -+ bfqq->last_idle_bklogged = jiffies; -+ bfqq->service_from_backlogged = 0; -+ bfq_clear_bfqq_softrt_update(bfqq); -+ -+ bfq_add_bfqq_busy(bfqd, bfqq); -+ -+ /* -+ * Expire in-service queue only if preemption may be needed -+ * for guarantees. In this respect, the function -+ * next_queue_may_preempt just checks a simple, necessary -+ * condition, and not a sufficient condition based on -+ * timestamps. In fact, for the latter condition to be -+ * evaluated, timestamps would need first to be updated, and -+ * this operation is quite costly (see the comments on the -+ * function bfq_bfqq_update_budg_for_activation). -+ */ -+ if (bfqd->in_service_queue && bfqq_wants_to_preempt && -+ bfqd->in_service_queue->wr_coeff < bfqq->wr_coeff && -+ next_queue_may_preempt(bfqd)) { -+ struct bfq_queue *in_serv = -+ bfqd->in_service_queue; -+ BUG_ON(in_serv == bfqq); -+ -+ bfq_bfqq_expire(bfqd, bfqd->in_service_queue, -+ false, BFQ_BFQQ_PREEMPTED); -+ } -+} -+ -+static void bfq_add_request(struct request *rq) -+{ -+ struct bfq_queue *bfqq = RQ_BFQQ(rq); -+ struct bfq_data *bfqd = bfqq->bfqd; -+ struct request *next_rq, *prev; -+ unsigned int old_wr_coeff = bfqq->wr_coeff; -+ bool interactive = false; -+ -+ bfq_log_bfqq(bfqd, bfqq, "add_request: size %u %s", -+ blk_rq_sectors(rq), rq_is_sync(rq) ? "S" : "A"); -+ -+ if (bfqq->wr_coeff > 1) /* queue is being weight-raised */ -+ bfq_log_bfqq(bfqd, bfqq, -+ "raising period dur %u/%u msec, old coeff %u, w %d(%d)", -+ jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish), -+ jiffies_to_msecs(bfqq->wr_cur_max_time), -+ bfqq->wr_coeff, -+ bfqq->entity.weight, bfqq->entity.orig_weight); -+ -+ bfqq->queued[rq_is_sync(rq)]++; -+ bfqd->queued++; -+ -+ elv_rb_add(&bfqq->sort_list, rq); -+ -+ /* -+ * Check if this request is a better next-to-serve candidate. -+ */ -+ prev = bfqq->next_rq; -+ next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position); -+ BUG_ON(!next_rq); -+ bfqq->next_rq = next_rq; -+ -+ /* -+ * Adjust priority tree position, if next_rq changes. -+ */ -+ if (prev != bfqq->next_rq) -+ bfq_pos_tree_add_move(bfqd, bfqq); -+ -+ if (!bfq_bfqq_busy(bfqq)) /* switching to busy ... 
*/ -+ bfq_bfqq_handle_idle_busy_switch(bfqd, bfqq, old_wr_coeff, -+ rq, &interactive); -+ else { -+ if (bfqd->low_latency && old_wr_coeff == 1 && !rq_is_sync(rq) && -+ time_is_before_jiffies( -+ bfqq->last_wr_start_finish + -+ bfqd->bfq_wr_min_inter_arr_async)) { -+ bfqq->wr_coeff = bfqd->bfq_wr_coeff; -+ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); -+ -+ bfqd->wr_busy_queues++; -+ BUG_ON(bfqd->wr_busy_queues > bfqd->busy_queues); -+ bfqq->entity.prio_changed = 1; -+ bfq_log_bfqq(bfqd, bfqq, -+ "non-idle wrais starting, " -+ "wr_max_time %u wr_busy %d", -+ jiffies_to_msecs(bfqq->wr_cur_max_time), -+ bfqd->wr_busy_queues); -+ } -+ if (prev != bfqq->next_rq) -+ bfq_updated_next_req(bfqd, bfqq); -+ } -+ -+ /* -+ * Assign jiffies to last_wr_start_finish in the following -+ * cases: -+ * -+ * . if bfqq is not going to be weight-raised, because, for -+ * non weight-raised queues, last_wr_start_finish stores the -+ * arrival time of the last request; as of now, this piece -+ * of information is used only for deciding whether to -+ * weight-raise async queues -+ * -+ * . if bfqq is not weight-raised, because, if bfqq is now -+ * switching to weight-raised, then last_wr_start_finish -+ * stores the time when weight-raising starts -+ * -+ * . if bfqq is interactive, because, regardless of whether -+ * bfqq is currently weight-raised, the weight-raising -+ * period must start or restart (this case is considered -+ * separately because it is not detected by the above -+ * conditions, if bfqq is already weight-raised) -+ * -+ * last_wr_start_finish has to be updated also if bfqq is soft -+ * real-time, because the weight-raising period is constantly -+ * restarted on idle-to-busy transitions for these queues, but -+ * this is already done in bfq_bfqq_handle_idle_busy_switch if -+ * needed. 
-+ */ -+ if (bfqd->low_latency && -+ (old_wr_coeff == 1 || bfqq->wr_coeff == 1 || interactive)) -+ bfqq->last_wr_start_finish = jiffies; -+} -+ -+static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd, -+ struct bio *bio) -+{ -+ struct task_struct *tsk = current; -+ struct bfq_io_cq *bic; -+ struct bfq_queue *bfqq; -+ -+ bic = bfq_bic_lookup(bfqd, tsk->io_context); -+ if (!bic) -+ return NULL; -+ -+ bfqq = bic_to_bfqq(bic, op_is_sync(bio->bi_opf)); -+ if (bfqq) -+ return elv_rb_find(&bfqq->sort_list, bio_end_sector(bio)); -+ -+ return NULL; -+} -+ -+static sector_t get_sdist(sector_t last_pos, struct request *rq) -+{ -+ sector_t sdist = 0; -+ -+ if (last_pos) { -+ if (last_pos < blk_rq_pos(rq)) -+ sdist = blk_rq_pos(rq) - last_pos; -+ else -+ sdist = last_pos - blk_rq_pos(rq); -+ } -+ -+ return sdist; -+} -+ -+static void bfq_activate_request(struct request_queue *q, struct request *rq) -+{ -+ struct bfq_data *bfqd = q->elevator->elevator_data; -+ bfqd->rq_in_driver++; -+} -+ -+static void bfq_deactivate_request(struct request_queue *q, struct request *rq) -+{ -+ struct bfq_data *bfqd = q->elevator->elevator_data; -+ -+ BUG_ON(bfqd->rq_in_driver == 0); -+ bfqd->rq_in_driver--; -+} -+ -+static void bfq_remove_request(struct request *rq) -+{ -+ struct bfq_queue *bfqq = RQ_BFQQ(rq); -+ struct bfq_data *bfqd = bfqq->bfqd; -+ const int sync = rq_is_sync(rq); -+ -+ BUG_ON(bfqq->entity.service > bfqq->entity.budget && -+ bfqq == bfqd->in_service_queue); -+ -+ if (bfqq->next_rq == rq) { -+ bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq); -+ bfq_updated_next_req(bfqd, bfqq); -+ } -+ -+ if (rq->queuelist.prev != &rq->queuelist) -+ list_del_init(&rq->queuelist); -+ BUG_ON(bfqq->queued[sync] == 0); -+ bfqq->queued[sync]--; -+ bfqd->queued--; -+ elv_rb_del(&bfqq->sort_list, rq); -+ -+ if (RB_EMPTY_ROOT(&bfqq->sort_list)) { -+ bfqq->next_rq = NULL; -+ -+ BUG_ON(bfqq->entity.budget < 0); -+ -+ if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->in_service_queue) { -+ BUG_ON(bfqq->ref < 2); /* referred by rq and on tree */ -+ bfq_del_bfqq_busy(bfqd, bfqq, false); -+ /* -+ * bfqq emptied. In normal operation, when -+ * bfqq is empty, bfqq->entity.service and -+ * bfqq->entity.budget must contain, -+ * respectively, the service received and the -+ * budget used last time bfqq emptied. These -+ * facts do not hold in this case, as at least -+ * this last removal occurred while bfqq is -+ * not in service. To avoid inconsistencies, -+ * reset both bfqq->entity.service and -+ * bfqq->entity.budget, if bfqq has still a -+ * process that may issue I/O requests to it. -+ */ -+ bfqq->entity.budget = bfqq->entity.service = 0; -+ } -+ -+ /* -+ * Remove queue from request-position tree as it is empty. 
-+ */ -+ if (bfqq->pos_root) { -+ rb_erase(&bfqq->pos_node, bfqq->pos_root); -+ bfqq->pos_root = NULL; -+ } -+ } -+ -+ if (rq->cmd_flags & REQ_META) { -+ BUG_ON(bfqq->meta_pending == 0); -+ bfqq->meta_pending--; -+ } -+ bfqg_stats_update_io_remove(bfqq_group(bfqq), rq->cmd_flags); -+} -+ -+static enum elv_merge bfq_merge(struct request_queue *q, struct request **req, -+ struct bio *bio) -+{ -+ struct bfq_data *bfqd = q->elevator->elevator_data; -+ struct request *__rq; -+ -+ __rq = bfq_find_rq_fmerge(bfqd, bio); -+ if (__rq && elv_bio_merge_ok(__rq, bio)) { -+ *req = __rq; -+ return ELEVATOR_FRONT_MERGE; -+ } -+ -+ return ELEVATOR_NO_MERGE; -+} -+ -+static void bfq_merged_request(struct request_queue *q, struct request *req, -+ enum elv_merge type) -+{ -+ if (type == ELEVATOR_FRONT_MERGE && -+ rb_prev(&req->rb_node) && -+ blk_rq_pos(req) < -+ blk_rq_pos(container_of(rb_prev(&req->rb_node), -+ struct request, rb_node))) { -+ struct bfq_queue *bfqq = RQ_BFQQ(req); -+ struct bfq_data *bfqd = bfqq->bfqd; -+ struct request *prev, *next_rq; -+ -+ /* Reposition request in its sort_list */ -+ elv_rb_del(&bfqq->sort_list, req); -+ elv_rb_add(&bfqq->sort_list, req); -+ /* Choose next request to be served for bfqq */ -+ prev = bfqq->next_rq; -+ next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req, -+ bfqd->last_position); -+ BUG_ON(!next_rq); -+ bfqq->next_rq = next_rq; -+ /* -+ * If next_rq changes, update both the queue's budget to -+ * fit the new request and the queue's position in its -+ * rq_pos_tree. -+ */ -+ if (prev != bfqq->next_rq) { -+ bfq_updated_next_req(bfqd, bfqq); -+ bfq_pos_tree_add_move(bfqd, bfqq); -+ } -+ } -+} -+ -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+static void bfq_bio_merged(struct request_queue *q, struct request *req, -+ struct bio *bio) -+{ -+ bfqg_stats_update_io_merged(bfqq_group(RQ_BFQQ(req)), bio->bi_opf); -+} -+#endif -+ -+static void bfq_merged_requests(struct request_queue *q, struct request *rq, -+ struct request *next) -+{ -+ struct bfq_queue *bfqq = RQ_BFQQ(rq), *next_bfqq = RQ_BFQQ(next); -+ -+ /* -+ * If next and rq belong to the same bfq_queue and next is older -+ * than rq, then reposition rq in the fifo (by substituting next -+ * with rq). Otherwise, if next and rq belong to different -+ * bfq_queues, never reposition rq: in fact, we would have to -+ * reposition it with respect to next's position in its own fifo, -+ * which would most certainly be too expensive with respect to -+ * the benefits. -+ */ -+ if (bfqq == next_bfqq && -+ !list_empty(&rq->queuelist) && !list_empty(&next->queuelist) && -+ next->fifo_time < rq->fifo_time) { -+ list_del_init(&rq->queuelist); -+ list_replace_init(&next->queuelist, &rq->queuelist); -+ rq->fifo_time = next->fifo_time; -+ } -+ -+ if (bfqq->next_rq == next) -+ bfqq->next_rq = rq; -+ -+ bfq_remove_request(next); -+ bfqg_stats_update_io_merged(bfqq_group(bfqq), next->cmd_flags); -+} -+ -+/* Must be called with bfqq != NULL */ -+static void bfq_bfqq_end_wr(struct bfq_queue *bfqq) -+{ -+ BUG_ON(!bfqq); -+ -+ if (bfq_bfqq_busy(bfqq)) { -+ bfqq->bfqd->wr_busy_queues--; -+ BUG_ON(bfqq->bfqd->wr_busy_queues < 0); -+ } -+ bfqq->wr_coeff = 1; -+ bfqq->wr_cur_max_time = 0; -+ bfqq->last_wr_start_finish = jiffies; -+ /* -+ * Trigger a weight change on the next invocation of -+ * __bfq_entity_update_weight_prio. 
-+ */ -+ bfqq->entity.prio_changed = 1; -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "end_wr: wrais ending at %lu, rais_max_time %u", -+ bfqq->last_wr_start_finish, -+ jiffies_to_msecs(bfqq->wr_cur_max_time)); -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "end_wr: wr_busy %d", -+ bfqq->bfqd->wr_busy_queues); -+} -+ -+static void bfq_end_wr_async_queues(struct bfq_data *bfqd, -+ struct bfq_group *bfqg) -+{ -+ int i, j; -+ -+ for (i = 0; i < 2; i++) -+ for (j = 0; j < IOPRIO_BE_NR; j++) -+ if (bfqg->async_bfqq[i][j]) -+ bfq_bfqq_end_wr(bfqg->async_bfqq[i][j]); -+ if (bfqg->async_idle_bfqq) -+ bfq_bfqq_end_wr(bfqg->async_idle_bfqq); -+} -+ -+static void bfq_end_wr(struct bfq_data *bfqd) -+{ -+ struct bfq_queue *bfqq; -+ -+ spin_lock_irq(bfqd->queue->queue_lock); -+ -+ list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list) -+ bfq_bfqq_end_wr(bfqq); -+ list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list) -+ bfq_bfqq_end_wr(bfqq); -+ bfq_end_wr_async(bfqd); -+ -+ spin_unlock_irq(bfqd->queue->queue_lock); -+} -+ -+static sector_t bfq_io_struct_pos(void *io_struct, bool request) -+{ -+ if (request) -+ return blk_rq_pos(io_struct); -+ else -+ return ((struct bio *)io_struct)->bi_iter.bi_sector; -+} -+ -+static int bfq_rq_close_to_sector(void *io_struct, bool request, -+ sector_t sector) -+{ -+ return abs(bfq_io_struct_pos(io_struct, request) - sector) <= -+ BFQQ_CLOSE_THR; -+} -+ -+static struct bfq_queue *bfqq_find_close(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq, -+ sector_t sector) -+{ -+ struct rb_root *root = &bfq_bfqq_to_bfqg(bfqq)->rq_pos_tree; -+ struct rb_node *parent, *node; -+ struct bfq_queue *__bfqq; -+ -+ if (RB_EMPTY_ROOT(root)) -+ return NULL; -+ -+ /* -+ * First, if we find a request starting at the end of the last -+ * request, choose it. -+ */ -+ __bfqq = bfq_rq_pos_tree_lookup(bfqd, root, sector, &parent, NULL); -+ if (__bfqq) -+ return __bfqq; -+ -+ /* -+ * If the exact sector wasn't found, the parent of the NULL leaf -+ * will contain the closest sector (rq_pos_tree sorted by -+ * next_request position). -+ */ -+ __bfqq = rb_entry(parent, struct bfq_queue, pos_node); -+ if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector)) -+ return __bfqq; -+ -+ if (blk_rq_pos(__bfqq->next_rq) < sector) -+ node = rb_next(&__bfqq->pos_node); -+ else -+ node = rb_prev(&__bfqq->pos_node); -+ if (!node) -+ return NULL; -+ -+ __bfqq = rb_entry(node, struct bfq_queue, pos_node); -+ if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector)) -+ return __bfqq; -+ -+ return NULL; -+} -+ -+static struct bfq_queue *bfq_find_close_cooperator(struct bfq_data *bfqd, -+ struct bfq_queue *cur_bfqq, -+ sector_t sector) -+{ -+ struct bfq_queue *bfqq; -+ -+ /* -+ * We shall notice if some of the queues are cooperating, -+ * e.g., working closely on the same area of the device. In -+ * that case, we can group them together and: 1) don't waste -+ * time idling, and 2) serve the union of their requests in -+ * the best possible order for throughput. -+ */ -+ bfqq = bfqq_find_close(bfqd, cur_bfqq, sector); -+ if (!bfqq || bfqq == cur_bfqq) -+ return NULL; -+ -+ return bfqq; -+} -+ -+static struct bfq_queue * -+bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq) -+{ -+ int process_refs, new_process_refs; -+ struct bfq_queue *__bfqq; -+ -+ /* -+ * If there are no process references on the new_bfqq, then it is -+ * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain -+ * may have dropped their last reference (not just their last process -+ * reference). 
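bfqq_find_close above walks the rq_pos_tree to find the queue whose next request is closest to a given sector, and accepts a candidate only if it lies within the proximity threshold. The sketch below mimics the idea with a plain array instead of an rbtree; CLOSE_THR and the sector values are invented for illustration:

#include <stdint.h>
#include <stdio.h>

#define CLOSE_THR 1024ULL               /* assumed proximity threshold, in sectors */

/* Return the index of the queue whose head sector is closest to
 * 'sector', or -1 if none is within CLOSE_THR. 'heads' plays the role
 * of the rq_pos_tree: the next-request sector of each scheduled queue. */
static int find_close(const uint64_t *heads, int n, uint64_t sector)
{
        int best = -1;
        uint64_t best_dist = CLOSE_THR + 1;

        for (int i = 0; i < n; i++) {
                uint64_t d = heads[i] > sector ? heads[i] - sector
                                               : sector - heads[i];
                if (d <= CLOSE_THR && d < best_dist) {
                        best = i;
                        best_dist = d;
                }
        }
        return best;
}

int main(void)
{
        uint64_t heads[] = { 10000, 20480, 300000 };

        printf("closest to 20000: %d\n", find_close(heads, 3, 20000));
        return 0;
}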
-+ */ -+ if (!bfqq_process_refs(new_bfqq)) -+ return NULL; -+ -+ /* Avoid a circular list and skip interim queue merges. */ -+ while ((__bfqq = new_bfqq->new_bfqq)) { -+ if (__bfqq == bfqq) -+ return NULL; -+ new_bfqq = __bfqq; -+ } -+ -+ process_refs = bfqq_process_refs(bfqq); -+ new_process_refs = bfqq_process_refs(new_bfqq); -+ /* -+ * If the process for the bfqq has gone away, there is no -+ * sense in merging the queues. -+ */ -+ if (process_refs == 0 || new_process_refs == 0) -+ return NULL; -+ -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d", -+ new_bfqq->pid); -+ -+ /* -+ * Merging is just a redirection: the requests of the process -+ * owning one of the two queues are redirected to the other queue. -+ * The latter queue, in its turn, is set as shared if this is the -+ * first time that the requests of some process are redirected to -+ * it. -+ * -+ * We redirect bfqq to new_bfqq and not the opposite, because we -+ * are in the context of the process owning bfqq, hence we have -+ * the io_cq of this process. So we can immediately configure this -+ * io_cq to redirect the requests of the process to new_bfqq. -+ * -+ * NOTE, even if new_bfqq coincides with the in-service queue, the -+ * io_cq of new_bfqq is not available, because, if the in-service -+ * queue is shared, bfqd->in_service_bic may not point to the -+ * io_cq of the in-service queue. -+ * Redirecting the requests of the process owning bfqq to the -+ * currently in-service queue is in any case the best option, as -+ * we feed the in-service queue with new requests close to the -+ * last request served and, by doing so, hopefully increase the -+ * throughput. -+ */ -+ bfqq->new_bfqq = new_bfqq; -+ new_bfqq->ref += process_refs; -+ return new_bfqq; -+} -+ -+static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq, -+ struct bfq_queue *new_bfqq) -+{ -+ if (bfq_class_idle(bfqq) || bfq_class_idle(new_bfqq) || -+ (bfqq->ioprio_class != new_bfqq->ioprio_class)) -+ return false; -+ -+ /* -+ * If either of the queues has already been detected as seeky, -+ * then merging it with the other queue is unlikely to lead to -+ * sequential I/O. -+ */ -+ if (BFQQ_SEEKY(bfqq) || BFQQ_SEEKY(new_bfqq)) -+ return false; -+ -+ /* -+ * Interleaved I/O is known to be done by (some) applications -+ * only for reads, so it does not make sense to merge async -+ * queues. -+ */ -+ if (!bfq_bfqq_sync(bfqq) || !bfq_bfqq_sync(new_bfqq)) -+ return false; -+ -+ return true; -+} -+ -+/* -+ * If this function returns true, then bfqq cannot be merged. The idea -+ * is that true cooperation happens very early after processes start -+ * to do I/O. Usually, late cooperations are just accidental false -+ * positives. In case bfqq is weight-raised, such false positives -+ * would evidently degrade latency guarantees for bfqq. -+ */ -+static bool wr_from_too_long(struct bfq_queue *bfqq) -+{ -+ return bfqq->wr_coeff > 1 && -+ time_is_before_jiffies(bfqq->last_wr_start_finish + -+ msecs_to_jiffies(100)); -+} -+ -+/* -+ * Attempt to schedule a merge of bfqq with the currently in-service -+ * queue or with a close queue among the scheduled queues. Return -+ * NULL if no merge was scheduled, a pointer to the shared bfq_queue -+ * structure otherwise. 
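bfq_setup_merge above follows the new_bfqq chain to its last queue, refuses to create a cycle back to the merging queue, and proceeds only if both ends still have process references. A simplified userspace rendition of that walk, with hypothetical struct and field names:

#include <stddef.h>
#include <stdio.h>

struct fake_queue {                     /* illustrative stand-in, not struct bfq_queue */
        struct fake_queue *merged_into; /* queue this one was redirected to */
        int process_refs;               /* processes still using this queue */
        int ref;                        /* total reference count */
};

/* Walk the merge chain of 'target' to its last queue, refusing to form
 * a cycle back to 'q', and merge only if both ends still have processes. */
static struct fake_queue *setup_merge(struct fake_queue *q,
                                      struct fake_queue *target)
{
        struct fake_queue *next;

        if (!target->process_refs)
                return NULL;

        while ((next = target->merged_into)) {
                if (next == q)          /* would form a cycle */
                        return NULL;
                target = next;
        }

        if (!q->process_refs || !target->process_refs)
                return NULL;

        q->merged_into = target;
        target->ref += q->process_refs; /* redirected processes pin the target */
        return target;
}

int main(void)
{
        struct fake_queue a = { .process_refs = 1, .ref = 1 };
        struct fake_queue b = { .process_refs = 1, .ref = 1 };

        printf("merged: %s\n", setup_merge(&a, &b) ? "yes" : "no");
        return 0;
}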
-+ * -+ * The OOM queue is not allowed to participate to cooperation: in fact, since -+ * the requests temporarily redirected to the OOM queue could be redirected -+ * again to dedicated queues at any time, the state needed to correctly -+ * handle merging with the OOM queue would be quite complex and expensive -+ * to maintain. Besides, in such a critical condition as an out of memory, -+ * the benefits of queue merging may be little relevant, or even negligible. -+ * -+ * Weight-raised queues can be merged only if their weight-raising -+ * period has just started. In fact cooperating processes are usually -+ * started together. Thus, with this filter we avoid false positives -+ * that would jeopardize low-latency guarantees. -+ * -+ * WARNING: queue merging may impair fairness among non-weight raised -+ * queues, for at least two reasons: 1) the original weight of a -+ * merged queue may change during the merged state, 2) even being the -+ * weight the same, a merged queue may be bloated with many more -+ * requests than the ones produced by its originally-associated -+ * process. -+ */ -+static struct bfq_queue * -+bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq, -+ void *io_struct, bool request) -+{ -+ struct bfq_queue *in_service_bfqq, *new_bfqq; -+ -+ if (bfqq->new_bfqq) -+ return bfqq->new_bfqq; -+ -+ if (io_struct && wr_from_too_long(bfqq) && -+ likely(bfqq != &bfqd->oom_bfqq)) -+ bfq_log_bfqq(bfqd, bfqq, -+ "would have looked for coop, but bfq%d wr", -+ bfqq->pid); -+ -+ if (!io_struct || -+ wr_from_too_long(bfqq) || -+ unlikely(bfqq == &bfqd->oom_bfqq)) -+ return NULL; -+ -+ /* If there is only one backlogged queue, don't search. */ -+ if (bfqd->busy_queues == 1) -+ return NULL; -+ -+ in_service_bfqq = bfqd->in_service_queue; -+ -+ if (in_service_bfqq && in_service_bfqq != bfqq && -+ bfqd->in_service_bic && wr_from_too_long(in_service_bfqq) -+ && likely(in_service_bfqq == &bfqd->oom_bfqq)) -+ bfq_log_bfqq(bfqd, bfqq, -+ "would have tried merge with in-service-queue, but wr"); -+ -+ if (!in_service_bfqq || in_service_bfqq == bfqq || -+ !bfqd->in_service_bic || wr_from_too_long(in_service_bfqq) || -+ unlikely(in_service_bfqq == &bfqd->oom_bfqq)) -+ goto check_scheduled; -+ -+ if (bfq_rq_close_to_sector(io_struct, request, bfqd->last_position) && -+ bfqq->entity.parent == in_service_bfqq->entity.parent && -+ bfq_may_be_close_cooperator(bfqq, in_service_bfqq)) { -+ new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq); -+ if (new_bfqq) -+ return new_bfqq; -+ } -+ /* -+ * Check whether there is a cooperator among currently scheduled -+ * queues. The only thing we need is that the bio/request is not -+ * NULL, as we need it to establish whether a cooperator exists. 
-+ */ -+check_scheduled: -+ new_bfqq = bfq_find_close_cooperator(bfqd, bfqq, -+ bfq_io_struct_pos(io_struct, request)); -+ -+ BUG_ON(new_bfqq && bfqq->entity.parent != new_bfqq->entity.parent); -+ -+ if (new_bfqq && wr_from_too_long(new_bfqq) && -+ likely(new_bfqq != &bfqd->oom_bfqq) && -+ bfq_may_be_close_cooperator(bfqq, new_bfqq)) -+ bfq_log_bfqq(bfqd, bfqq, -+ "would have merged with bfq%d, but wr", -+ new_bfqq->pid); -+ -+ if (new_bfqq && !wr_from_too_long(new_bfqq) && -+ likely(new_bfqq != &bfqd->oom_bfqq) && -+ bfq_may_be_close_cooperator(bfqq, new_bfqq)) -+ return bfq_setup_merge(bfqq, new_bfqq); -+ -+ return NULL; -+} -+ -+static void bfq_bfqq_save_state(struct bfq_queue *bfqq) -+{ -+ struct bfq_io_cq *bic = bfqq->bic; -+ -+ /* -+ * If !bfqq->bic, the queue is already shared or its requests -+ * have already been redirected to a shared queue; both idle window -+ * and weight raising state have already been saved. Do nothing. -+ */ -+ if (!bic) -+ return; -+ -+ bic->saved_has_short_ttime = bfq_bfqq_has_short_ttime(bfqq); -+ bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq); -+ bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq); -+ bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node); -+ bic->saved_wr_coeff = bfqq->wr_coeff; -+ bic->saved_wr_start_at_switch_to_srt = bfqq->wr_start_at_switch_to_srt; -+ bic->saved_last_wr_start_finish = bfqq->last_wr_start_finish; -+ bic->saved_wr_cur_max_time = bfqq->wr_cur_max_time; -+ BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish)); -+} -+ -+static void bfq_get_bic_reference(struct bfq_queue *bfqq) -+{ -+ /* -+ * If bfqq->bic has a non-NULL value, the bic to which it belongs -+ * is about to begin using a shared bfq_queue. -+ */ -+ if (bfqq->bic) -+ atomic_long_inc(&bfqq->bic->icq.ioc->refcount); -+} -+ -+static void -+bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic, -+ struct bfq_queue *bfqq, struct bfq_queue *new_bfqq) -+{ -+ bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu", -+ (unsigned long) new_bfqq->pid); -+ /* Save weight raising and idle window of the merged queues */ -+ bfq_bfqq_save_state(bfqq); -+ bfq_bfqq_save_state(new_bfqq); -+ if (bfq_bfqq_IO_bound(bfqq)) -+ bfq_mark_bfqq_IO_bound(new_bfqq); -+ bfq_clear_bfqq_IO_bound(bfqq); -+ -+ /* -+ * If bfqq is weight-raised, then let new_bfqq inherit -+ * weight-raising. To reduce false positives, neglect the case -+ * where bfqq has just been created, but has not yet made it -+ * to be weight-raised (which may happen because EQM may merge -+ * bfqq even before bfq_add_request is executed for the first -+ * time for bfqq). Handling this case would however be very -+ * easy, thanks to the flag just_created. 
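bfq_bfqq_save_state above snapshots the volatile per-queue state (short think time, IO-boundness, burst membership, weight-raising fields) into the process's bfq_io_cq, so it can be restored if the shared queue is later split. A bare-bones sketch of the same idea, with invented struct names standing in for the kernel ones:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the fields the patch snapshots into the bic. */
struct queue_state {
        bool has_short_ttime;
        bool io_bound;
        bool in_large_burst;
        unsigned int wr_coeff;
        uint64_t last_wr_start_finish;
        uint64_t wr_cur_max_time;
};

struct fake_bic {                       /* per-process context, like bfq_io_cq */
        struct queue_state saved;       /* restored if the queue is split again */
};

static void save_state(struct fake_bic *bic, const struct queue_state *q)
{
        bic->saved = *q;                /* plain snapshot, taken before the merge */
}

int main(void)
{
        struct queue_state q = { .wr_coeff = 30, .io_bound = true };
        struct fake_bic bic;

        save_state(&bic, &q);
        printf("saved wr_coeff = %u\n", bic.saved.wr_coeff);
        return 0;
}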
-+ */ -+ if (new_bfqq->wr_coeff == 1 && bfqq->wr_coeff > 1) { -+ new_bfqq->wr_coeff = bfqq->wr_coeff; -+ new_bfqq->wr_cur_max_time = bfqq->wr_cur_max_time; -+ new_bfqq->last_wr_start_finish = bfqq->last_wr_start_finish; -+ new_bfqq->wr_start_at_switch_to_srt = -+ bfqq->wr_start_at_switch_to_srt; -+ if (bfq_bfqq_busy(new_bfqq)) { -+ bfqd->wr_busy_queues++; -+ BUG_ON(bfqd->wr_busy_queues > bfqd->busy_queues); -+ } -+ -+ new_bfqq->entity.prio_changed = 1; -+ bfq_log_bfqq(bfqd, new_bfqq, -+ "wr start after merge with %d, rais_max_time %u", -+ bfqq->pid, -+ jiffies_to_msecs(bfqq->wr_cur_max_time)); -+ } -+ -+ if (bfqq->wr_coeff > 1) { /* bfqq has given its wr to new_bfqq */ -+ bfqq->wr_coeff = 1; -+ bfqq->entity.prio_changed = 1; -+ if (bfq_bfqq_busy(bfqq)) { -+ bfqd->wr_busy_queues--; -+ BUG_ON(bfqd->wr_busy_queues < 0); -+ } -+ -+ } -+ -+ bfq_log_bfqq(bfqd, new_bfqq, "merge_bfqqs: wr_busy %d", -+ bfqd->wr_busy_queues); -+ -+ /* -+ * Grab a reference to the bic, to prevent it from being destroyed -+ * before being possibly touched by a bfq_split_bfqq(). -+ */ -+ bfq_get_bic_reference(bfqq); -+ bfq_get_bic_reference(new_bfqq); -+ /* -+ * Merge queues (that is, let bic redirect its requests to new_bfqq) -+ */ -+ bic_set_bfqq(bic, new_bfqq, 1); -+ bfq_mark_bfqq_coop(new_bfqq); -+ /* -+ * new_bfqq now belongs to at least two bics (it is a shared queue): -+ * set new_bfqq->bic to NULL. bfqq either: -+ * - does not belong to any bic any more, and hence bfqq->bic must -+ * be set to NULL, or -+ * - is a queue whose owning bics have already been redirected to a -+ * different queue, hence the queue is destined to not belong to -+ * any bic soon and bfqq->bic is already NULL (therefore the next -+ * assignment causes no harm). -+ */ -+ new_bfqq->bic = NULL; -+ bfqq->bic = NULL; -+ /* release process reference to bfqq */ -+ bfq_put_queue(bfqq); -+} -+ -+static int bfq_allow_bio_merge(struct request_queue *q, struct request *rq, -+ struct bio *bio) -+{ -+ struct bfq_data *bfqd = q->elevator->elevator_data; -+ bool is_sync = op_is_sync(bio->bi_opf); -+ struct bfq_io_cq *bic; -+ struct bfq_queue *bfqq, *new_bfqq; -+ -+ /* -+ * Disallow merge of a sync bio into an async request. -+ */ -+ if (is_sync && !rq_is_sync(rq)) -+ return false; -+ -+ /* -+ * Lookup the bfqq that this bio will be queued with. Allow -+ * merge only if rq is queued there. -+ * Queue lock is held here. -+ */ -+ bic = bfq_bic_lookup(bfqd, current->io_context); -+ if (!bic) -+ return false; -+ -+ bfqq = bic_to_bfqq(bic, is_sync); -+ /* -+ * We take advantage of this function to perform an early merge -+ * of the queues of possible cooperating processes. -+ */ -+ if (bfqq) { -+ new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false); -+ if (new_bfqq) { -+ bfq_merge_bfqqs(bfqd, bic, bfqq, new_bfqq); -+ /* -+ * If we get here, the bio will be queued in the -+ * shared queue, i.e., new_bfqq, so use new_bfqq -+ * to decide whether bio and rq can be merged. -+ */ -+ bfqq = new_bfqq; -+ } -+ } -+ -+ return bfqq == RQ_BFQQ(rq); -+} -+ -+static int bfq_allow_rq_merge(struct request_queue *q, struct request *rq, -+ struct request *next) -+{ -+ return RQ_BFQQ(rq) == RQ_BFQQ(next); -+} -+ -+/* -+ * Set the maximum time for the in-service queue to consume its -+ * budget. This prevents seeky processes from lowering the throughput. -+ * In practice, a time-slice service scheme is used with seeky -+ * processes. 
-+ */ -+static void bfq_set_budget_timeout(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq) -+{ -+ unsigned int timeout_coeff; -+ -+ if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time) -+ timeout_coeff = 1; -+ else -+ timeout_coeff = bfqq->entity.weight / bfqq->entity.orig_weight; -+ -+ bfqd->last_budget_start = ktime_get(); -+ -+ bfqq->budget_timeout = jiffies + -+ bfqd->bfq_timeout * timeout_coeff; -+ -+ bfq_log_bfqq(bfqd, bfqq, "set budget_timeout %u", -+ jiffies_to_msecs(bfqd->bfq_timeout * timeout_coeff)); -+} -+ -+static void __bfq_set_in_service_queue(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq) -+{ -+ if (bfqq) { -+ bfqg_stats_update_avg_queue_size(bfqq_group(bfqq)); -+ bfq_mark_bfqq_must_alloc(bfqq); -+ bfq_clear_bfqq_fifo_expire(bfqq); -+ -+ bfqd->budgets_assigned = (bfqd->budgets_assigned*7 + 256) / 8; -+ -+ BUG_ON(bfqq == bfqd->in_service_queue); -+ BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list)); -+ -+ if (time_is_before_jiffies(bfqq->last_wr_start_finish) && -+ bfqq->wr_coeff > 1 && -+ bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time && -+ time_is_before_jiffies(bfqq->budget_timeout)) { -+ /* -+ * For soft real-time queues, move the start -+ * of the weight-raising period forward by the -+ * time the queue has not received any -+ * service. Otherwise, a relatively long -+ * service delay is likely to cause the -+ * weight-raising period of the queue to end, -+ * because of the short duration of the -+ * weight-raising period of a soft real-time -+ * queue. It is worth noting that this move -+ * is not so dangerous for the other queues, -+ * because soft real-time queues are not -+ * greedy. -+ * -+ * To not add a further variable, we use the -+ * overloaded field budget_timeout to -+ * determine for how long the queue has not -+ * received service, i.e., how much time has -+ * elapsed since the queue expired. However, -+ * this is a little imprecise, because -+ * budget_timeout is set to jiffies if bfqq -+ * not only expires, but also remains with no -+ * request. -+ */ -+ if (time_after(bfqq->budget_timeout, -+ bfqq->last_wr_start_finish)) -+ bfqq->last_wr_start_finish += -+ jiffies - bfqq->budget_timeout; -+ else -+ bfqq->last_wr_start_finish = jiffies; -+ -+ if (time_is_after_jiffies(bfqq->last_wr_start_finish)) { -+ pr_crit( -+ "BFQ WARNING:last %lu budget %lu jiffies %lu", -+ bfqq->last_wr_start_finish, -+ bfqq->budget_timeout, -+ jiffies); -+ pr_crit("diff %lu", jiffies - -+ max_t(unsigned long, -+ bfqq->last_wr_start_finish, -+ bfqq->budget_timeout)); -+ bfqq->last_wr_start_finish = jiffies; -+ } -+ } -+ -+ bfq_set_budget_timeout(bfqd, bfqq); -+ bfq_log_bfqq(bfqd, bfqq, -+ "set_in_service_queue, cur-budget = %d", -+ bfqq->entity.budget); -+ } else -+ bfq_log(bfqd, "set_in_service_queue: NULL"); -+ -+ bfqd->in_service_queue = bfqq; -+} -+ -+/* -+ * Get and set a new queue for service. -+ */ -+static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd) -+{ -+ struct bfq_queue *bfqq = bfq_get_next_queue(bfqd); -+ -+ __bfq_set_in_service_queue(bfqd, bfqq); -+ return bfqq; -+} -+ -+static void bfq_arm_slice_timer(struct bfq_data *bfqd) -+{ -+ struct bfq_queue *bfqq = bfqd->in_service_queue; -+ struct bfq_io_cq *bic; -+ u32 sl; -+ -+ BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list)); -+ -+ /* Processes have exited, don't wait. 
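bfq_set_budget_timeout above stretches the slice of the in-service queue by the ratio weight/orig_weight, except during soft real-time weight raising, where the coefficient stays 1. A sketch of just that arithmetic, with made-up jiffies values:

#include <stdio.h>

/* Illustrative jiffies arithmetic; all values are made up. */
static unsigned long budget_timeout(unsigned long now, unsigned long timeout,
                                    unsigned int weight,
                                    unsigned int orig_weight,
                                    int soft_rt_raised)
{
        /* Soft real-time weight raising keeps the plain timeout;
         * otherwise the slice grows with the (possibly raised) weight. */
        unsigned int coeff = soft_rt_raised ? 1 : weight / orig_weight;

        return now + timeout * coeff;
}

int main(void)
{
        /* A queue raised from weight 100 to 3000 gets a 30x longer slice. */
        printf("timeout at %lu\n", budget_timeout(1000, 12, 3000, 100, 0));
        return 0;
}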
*/ -+ bic = bfqd->in_service_bic; -+ if (!bic || atomic_read(&bic->icq.ioc->active_ref) == 0) -+ return; -+ -+ bfq_mark_bfqq_wait_request(bfqq); -+ -+ /* -+ * We don't want to idle for seeks, but we do want to allow -+ * fair distribution of slice time for a process doing back-to-back -+ * seeks. So allow a little bit of time for him to submit a new rq. -+ * -+ * To prevent processes with (partly) seeky workloads from -+ * being too ill-treated, grant them a small fraction of the -+ * assigned budget before reducing the waiting time to -+ * BFQ_MIN_TT. This happened to help reduce latency. -+ */ -+ sl = bfqd->bfq_slice_idle; -+ /* -+ * Unless the queue is being weight-raised or the scenario is -+ * asymmetric, grant only minimum idle time if the queue -+ * is seeky. A long idling is preserved for a weight-raised -+ * queue, or, more in general, in an asymemtric scenario, -+ * because a long idling is needed for guaranteeing to a queue -+ * its reserved share of the throughput (in particular, it is -+ * needed if the queue has a higher weight than some other -+ * queue). -+ */ -+ if (BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 && -+ bfq_symmetric_scenario(bfqd)) -+ sl = min_t(u32, sl, BFQ_MIN_TT); -+ -+ bfqd->last_idling_start = ktime_get(); -+ hrtimer_start(&bfqd->idle_slice_timer, ns_to_ktime(sl), -+ HRTIMER_MODE_REL); -+ bfqg_stats_set_start_idle_time(bfqq_group(bfqq)); -+ bfq_log(bfqd, "arm idle: %ld/%ld ms", -+ sl / NSEC_PER_MSEC, bfqd->bfq_slice_idle / NSEC_PER_MSEC); -+} -+ -+/* -+ * In autotuning mode, max_budget is dynamically recomputed as the -+ * amount of sectors transferred in timeout at the estimated peak -+ * rate. This enables BFQ to utilize a full timeslice with a full -+ * budget, even if the in-service queue is served at peak rate. And -+ * this maximises throughput with sequential workloads. -+ */ -+static unsigned long bfq_calc_max_budget(struct bfq_data *bfqd) -+{ -+ return (u64)bfqd->peak_rate * USEC_PER_MSEC * -+ jiffies_to_msecs(bfqd->bfq_timeout)>>BFQ_RATE_SHIFT; -+} -+ -+/* -+ * Update parameters related to throughput and responsiveness, as a -+ * function of the estimated peak rate. See comments on -+ * bfq_calc_max_budget(), and on T_slow and T_fast arrays. -+ */ -+static void update_thr_responsiveness_params(struct bfq_data *bfqd) -+{ -+ int dev_type = blk_queue_nonrot(bfqd->queue); -+ -+ if (bfqd->bfq_user_max_budget == 0) { -+ bfqd->bfq_max_budget = -+ bfq_calc_max_budget(bfqd); -+ BUG_ON(bfqd->bfq_max_budget < 0); -+ bfq_log(bfqd, "new max_budget = %d", -+ bfqd->bfq_max_budget); -+ } -+ -+ if (bfqd->device_speed == BFQ_BFQD_FAST && -+ bfqd->peak_rate < device_speed_thresh[dev_type]) { -+ bfqd->device_speed = BFQ_BFQD_SLOW; -+ bfqd->RT_prod = R_slow[dev_type] * -+ T_slow[dev_type]; -+ } else if (bfqd->device_speed == BFQ_BFQD_SLOW && -+ bfqd->peak_rate > device_speed_thresh[dev_type]) { -+ bfqd->device_speed = BFQ_BFQD_FAST; -+ bfqd->RT_prod = R_fast[dev_type] * -+ T_fast[dev_type]; -+ } -+ -+ bfq_log(bfqd, -+"dev_type %s dev_speed_class = %s (%llu sects/sec), thresh %llu setcs/sec", -+ dev_type == 0 ? "ROT" : "NONROT", -+ bfqd->device_speed == BFQ_BFQD_FAST ? "FAST" : "SLOW", -+ bfqd->device_speed == BFQ_BFQD_FAST ? 
-+ (USEC_PER_SEC*(u64)R_fast[dev_type])>>BFQ_RATE_SHIFT : -+ (USEC_PER_SEC*(u64)R_slow[dev_type])>>BFQ_RATE_SHIFT, -+ (USEC_PER_SEC*(u64)device_speed_thresh[dev_type])>> -+ BFQ_RATE_SHIFT); -+} -+ -+static void bfq_reset_rate_computation(struct bfq_data *bfqd, struct request *rq) -+{ -+ if (rq != NULL) { /* new rq dispatch now, reset accordingly */ -+ bfqd->last_dispatch = bfqd->first_dispatch = ktime_get_ns() ; -+ bfqd->peak_rate_samples = 1; -+ bfqd->sequential_samples = 0; -+ bfqd->tot_sectors_dispatched = bfqd->last_rq_max_size = -+ blk_rq_sectors(rq); -+ } else /* no new rq dispatched, just reset the number of samples */ -+ bfqd->peak_rate_samples = 0; /* full re-init on next disp. */ -+ -+ bfq_log(bfqd, -+ "reset_rate_computation at end, sample %u/%u tot_sects %llu", -+ bfqd->peak_rate_samples, bfqd->sequential_samples, -+ bfqd->tot_sectors_dispatched); -+} -+ -+static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq) -+{ -+ u32 rate, weight, divisor; -+ -+ /* -+ * For the convergence property to hold (see comments on -+ * bfq_update_peak_rate()) and for the assessment to be -+ * reliable, a minimum number of samples must be present, and -+ * a minimum amount of time must have elapsed. If not so, do -+ * not compute new rate. Just reset parameters, to get ready -+ * for a new evaluation attempt. -+ */ -+ if (bfqd->peak_rate_samples < BFQ_RATE_MIN_SAMPLES || -+ bfqd->delta_from_first < BFQ_RATE_MIN_INTERVAL) { -+ bfq_log(bfqd, -+ "update_rate_reset: only resetting, delta_first %lluus samples %d", -+ bfqd->delta_from_first>>10, bfqd->peak_rate_samples); -+ goto reset_computation; -+ } -+ -+ /* -+ * If a new request completion has occurred after last -+ * dispatch, then, to approximate the rate at which requests -+ * have been served by the device, it is more precise to -+ * extend the observation interval to the last completion. -+ */ -+ bfqd->delta_from_first = -+ max_t(u64, bfqd->delta_from_first, -+ bfqd->last_completion - bfqd->first_dispatch); -+ -+ BUG_ON(bfqd->delta_from_first == 0); -+ /* -+ * Rate computed in sects/usec, and not sects/nsec, for -+ * precision issues. -+ */ -+ rate = div64_ul(bfqd->tot_sectors_dispatched<delta_from_first, NSEC_PER_USEC)); -+ -+ bfq_log(bfqd, -+"update_rate_reset: tot_sects %llu delta_first %lluus rate %llu sects/s (%d)", -+ bfqd->tot_sectors_dispatched, bfqd->delta_from_first>>10, -+ ((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT), -+ rate > 20< 20M sectors/sec) -+ */ -+ if ((bfqd->sequential_samples < (3 * bfqd->peak_rate_samples)>>2 && -+ rate <= bfqd->peak_rate) || -+ rate > 20<peak_rate_samples, bfqd->sequential_samples, -+ ((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT), -+ ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT)); -+ goto reset_computation; -+ } else { -+ bfq_log(bfqd, -+ "update_rate_reset: do update, samples %u/%u rate/peak %llu/%llu", -+ bfqd->peak_rate_samples, bfqd->sequential_samples, -+ ((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT), -+ ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT)); -+ } -+ -+ /* -+ * We have to update the peak rate, at last! To this purpose, -+ * we use a low-pass filter. We compute the smoothing constant -+ * of the filter as a function of the 'weight' of the new -+ * measured rate. -+ * -+ * As can be seen in next formulas, we define this weight as a -+ * quantity proportional to how sequential the workload is, -+ * and to how long the observation time interval is. -+ * -+ * The weight runs from 0 to 8. 
The maximum value of the -+ * weight, 8, yields the minimum value for the smoothing -+ * constant. At this minimum value for the smoothing constant, -+ * the measured rate contributes for half of the next value of -+ * the estimated peak rate. -+ * -+ * So, the first step is to compute the weight as a function -+ * of how sequential the workload is. Note that the weight -+ * cannot reach 9, because bfqd->sequential_samples cannot -+ * become equal to bfqd->peak_rate_samples, which, in its -+ * turn, holds true because bfqd->sequential_samples is not -+ * incremented for the first sample. -+ */ -+ weight = (9 * bfqd->sequential_samples) / bfqd->peak_rate_samples; -+ -+ /* -+ * Second step: further refine the weight as a function of the -+ * duration of the observation interval. -+ */ -+ weight = min_t(u32, 8, -+ div_u64(weight * bfqd->delta_from_first, -+ BFQ_RATE_REF_INTERVAL)); -+ -+ /* -+ * Divisor ranging from 10, for minimum weight, to 2, for -+ * maximum weight. -+ */ -+ divisor = 10 - weight; -+ BUG_ON(divisor == 0); -+ -+ /* -+ * Finally, update peak rate: -+ * -+ * peak_rate = peak_rate * (divisor-1) / divisor + rate / divisor -+ */ -+ bfqd->peak_rate *= divisor-1; -+ bfqd->peak_rate /= divisor; -+ rate /= divisor; /* smoothing constant alpha = 1/divisor */ -+ -+ bfq_log(bfqd, -+ "update_rate_reset: divisor %d tmp_peak_rate %llu tmp_rate %u", -+ divisor, -+ ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT), -+ (u32)((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT)); -+ -+ BUG_ON(bfqd->peak_rate == 0); -+ BUG_ON(bfqd->peak_rate > 20<peak_rate += rate; -+ update_thr_responsiveness_params(bfqd); -+ BUG_ON(bfqd->peak_rate > 20<peak_rate_samples == 0) { /* first dispatch */ -+ bfq_log(bfqd, -+ "update_peak_rate: goto reset, samples %d", -+ bfqd->peak_rate_samples) ; -+ bfq_reset_rate_computation(bfqd, rq); -+ goto update_last_values; /* will add one sample */ -+ } -+ -+ /* -+ * Device idle for very long: the observation interval lasting -+ * up to this dispatch cannot be a valid observation interval -+ * for computing a new peak rate (similarly to the late- -+ * completion event in bfq_completed_request()). 
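The peak-rate update above is a low-pass filter: a weight between 0 and 8, derived from how sequential and how long the observation was, selects a divisor between 10 and 2, and the new sample then contributes 1/divisor of the next estimate. A standalone sketch of one filter step, with arbitrary sample values:

#include <stdint.h>
#include <stdio.h>

/* One filter step: the more sequential and the longer the observation,
 * the larger 'weight' (0..8) and the more the new sample counts. */
static uint64_t update_peak_rate(uint64_t peak, uint64_t rate,
                                 unsigned int weight /* 0..8 */)
{
        unsigned int divisor = 10 - weight;     /* 10 (weight 0) .. 2 (weight 8) */

        return peak * (divisor - 1) / divisor + rate / divisor;
}

int main(void)
{
        uint64_t peak = 100000;                 /* previous estimate */

        /* A fully sequential, long observation (weight 8) moves the
         * estimate halfway towards the new measurement. */
        printf("%llu\n", (unsigned long long)update_peak_rate(peak, 160000, 8));
        return 0;
}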
Go to -+ * update_rate_and_reset to have the following three steps -+ * taken: -+ * - close the observation interval at the last (previous) -+ * request dispatch or completion -+ * - compute rate, if possible, for that observation interval -+ * - start a new observation interval with this dispatch -+ */ -+ if (now_ns - bfqd->last_dispatch > 100*NSEC_PER_MSEC && -+ bfqd->rq_in_driver == 0) { -+ bfq_log(bfqd, -+"update_peak_rate: jumping to updating&resetting delta_last %lluus samples %d", -+ (now_ns - bfqd->last_dispatch)>>10, -+ bfqd->peak_rate_samples) ; -+ goto update_rate_and_reset; -+ } -+ -+ /* Update sampling information */ -+ bfqd->peak_rate_samples++; -+ -+ if ((bfqd->rq_in_driver > 0 || -+ now_ns - bfqd->last_completion < BFQ_MIN_TT) -+ && get_sdist(bfqd->last_position, rq) < BFQQ_SEEK_THR) -+ bfqd->sequential_samples++; -+ -+ bfqd->tot_sectors_dispatched += blk_rq_sectors(rq); -+ -+ /* Reset max observed rq size every 32 dispatches */ -+ if (likely(bfqd->peak_rate_samples % 32)) -+ bfqd->last_rq_max_size = -+ max_t(u32, blk_rq_sectors(rq), bfqd->last_rq_max_size); -+ else -+ bfqd->last_rq_max_size = blk_rq_sectors(rq); -+ -+ bfqd->delta_from_first = now_ns - bfqd->first_dispatch; -+ -+ bfq_log(bfqd, -+ "update_peak_rate: added samples %u/%u tot_sects %llu delta_first %lluus", -+ bfqd->peak_rate_samples, bfqd->sequential_samples, -+ bfqd->tot_sectors_dispatched, -+ bfqd->delta_from_first>>10); -+ -+ /* Target observation interval not yet reached, go on sampling */ -+ if (bfqd->delta_from_first < BFQ_RATE_REF_INTERVAL) -+ goto update_last_values; -+ -+update_rate_and_reset: -+ bfq_update_rate_reset(bfqd, rq); -+update_last_values: -+ bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq); -+ bfqd->last_dispatch = now_ns; -+ -+ bfq_log(bfqd, -+ "update_peak_rate: delta_first %lluus last_pos %llu peak_rate %llu", -+ (now_ns - bfqd->first_dispatch)>>10, -+ (unsigned long long) bfqd->last_position, -+ ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT)); -+ bfq_log(bfqd, -+ "update_peak_rate: samples at end %d", bfqd->peak_rate_samples); -+} -+ -+/* -+ * Move request from internal lists to the dispatch list of the request queue -+ */ -+static void bfq_dispatch_insert(struct request_queue *q, struct request *rq) -+{ -+ struct bfq_queue *bfqq = RQ_BFQQ(rq); -+ -+ /* -+ * For consistency, the next instruction should have been executed -+ * after removing the request from the queue and dispatching it. -+ * We execute instead this instruction before bfq_remove_request() -+ * (and hence introduce a temporary inconsistency), for efficiency. -+ * In fact, in a forced_dispatch, this prevents two counters related -+ * to bfqq->dispatched to risk to be uselessly decremented if bfqq -+ * is not in service, and then to be incremented again after -+ * incrementing bfqq->dispatched. -+ */ -+ bfqq->dispatched++; -+ bfq_update_peak_rate(q->elevator->elevator_data, rq); -+ -+ bfq_remove_request(rq); -+ elv_dispatch_sort(q, rq); -+} -+ -+static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq) -+{ -+ BUG_ON(bfqq != bfqd->in_service_queue); -+ -+ /* -+ * If this bfqq is shared between multiple processes, check -+ * to make sure that those processes are still issuing I/Os -+ * within the mean seek distance. If not, it may be time to -+ * break the queues apart again. 
-+ */ -+ if (bfq_bfqq_coop(bfqq) && BFQQ_SEEKY(bfqq)) -+ bfq_mark_bfqq_split_coop(bfqq); -+ -+ if (RB_EMPTY_ROOT(&bfqq->sort_list)) { -+ if (bfqq->dispatched == 0) -+ /* -+ * Overloading budget_timeout field to store -+ * the time at which the queue remains with no -+ * backlog and no outstanding request; used by -+ * the weight-raising mechanism. -+ */ -+ bfqq->budget_timeout = jiffies; -+ -+ bfq_del_bfqq_busy(bfqd, bfqq, true); -+ } else { -+ bfq_requeue_bfqq(bfqd, bfqq); -+ /* -+ * Resort priority tree of potential close cooperators. -+ */ -+ bfq_pos_tree_add_move(bfqd, bfqq); -+ } -+ -+ /* -+ * All in-service entities must have been properly deactivated -+ * or requeued before executing the next function, which -+ * resets all in-service entites as no more in service. -+ */ -+ __bfq_bfqd_reset_in_service(bfqd); -+} -+ -+/** -+ * __bfq_bfqq_recalc_budget - try to adapt the budget to the @bfqq behavior. -+ * @bfqd: device data. -+ * @bfqq: queue to update. -+ * @reason: reason for expiration. -+ * -+ * Handle the feedback on @bfqq budget at queue expiration. -+ * See the body for detailed comments. -+ */ -+static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq, -+ enum bfqq_expiration reason) -+{ -+ struct request *next_rq; -+ int budget, min_budget; -+ -+ BUG_ON(bfqq != bfqd->in_service_queue); -+ -+ min_budget = bfq_min_budget(bfqd); -+ -+ if (bfqq->wr_coeff == 1) -+ budget = bfqq->max_budget; -+ else /* -+ * Use a constant, low budget for weight-raised queues, -+ * to help achieve a low latency. Keep it slightly higher -+ * than the minimum possible budget, to cause a little -+ * bit fewer expirations. -+ */ -+ budget = 2 * min_budget; -+ -+ bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %d, budg left %d", -+ bfqq->entity.budget, bfq_bfqq_budget_left(bfqq)); -+ bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last max_budg %d, min budg %d", -+ budget, bfq_min_budget(bfqd)); -+ bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d", -+ bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue)); -+ -+ if (bfq_bfqq_sync(bfqq) && bfqq->wr_coeff == 1) { -+ switch (reason) { -+ /* -+ * Caveat: in all the following cases we trade latency -+ * for throughput. -+ */ -+ case BFQ_BFQQ_TOO_IDLE: -+ /* -+ * This is the only case where we may reduce -+ * the budget: if there is no request of the -+ * process still waiting for completion, then -+ * we assume (tentatively) that the timer has -+ * expired because the batch of requests of -+ * the process could have been served with a -+ * smaller budget. Hence, betting that -+ * process will behave in the same way when it -+ * becomes backlogged again, we reduce its -+ * next budget. As long as we guess right, -+ * this budget cut reduces the latency -+ * experienced by the process. -+ * -+ * However, if there are still outstanding -+ * requests, then the process may have not yet -+ * issued its next request just because it is -+ * still waiting for the completion of some of -+ * the still outstanding ones. So in this -+ * subcase we do not reduce its budget, on the -+ * contrary we increase it to possibly boost -+ * the throughput, as discussed in the -+ * comments to the BUDGET_TIMEOUT case. 
-+ */ -+ if (bfqq->dispatched > 0) /* still outstanding reqs */ -+ budget = min(budget * 2, bfqd->bfq_max_budget); -+ else { -+ if (budget > 5 * min_budget) -+ budget -= 4 * min_budget; -+ else -+ budget = min_budget; -+ } -+ break; -+ case BFQ_BFQQ_BUDGET_TIMEOUT: -+ /* -+ * We double the budget here because it gives -+ * the chance to boost the throughput if this -+ * is not a seeky process (and has bumped into -+ * this timeout because of, e.g., ZBR). -+ */ -+ budget = min(budget * 2, bfqd->bfq_max_budget); -+ break; -+ case BFQ_BFQQ_BUDGET_EXHAUSTED: -+ /* -+ * The process still has backlog, and did not -+ * let either the budget timeout or the disk -+ * idling timeout expire. Hence it is not -+ * seeky, has a short thinktime and may be -+ * happy with a higher budget too. So -+ * definitely increase the budget of this good -+ * candidate to boost the disk throughput. -+ */ -+ budget = min(budget * 4, bfqd->bfq_max_budget); -+ break; -+ case BFQ_BFQQ_NO_MORE_REQUESTS: -+ /* -+ * For queues that expire for this reason, it -+ * is particularly important to keep the -+ * budget close to the actual service they -+ * need. Doing so reduces the timestamp -+ * misalignment problem described in the -+ * comments in the body of -+ * __bfq_activate_entity. In fact, suppose -+ * that a queue systematically expires for -+ * BFQ_BFQQ_NO_MORE_REQUESTS and presents a -+ * new request in time to enjoy timestamp -+ * back-shifting. The larger the budget of the -+ * queue is with respect to the service the -+ * queue actually requests in each service -+ * slot, the more times the queue can be -+ * reactivated with the same virtual finish -+ * time. It follows that, even if this finish -+ * time is pushed to the system virtual time -+ * to reduce the consequent timestamp -+ * misalignment, the queue unjustly enjoys for -+ * many re-activations a lower finish time -+ * than all newly activated queues. -+ * -+ * The service needed by bfqq is measured -+ * quite precisely by bfqq->entity.service. -+ * Since bfqq does not enjoy device idling, -+ * bfqq->entity.service is equal to the number -+ * of sectors that the process associated with -+ * bfqq requested to read/write before waiting -+ * for request completions, or blocking for -+ * other reasons. -+ */ -+ budget = max_t(int, bfqq->entity.service, min_budget); -+ break; -+ default: -+ return; -+ } -+ } else if (!bfq_bfqq_sync(bfqq)) -+ /* -+ * Async queues get always the maximum possible -+ * budget, as for them we do not care about latency -+ * (in addition, their ability to dispatch is limited -+ * by the charging factor). -+ */ -+ budget = bfqd->bfq_max_budget; -+ -+ bfqq->max_budget = budget; -+ -+ if (bfqd->budgets_assigned >= bfq_stats_min_budgets && -+ !bfqd->bfq_user_max_budget) -+ bfqq->max_budget = min(bfqq->max_budget, bfqd->bfq_max_budget); -+ -+ /* -+ * If there is still backlog, then assign a new budget, making -+ * sure that it is large enough for the next request. Since -+ * the finish time of bfqq must be kept in sync with the -+ * budget, be sure to call __bfq_bfqq_expire() *after* this -+ * update. -+ * -+ * If there is no backlog, then no need to update the budget; -+ * it will be updated on the arrival of a new request. 
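The budget feedback above can be read as a small decision table on the expiration reason: shrink after an idle timeout with no outstanding requests, double after a budget timeout, quadruple after budget exhaustion, and track the measured service when the queue simply ran out of requests. A condensed userspace rendition for a sync, non-weight-raised queue; the enum values and limits are stand-ins, not the kernel symbols:

#include <stdio.h>

enum expire_reason {            /* mirrors the cases discussed above */
        TOO_IDLE,
        BUDGET_TIMEOUT,
        BUDGET_EXHAUSTED,
        NO_MORE_REQUESTS,
};

static int min_int(int a, int b) { return a < b ? a : b; }
static int max_int(int a, int b) { return a > b ? a : b; }

/* Next budget for a sync, non-weight-raised queue. 'service' is what the
 * queue actually consumed, 'outstanding' its still-in-flight requests. */
static int next_budget(int budget, int min_budget, int max_budget,
                       int service, int outstanding, enum expire_reason why)
{
        switch (why) {
        case TOO_IDLE:
                if (outstanding)                /* maybe just waiting for completions */
                        return min_int(budget * 2, max_budget);
                return budget > 5 * min_budget ? budget - 4 * min_budget
                                               : min_budget;
        case BUDGET_TIMEOUT:
                return min_int(budget * 2, max_budget);
        case BUDGET_EXHAUSTED:
                return min_int(budget * 4, max_budget);
        case NO_MORE_REQUESTS:
                return max_int(service, min_budget);
        }
        return budget;
}

int main(void)
{
        printf("%d\n", next_budget(8192, 1024, 65536, 0, 0, BUDGET_EXHAUSTED));
        return 0;
}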
-+ */ -+ next_rq = bfqq->next_rq; -+ if (next_rq) { -+ BUG_ON(reason == BFQ_BFQQ_TOO_IDLE || -+ reason == BFQ_BFQQ_NO_MORE_REQUESTS); -+ bfqq->entity.budget = max_t(unsigned long, bfqq->max_budget, -+ bfq_serv_to_charge(next_rq, bfqq)); -+ BUG_ON(!bfq_bfqq_busy(bfqq)); -+ BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list)); -+ } -+ -+ bfq_log_bfqq(bfqd, bfqq, "head sect: %u, new budget %d", -+ next_rq ? blk_rq_sectors(next_rq) : 0, -+ bfqq->entity.budget); -+} -+ -+/* -+ * Return true if the process associated with bfqq is "slow". The slow -+ * flag is used, in addition to the budget timeout, to reduce the -+ * amount of service provided to seeky processes, and thus reduce -+ * their chances to lower the throughput. More details in the comments -+ * on the function bfq_bfqq_expire(). -+ * -+ * An important observation is in order: as discussed in the comments -+ * on the function bfq_update_peak_rate(), with devices with internal -+ * queues, it is hard if ever possible to know when and for how long -+ * an I/O request is processed by the device (apart from the trivial -+ * I/O pattern where a new request is dispatched only after the -+ * previous one has been completed). This makes it hard to evaluate -+ * the real rate at which the I/O requests of each bfq_queue are -+ * served. In fact, for an I/O scheduler like BFQ, serving a -+ * bfq_queue means just dispatching its requests during its service -+ * slot (i.e., until the budget of the queue is exhausted, or the -+ * queue remains idle, or, finally, a timeout fires). But, during the -+ * service slot of a bfq_queue, around 100 ms at most, the device may -+ * be even still processing requests of bfq_queues served in previous -+ * service slots. On the opposite end, the requests of the in-service -+ * bfq_queue may be completed after the service slot of the queue -+ * finishes. -+ * -+ * Anyway, unless more sophisticated solutions are used -+ * (where possible), the sum of the sizes of the requests dispatched -+ * during the service slot of a bfq_queue is probably the only -+ * approximation available for the service received by the bfq_queue -+ * during its service slot. And this sum is the quantity used in this -+ * function to evaluate the I/O speed of a process. -+ */ -+static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq, -+ bool compensate, enum bfqq_expiration reason, -+ unsigned long *delta_ms) -+{ -+ ktime_t delta_ktime; -+ u32 delta_usecs; -+ bool slow = BFQQ_SEEKY(bfqq); /* if delta too short, use seekyness */ -+ -+ if (!bfq_bfqq_sync(bfqq)) -+ return false; -+ -+ if (compensate) -+ delta_ktime = bfqd->last_idling_start; -+ else -+ delta_ktime = ktime_get(); -+ delta_ktime = ktime_sub(delta_ktime, bfqd->last_budget_start); -+ delta_usecs = ktime_to_us(delta_ktime); -+ -+ /* don't use too short time intervals */ -+ if (delta_usecs < 1000) { -+ if (blk_queue_nonrot(bfqd->queue)) -+ /* -+ * give same worst-case guarantees as idling -+ * for seeky -+ */ -+ *delta_ms = BFQ_MIN_TT / NSEC_PER_MSEC; -+ else /* charge at least one seek */ -+ *delta_ms = bfq_slice_idle / NSEC_PER_MSEC; -+ -+ bfq_log(bfqd, "bfq_bfqq_is_slow: too short %u", delta_usecs); -+ -+ return slow; -+ } -+ -+ *delta_ms = delta_usecs / USEC_PER_MSEC; -+ -+ /* -+ * Use only long (> 20ms) intervals to filter out excessive -+ * spikes in service rate estimation. -+ */ -+ if (delta_usecs > 20000) { -+ /* -+ * Caveat for rotational devices: processes doing I/O -+ * in the slower disk zones tend to be slow(er) even -+ * if not seeky. 
In this respect, the estimated peak -+ * rate is likely to be an average over the disk -+ * surface. Accordingly, to not be too harsh with -+ * unlucky processes, a process is deemed slow only if -+ * its rate has been lower than half of the estimated -+ * peak rate. -+ */ -+ slow = bfqq->entity.service < bfqd->bfq_max_budget / 2; -+ bfq_log(bfqd, "bfq_bfqq_is_slow: relative rate %d/%d", -+ bfqq->entity.service, bfqd->bfq_max_budget); -+ } -+ -+ bfq_log_bfqq(bfqd, bfqq, "bfq_bfqq_is_slow: slow %d", slow); -+ -+ return slow; -+} -+ -+/* -+ * To be deemed as soft real-time, an application must meet two -+ * requirements. First, the application must not require an average -+ * bandwidth higher than the approximate bandwidth required to playback or -+ * record a compressed high-definition video. -+ * The next function is invoked on the completion of the last request of a -+ * batch, to compute the next-start time instant, soft_rt_next_start, such -+ * that, if the next request of the application does not arrive before -+ * soft_rt_next_start, then the above requirement on the bandwidth is met. -+ * -+ * The second requirement is that the request pattern of the application is -+ * isochronous, i.e., that, after issuing a request or a batch of requests, -+ * the application stops issuing new requests until all its pending requests -+ * have been completed. After that, the application may issue a new batch, -+ * and so on. -+ * For this reason the next function is invoked to compute -+ * soft_rt_next_start only for applications that meet this requirement, -+ * whereas soft_rt_next_start is set to infinity for applications that do -+ * not. -+ * -+ * Unfortunately, even a greedy application may happen to behave in an -+ * isochronous way if the CPU load is high. In fact, the application may -+ * stop issuing requests while the CPUs are busy serving other processes, -+ * then restart, then stop again for a while, and so on. In addition, if -+ * the disk achieves a low enough throughput with the request pattern -+ * issued by the application (e.g., because the request pattern is random -+ * and/or the device is slow), then the application may meet the above -+ * bandwidth requirement too. To prevent such a greedy application to be -+ * deemed as soft real-time, a further rule is used in the computation of -+ * soft_rt_next_start: soft_rt_next_start must be higher than the current -+ * time plus the maximum time for which the arrival of a request is waited -+ * for when a sync queue becomes idle, namely bfqd->bfq_slice_idle. -+ * This filters out greedy applications, as the latter issue instead their -+ * next request as soon as possible after the last one has been completed -+ * (in contrast, when a batch of requests is completed, a soft real-time -+ * application spends some time processing data). -+ * -+ * Unfortunately, the last filter may easily generate false positives if -+ * only bfqd->bfq_slice_idle is used as a reference time interval and one -+ * or both the following cases occur: -+ * 1) HZ is so low that the duration of a jiffy is comparable to or higher -+ * than bfqd->bfq_slice_idle. This happens, e.g., on slow devices with -+ * HZ=100. -+ * 2) jiffies, instead of increasing at a constant rate, may stop increasing -+ * for a while, then suddenly 'jump' by several units to recover the lost -+ * increments. This seems to happen, e.g., inside virtual machines. 
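bfq_bfqq_is_slow above only trusts observation intervals longer than 20 ms, and then calls a queue slow if it consumed less than half of the maximum budget; shorter intervals fall back to the seekiness flag. A compact sketch of that decision, with made-up numbers:

#include <stdbool.h>
#include <stdio.h>

/* 'service' is the number of sectors served in the just-ended slice,
 * 'delta_usecs' its duration; 'seeky' is the fallback verdict. */
static bool queue_is_slow(unsigned int delta_usecs, int service,
                          int max_budget, bool seeky)
{
        if (delta_usecs < 1000)         /* too short to judge, trust seekiness */
                return seeky;
        if (delta_usecs > 20000)        /* long enough to compare with the peak */
                return service < max_budget / 2;
        return seeky;
}

int main(void)
{
        /* 1000 sectors in 30 ms against a 16384-sector budget: slow. */
        printf("%d\n", queue_is_slow(30000, 1000, 16384, false));
        return 0;
}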
-+ * To address this issue, we do not use as a reference time interval just -+ * bfqd->bfq_slice_idle, but bfqd->bfq_slice_idle plus a few jiffies. In -+ * particular we add the minimum number of jiffies for which the filter -+ * seems to be quite precise also in embedded systems and KVM/QEMU virtual -+ * machines. -+ */ -+static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq) -+{ -+ bfq_log_bfqq(bfqd, bfqq, -+"softrt_next_start: service_blkg %lu soft_rate %u sects/sec interval %u", -+ bfqq->service_from_backlogged, -+ bfqd->bfq_wr_max_softrt_rate, -+ jiffies_to_msecs(HZ * bfqq->service_from_backlogged / -+ bfqd->bfq_wr_max_softrt_rate)); -+ -+ return max(bfqq->last_idle_bklogged + -+ HZ * bfqq->service_from_backlogged / -+ bfqd->bfq_wr_max_softrt_rate, -+ jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4); -+} -+ -+/* -+ * Return the farthest future time instant according to jiffies -+ * macros. -+ */ -+static unsigned long bfq_greatest_from_now(void) -+{ -+ return jiffies + MAX_JIFFY_OFFSET; -+} -+ -+/* -+ * Return the farthest past time instant according to jiffies -+ * macros. -+ */ -+static unsigned long bfq_smallest_from_now(void) -+{ -+ return jiffies - MAX_JIFFY_OFFSET; -+} -+ -+/** -+ * bfq_bfqq_expire - expire a queue. -+ * @bfqd: device owning the queue. -+ * @bfqq: the queue to expire. -+ * @compensate: if true, compensate for the time spent idling. -+ * @reason: the reason causing the expiration. -+ * -+ * If the process associated with bfqq does slow I/O (e.g., because it -+ * issues random requests), we charge bfqq with the time it has been -+ * in service instead of the service it has received (see -+ * bfq_bfqq_charge_time for details on how this goal is achieved). As -+ * a consequence, bfqq will typically get higher timestamps upon -+ * reactivation, and hence it will be rescheduled as if it had -+ * received more service than what it has actually received. In the -+ * end, bfqq receives less service in proportion to how slowly its -+ * associated process consumes its budgets (and hence how seriously it -+ * tends to lower the throughput). In addition, this time-charging -+ * strategy guarantees time fairness among slow processes. In -+ * contrast, if the process associated with bfqq is not slow, we -+ * charge bfqq exactly with the service it has received. -+ * -+ * Charging time to the first type of queues and the exact service to -+ * the other has the effect of using the WF2Q+ policy to schedule the -+ * former on a timeslice basis, without violating service domain -+ * guarantees among the latter. -+ */ -+static void bfq_bfqq_expire(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq, -+ bool compensate, -+ enum bfqq_expiration reason) -+{ -+ bool slow; -+ unsigned long delta = 0; -+ struct bfq_entity *entity = &bfqq->entity; -+ int ref; -+ -+ BUG_ON(bfqq != bfqd->in_service_queue); -+ -+ /* -+ * Check whether the process is slow (see bfq_bfqq_is_slow). -+ */ -+ slow = bfq_bfqq_is_slow(bfqd, bfqq, compensate, reason, &delta); -+ -+ /* -+ * Increase service_from_backlogged before next statement, -+ * because the possible next invocation of -+ * bfq_bfqq_charge_time would likely inflate -+ * entity->service. In contrast, service_from_backlogged must -+ * contain real service, to enable the soft real-time -+ * heuristic to correctly compute the bandwidth consumed by -+ * bfqq. 
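bfq_bfqq_softrt_next_start above returns the later of the bandwidth-based bound and "a bit more than an idle slice from now", the extra few jiffies covering the coarse or jumping jiffies cases just described. A userspace sketch with an assumed HZ and invented values:

#include <stdio.h>

static unsigned long max_ul(unsigned long a, unsigned long b)
{
        return a > b ? a : b;
}

/* Earliest arrival time of the next request for the queue to still look
 * soft real-time: either the bandwidth bound or a bit more than an idle
 * slice from now, whichever is later. HZ_ and all values are made up. */
static unsigned long softrt_next_start(unsigned long last_idle_bklogged,
                                       unsigned long service_sectors,
                                       unsigned long max_softrt_rate,
                                       unsigned long now,
                                       unsigned long slice_idle_jiffies)
{
        const unsigned long HZ_ = 250;  /* assumed tick rate */

        return max_ul(last_idle_bklogged +
                      HZ_ * service_sectors / max_softrt_rate,
                      now + slice_idle_jiffies + 4);
}

int main(void)
{
        printf("%lu\n", softrt_next_start(5000, 14000, 7000, 5002, 2));
        return 0;
}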
-+ */ -+ bfqq->service_from_backlogged += entity->service; -+ -+ /* -+ * As above explained, charge slow (typically seeky) and -+ * timed-out queues with the time and not the service -+ * received, to favor sequential workloads. -+ * -+ * Processes doing I/O in the slower disk zones will tend to -+ * be slow(er) even if not seeky. Therefore, since the -+ * estimated peak rate is actually an average over the disk -+ * surface, these processes may timeout just for bad luck. To -+ * avoid punishing them, do not charge time to processes that -+ * succeeded in consuming at least 2/3 of their budget. This -+ * allows BFQ to preserve enough elasticity to still perform -+ * bandwidth, and not time, distribution with little unlucky -+ * or quasi-sequential processes. -+ */ -+ if (bfqq->wr_coeff == 1 && -+ (slow || -+ (reason == BFQ_BFQQ_BUDGET_TIMEOUT && -+ bfq_bfqq_budget_left(bfqq) >= entity->budget / 3))) -+ bfq_bfqq_charge_time(bfqd, bfqq, delta); -+ -+ BUG_ON(bfqq->entity.budget < bfqq->entity.service); -+ -+ if (reason == BFQ_BFQQ_TOO_IDLE && -+ entity->service <= 2 * entity->budget / 10) -+ bfq_clear_bfqq_IO_bound(bfqq); -+ -+ if (bfqd->low_latency && bfqq->wr_coeff == 1) -+ bfqq->last_wr_start_finish = jiffies; -+ -+ if (bfqd->low_latency && bfqd->bfq_wr_max_softrt_rate > 0 && -+ RB_EMPTY_ROOT(&bfqq->sort_list)) { -+ /* -+ * If we get here, and there are no outstanding -+ * requests, then the request pattern is isochronous -+ * (see the comments on the function -+ * bfq_bfqq_softrt_next_start()). Thus we can compute -+ * soft_rt_next_start. If, instead, the queue still -+ * has outstanding requests, then we have to wait for -+ * the completion of all the outstanding requests to -+ * discover whether the request pattern is actually -+ * isochronous. -+ */ -+ BUG_ON(bfqd->busy_queues < 1); -+ if (bfqq->dispatched == 0) { -+ bfqq->soft_rt_next_start = -+ bfq_bfqq_softrt_next_start(bfqd, bfqq); -+ bfq_log_bfqq(bfqd, bfqq, "new soft_rt_next %lu", -+ bfqq->soft_rt_next_start); -+ } else { -+ /* -+ * The application is still waiting for the -+ * completion of one or more requests: -+ * prevent it from possibly being incorrectly -+ * deemed as soft real-time by setting its -+ * soft_rt_next_start to infinity. In fact, -+ * without this assignment, the application -+ * would be incorrectly deemed as soft -+ * real-time if: -+ * 1) it issued a new request before the -+ * completion of all its in-flight -+ * requests, and -+ * 2) at that time, its soft_rt_next_start -+ * happened to be in the past. -+ */ -+ bfqq->soft_rt_next_start = -+ bfq_greatest_from_now(); -+ /* -+ * Schedule an update of soft_rt_next_start to when -+ * the task may be discovered to be isochronous. -+ */ -+ bfq_mark_bfqq_softrt_update(bfqq); -+ } -+ } -+ -+ bfq_log_bfqq(bfqd, bfqq, -+ "expire (%d, slow %d, num_disp %d, short_ttime %d, weight %d)", -+ reason, slow, bfqq->dispatched, -+ bfq_bfqq_has_short_ttime(bfqq), entity->weight); -+ -+ /* -+ * Increase, decrease or leave budget unchanged according to -+ * reason. 
-+ */ -+ BUG_ON(bfqq->entity.budget < bfqq->entity.service); -+ __bfq_bfqq_recalc_budget(bfqd, bfqq, reason); -+ BUG_ON(bfqq->next_rq == NULL && -+ bfqq->entity.budget < bfqq->entity.service); -+ ref = bfqq->ref; -+ __bfq_bfqq_expire(bfqd, bfqq); -+ -+ BUG_ON(ref > 1 && -+ !bfq_bfqq_busy(bfqq) && reason == BFQ_BFQQ_BUDGET_EXHAUSTED && -+ !bfq_class_idle(bfqq)); -+ -+ /* mark bfqq as waiting a request only if a bic still points to it */ -+ if (ref > 1 && !bfq_bfqq_busy(bfqq) && -+ reason != BFQ_BFQQ_BUDGET_TIMEOUT && -+ reason != BFQ_BFQQ_BUDGET_EXHAUSTED) -+ bfq_mark_bfqq_non_blocking_wait_rq(bfqq); -+} -+ -+/* -+ * Budget timeout is not implemented through a dedicated timer, but -+ * just checked on request arrivals and completions, as well as on -+ * idle timer expirations. -+ */ -+static bool bfq_bfqq_budget_timeout(struct bfq_queue *bfqq) -+{ -+ return time_is_before_eq_jiffies(bfqq->budget_timeout); -+} -+ -+/* -+ * If we expire a queue that is actively waiting (i.e., with the -+ * device idled) for the arrival of a new request, then we may incur -+ * the timestamp misalignment problem described in the body of the -+ * function __bfq_activate_entity. Hence we return true only if this -+ * condition does not hold, or if the queue is slow enough to deserve -+ * only to be kicked off for preserving a high throughput. -+ */ -+static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq) -+{ -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "may_budget_timeout: wait_request %d left %d timeout %d", -+ bfq_bfqq_wait_request(bfqq), -+ bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3, -+ bfq_bfqq_budget_timeout(bfqq)); -+ -+ return (!bfq_bfqq_wait_request(bfqq) || -+ bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3) -+ && -+ bfq_bfqq_budget_timeout(bfqq); -+} -+ -+/* -+ * For a queue that becomes empty, device idling is allowed only if -+ * this function returns true for that queue. As a consequence, since -+ * device idling plays a critical role for both throughput boosting -+ * and service guarantees, the return value of this function plays a -+ * critical role as well. -+ * -+ * In a nutshell, this function returns true only if idling is -+ * beneficial for throughput or, even if detrimental for throughput, -+ * idling is however necessary to preserve service guarantees (low -+ * latency, desired throughput distribution, ...). In particular, on -+ * NCQ-capable devices, this function tries to return false, so as to -+ * help keep the drives' internal queues full, whenever this helps the -+ * device boost the throughput without causing any service-guarantee -+ * issue. -+ * -+ * In more detail, the return value of this function is obtained by, -+ * first, computing a number of boolean variables that take into -+ * account throughput and service-guarantee issues, and, then, -+ * combining these variables in a logical expression. Most of the -+ * issues taken into account are not trivial. We discuss these issues -+ * while introducing the variables. -+ */ -+static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq) -+{ -+ struct bfq_data *bfqd = bfqq->bfqd; -+ bool rot_without_queueing = -+ !blk_queue_nonrot(bfqd->queue) && !bfqd->hw_tag, -+ bfqq_sequential_and_IO_bound, -+ idling_boosts_thr, idling_boosts_thr_without_issues, -+ idling_needed_for_service_guarantees, -+ asymmetric_scenario; -+ -+ if (bfqd->strict_guarantees) -+ return true; -+ -+ /* -+ * Idling is performed only if slice_idle > 0. 
In addition, we -+ * do not idle if -+ * (a) bfqq is async -+ * (b) bfqq is in the idle io prio class: in this case we do -+ * not idle because we want to minimize the bandwidth that -+ * queues in this class can steal to higher-priority queues -+ */ -+ if (bfqd->bfq_slice_idle == 0 || !bfq_bfqq_sync(bfqq) || -+ bfq_class_idle(bfqq)) -+ return false; -+ -+ bfqq_sequential_and_IO_bound = !BFQQ_SEEKY(bfqq) && -+ bfq_bfqq_IO_bound(bfqq) && bfq_bfqq_has_short_ttime(bfqq); -+ /* -+ * The next variable takes into account the cases where idling -+ * boosts the throughput. -+ * -+ * The value of the variable is computed considering, first, that -+ * idling is virtually always beneficial for the throughput if: -+ * (a) the device is not NCQ-capable and rotational, or -+ * (b) regardless of the presence of NCQ, the device is rotational and -+ * the request pattern for bfqq is I/O-bound and sequential, or -+ * (c) regardless of whether it is rotational, the device is -+ * not NCQ-capable and the request pattern for bfqq is -+ * I/O-bound and sequential. -+ * -+ * Secondly, and in contrast to the above item (b), idling an -+ * NCQ-capable flash-based device would not boost the -+ * throughput even with sequential I/O; rather it would lower -+ * the throughput in proportion to how fast the device -+ * is. Accordingly, the next variable is true if any of the -+ * above conditions (a), (b) or (c) is true, and, in -+ * particular, happens to be false if bfqd is an NCQ-capable -+ * flash-based device. -+ */ -+ idling_boosts_thr = rot_without_queueing || -+ ((!blk_queue_nonrot(bfqd->queue) || !bfqd->hw_tag) && -+ bfqq_sequential_and_IO_bound); -+ -+ /* -+ * The value of the next variable, -+ * idling_boosts_thr_without_issues, is equal to that of -+ * idling_boosts_thr, unless a special case holds. In this -+ * special case, described below, idling may cause problems to -+ * weight-raised queues. -+ * -+ * When the request pool is saturated (e.g., in the presence -+ * of write hogs), if the processes associated with -+ * non-weight-raised queues ask for requests at a lower rate, -+ * then processes associated with weight-raised queues have a -+ * higher probability to get a request from the pool -+ * immediately (or at least soon) when they need one. Thus -+ * they have a higher probability to actually get a fraction -+ * of the device throughput proportional to their high -+ * weight. This is especially true with NCQ-capable drives, -+ * which enqueue several requests in advance, and further -+ * reorder internally-queued requests. -+ * -+ * For this reason, we force to false the value of -+ * idling_boosts_thr_without_issues if there are weight-raised -+ * busy queues. In this case, and if bfqq is not weight-raised, -+ * this guarantees that the device is not idled for bfqq (if, -+ * instead, bfqq is weight-raised, then idling will be -+ * guaranteed by another variable, see below). Combined with -+ * the timestamping rules of BFQ (see [1] for details), this -+ * behavior causes bfqq, and hence any sync non-weight-raised -+ * queue, to get a lower number of requests served, and thus -+ * to ask for a lower number of requests from the request -+ * pool, before the busy weight-raised queues get served -+ * again. This often mitigates starvation problems in the -+ * presence of heavy write workloads and NCQ, thereby -+ * guaranteeing a higher application and system responsiveness -+ * in these hostile scenarios. 
-+ */ -+ idling_boosts_thr_without_issues = idling_boosts_thr && -+ bfqd->wr_busy_queues == 0; -+ -+ /* -+ * There is then a case where idling must be performed not -+ * for throughput concerns, but to preserve service -+ * guarantees. -+ * -+ * To introduce this case, we can note that allowing the drive -+ * to enqueue more than one request at a time, and hence -+ * delegating de facto final scheduling decisions to the -+ * drive's internal scheduler, entails loss of control on the -+ * actual request service order. In particular, the critical -+ * situation is when requests from different processes happen -+ * to be present, at the same time, in the internal queue(s) -+ * of the drive. In such a situation, the drive, by deciding -+ * the service order of the internally-queued requests, does -+ * determine also the actual throughput distribution among -+ * these processes. But the drive typically has no notion or -+ * concern about per-process throughput distribution, and -+ * makes its decisions only on a per-request basis. Therefore, -+ * the service distribution enforced by the drive's internal -+ * scheduler is likely to coincide with the desired -+ * device-throughput distribution only in a completely -+ * symmetric scenario where: -+ * (i) each of these processes must get the same throughput as -+ * the others; -+ * (ii) all these processes have the same I/O pattern -+ * (either sequential or random). -+ * In fact, in such a scenario, the drive will tend to treat -+ * the requests of each of these processes in about the same -+ * way as the requests of the others, and thus to provide -+ * each of these processes with about the same throughput -+ * (which is exactly the desired throughput distribution). In -+ * contrast, in any asymmetric scenario, device idling is -+ * certainly needed to guarantee that bfqq receives its -+ * assigned fraction of the device throughput (see [1] for -+ * details). -+ * -+ * We address this issue by controlling, actually, only the -+ * symmetry sub-condition (i), i.e., provided that -+ * sub-condition (i) holds, idling is not performed, -+ * regardless of whether sub-condition (ii) holds. In other -+ * words, only if sub-condition (i) holds, then idling is -+ * allowed, and the device tends to be prevented from queueing -+ * many requests, possibly of several processes. The reason -+ * for not controlling also sub-condition (ii) is that we -+ * exploit preemption to preserve guarantees in case of -+ * symmetric scenarios, even if (ii) does not hold, as -+ * explained in the next two paragraphs. -+ * -+ * Even if a queue, say Q, is expired when it remains idle, Q -+ * can still preempt the new in-service queue if the next -+ * request of Q arrives soon (see the comments on -+ * bfq_bfqq_update_budg_for_activation). If all queues and -+ * groups have the same weight, this form of preemption, -+ * combined with the hole-recovery heuristic described in the -+ * comments on function bfq_bfqq_update_budg_for_activation, -+ * are enough to preserve a correct bandwidth distribution in -+ * the mid term, even without idling. In fact, even if not -+ * idling allows the internal queues of the device to contain -+ * many requests, and thus to reorder requests, we can rather -+ * safely assume that the internal scheduler still preserves a -+ * minimum of mid-term fairness. The motivation for using -+ * preemption instead of idling is that, by not idling, -+ * service guarantees are preserved without minimally -+ * sacrificing throughput. 
In other words, both a high -+ * throughput and its desired distribution are obtained. -+ * -+ * More precisely, this preemption-based, idleless approach -+ * provides fairness in terms of IOPS, and not sectors per -+ * second. This can be seen with a simple example. Suppose -+ * that there are two queues with the same weight, but that -+ * the first queue receives requests of 8 sectors, while the -+ * second queue receives requests of 1024 sectors. In -+ * addition, suppose that each of the two queues contains at -+ * most one request at a time, which implies that each queue -+ * always remains idle after it is served. Finally, after -+ * remaining idle, each queue receives very quickly a new -+ * request. It follows that the two queues are served -+ * alternatively, preempting each other if needed. This -+ * implies that, although both queues have the same weight, -+ * the queue with large requests receives a service that is -+ * 1024/8 times as high as the service received by the other -+ * queue. -+ * -+ * On the other hand, device idling is performed, and thus -+ * pure sector-domain guarantees are provided, for the -+ * following queues, which are likely to need stronger -+ * throughput guarantees: weight-raised queues, and queues -+ * with a higher weight than other queues. When such queues -+ * are active, sub-condition (i) is false, which triggers -+ * device idling. -+ * -+ * According to the above considerations, the next variable is -+ * true (only) if sub-condition (i) holds. To compute the -+ * value of this variable, we not only use the return value of -+ * the function bfq_symmetric_scenario(), but also check -+ * whether bfqq is being weight-raised, because -+ * bfq_symmetric_scenario() does not take into account also -+ * weight-raised queues (see comments on -+ * bfq_weights_tree_add()). -+ * -+ * As a side note, it is worth considering that the above -+ * device-idling countermeasures may however fail in the -+ * following unlucky scenario: if idling is (correctly) -+ * disabled in a time period during which all symmetry -+ * sub-conditions hold, and hence the device is allowed to -+ * enqueue many requests, but at some later point in time some -+ * sub-condition stops to hold, then it may become impossible -+ * to let requests be served in the desired order until all -+ * the requests already queued in the device have been served. -+ */ -+ asymmetric_scenario = bfqq->wr_coeff > 1 || -+ !bfq_symmetric_scenario(bfqd); -+ -+ /* -+ * Finally, there is a case where maximizing throughput is the -+ * best choice even if it may cause unfairness toward -+ * bfqq. Such a case is when bfqq became active in a burst of -+ * queue activations. Queues that became active during a large -+ * burst benefit only from throughput, as discussed in the -+ * comments on bfq_handle_burst. Thus, if bfqq became active -+ * in a burst and not idling the device maximizes throughput, -+ * then the device must no be idled, because not idling the -+ * device provides bfqq and all other queues in the burst with -+ * maximum benefit. Combining this and the above case, we can -+ * now establish when idling is actually needed to preserve -+ * service guarantees. -+ */ -+ idling_needed_for_service_guarantees = -+ asymmetric_scenario && !bfq_bfqq_in_large_burst(bfqq); -+ -+ /* -+ * We have now all the components we need to compute the -+ * return value of the function, which is true only if idling -+ * either boosts the throughput (without issues), or is -+ * necessary to preserve service guarantees. 
-+ */ -+ bfq_log_bfqq(bfqd, bfqq, "may_idle: sync %d idling_boosts_thr %d", -+ bfq_bfqq_sync(bfqq), idling_boosts_thr); -+ -+ bfq_log_bfqq(bfqd, bfqq, -+ "may_idle: wr_busy %d boosts %d IO-bound %d guar %d", -+ bfqd->wr_busy_queues, -+ idling_boosts_thr_without_issues, -+ bfq_bfqq_IO_bound(bfqq), -+ idling_needed_for_service_guarantees); -+ -+ return idling_boosts_thr_without_issues || -+ idling_needed_for_service_guarantees; -+} -+ -+/* -+ * If the in-service queue is empty but the function bfq_bfqq_may_idle -+ * returns true, then: -+ * 1) the queue must remain in service and cannot be expired, and -+ * 2) the device must be idled to wait for the possible arrival of a new -+ * request for the queue. -+ * See the comments on the function bfq_bfqq_may_idle for the reasons -+ * why performing device idling is the best choice to boost the throughput -+ * and preserve service guarantees when bfq_bfqq_may_idle itself -+ * returns true. -+ */ -+static bool bfq_bfqq_must_idle(struct bfq_queue *bfqq) -+{ -+ return RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_bfqq_may_idle(bfqq); -+} -+ -+/* -+ * Select a queue for service. If we have a current queue in service, -+ * check whether to continue servicing it, or retrieve and set a new one. -+ */ -+static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd) -+{ -+ struct bfq_queue *bfqq; -+ struct request *next_rq; -+ enum bfqq_expiration reason = BFQ_BFQQ_BUDGET_TIMEOUT; -+ -+ bfqq = bfqd->in_service_queue; -+ if (!bfqq) -+ goto new_queue; -+ -+ bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue"); -+ -+ if (bfq_may_expire_for_budg_timeout(bfqq) && -+ !hrtimer_active(&bfqd->idle_slice_timer) && -+ !bfq_bfqq_must_idle(bfqq)) -+ goto expire; -+ -+check_queue: -+ /* -+ * This loop is rarely executed more than once. Even when it -+ * happens, it is much more convenient to re-execute this loop -+ * than to return NULL and trigger a new dispatch to get a -+ * request served. -+ */ -+ next_rq = bfqq->next_rq; -+ /* -+ * If bfqq has requests queued and it has enough budget left to -+ * serve them, keep the queue, otherwise expire it. -+ */ -+ if (next_rq) { -+ BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list)); -+ -+ if (bfq_serv_to_charge(next_rq, bfqq) > -+ bfq_bfqq_budget_left(bfqq)) { -+ /* -+ * Expire the queue for budget exhaustion, -+ * which makes sure that the next budget is -+ * enough to serve the next request, even if -+ * it comes from the fifo expired path. -+ */ -+ reason = BFQ_BFQQ_BUDGET_EXHAUSTED; -+ goto expire; -+ } else { -+ /* -+ * The idle timer may be pending because we may -+ * not disable disk idling even when a new request -+ * arrives. -+ */ -+ if (bfq_bfqq_wait_request(bfqq)) { -+ BUG_ON(!hrtimer_active(&bfqd->idle_slice_timer)); -+ /* -+ * If we get here: 1) at least a new request -+ * has arrived but we have not disabled the -+ * timer because the request was too small, -+ * 2) then the block layer has unplugged -+ * the device, causing the dispatch to be -+ * invoked. -+ * -+ * Since the device is unplugged, now the -+ * requests are probably large enough to -+ * provide a reasonable throughput. -+ * So we disable idling. -+ */ -+ bfq_clear_bfqq_wait_request(bfqq); -+ hrtimer_try_to_cancel(&bfqd->idle_slice_timer); -+ bfqg_stats_update_idle_time(bfqq_group(bfqq)); -+ } -+ goto keep_queue; -+ } -+ } -+ -+ /* -+ * No requests pending. However, if the in-service queue is idling -+ * for a new request, or has requests waiting for a completion and -+ * may idle after their completion, then keep it anyway. 
-+ */ -+ if (hrtimer_active(&bfqd->idle_slice_timer) || -+ (bfqq->dispatched != 0 && bfq_bfqq_may_idle(bfqq))) { -+ bfqq = NULL; -+ goto keep_queue; -+ } -+ -+ reason = BFQ_BFQQ_NO_MORE_REQUESTS; -+expire: -+ bfq_bfqq_expire(bfqd, bfqq, false, reason); -+new_queue: -+ bfqq = bfq_set_in_service_queue(bfqd); -+ if (bfqq) { -+ bfq_log_bfqq(bfqd, bfqq, "select_queue: checking new queue"); -+ goto check_queue; -+ } -+keep_queue: -+ if (bfqq) -+ bfq_log_bfqq(bfqd, bfqq, "select_queue: returned this queue"); -+ else -+ bfq_log(bfqd, "select_queue: no queue returned"); -+ -+ return bfqq; -+} -+ -+static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq) -+{ -+ struct bfq_entity *entity = &bfqq->entity; -+ -+ if (bfqq->wr_coeff > 1) { /* queue is being weight-raised */ -+ BUG_ON(bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time && -+ time_is_after_jiffies(bfqq->last_wr_start_finish)); -+ -+ bfq_log_bfqq(bfqd, bfqq, -+ "raising period dur %u/%u msec, old coeff %u, w %d(%d)", -+ jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish), -+ jiffies_to_msecs(bfqq->wr_cur_max_time), -+ bfqq->wr_coeff, -+ bfqq->entity.weight, bfqq->entity.orig_weight); -+ -+ BUG_ON(bfqq != bfqd->in_service_queue && entity->weight != -+ entity->orig_weight * bfqq->wr_coeff); -+ if (entity->prio_changed) -+ bfq_log_bfqq(bfqd, bfqq, "WARN: pending prio change"); -+ -+ /* -+ * If the queue was activated in a burst, or too much -+ * time has elapsed from the beginning of this -+ * weight-raising period, then end weight raising. -+ */ -+ if (bfq_bfqq_in_large_burst(bfqq)) -+ bfq_bfqq_end_wr(bfqq); -+ else if (time_is_before_jiffies(bfqq->last_wr_start_finish + -+ bfqq->wr_cur_max_time)) { -+ if (bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time || -+ time_is_before_jiffies(bfqq->wr_start_at_switch_to_srt + -+ bfq_wr_duration(bfqd))) -+ bfq_bfqq_end_wr(bfqq); -+ else { -+ /* switch back to interactive wr */ -+ bfqq->wr_coeff = bfqd->bfq_wr_coeff; -+ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); -+ bfqq->last_wr_start_finish = -+ bfqq->wr_start_at_switch_to_srt; -+ BUG_ON(time_is_after_jiffies( -+ bfqq->last_wr_start_finish)); -+ bfqq->entity.prio_changed = 1; -+ bfq_log_bfqq(bfqd, bfqq, -+ "back to interactive wr"); -+ } -+ } -+ } -+ /* -+ * To improve latency (for this or other queues), immediately -+ * update weight both if it must be raised and if it must be -+ * lowered. Since, entity may be on some active tree here, and -+ * might have a pending change of its ioprio class, invoke -+ * next function with the last parameter unset (see the -+ * comments on the function). -+ */ -+ if ((entity->weight > entity->orig_weight) != (bfqq->wr_coeff > 1)) -+ __bfq_entity_update_weight_prio(bfq_entity_service_tree(entity), -+ entity, false); -+} -+ -+/* -+ * Dispatch one request from bfqq, moving it to the request queue -+ * dispatch list. 
-+ */ -+static int bfq_dispatch_request(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq) -+{ -+ int dispatched = 0; -+ struct request *rq = bfqq->next_rq; -+ unsigned long service_to_charge; -+ -+ BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list)); -+ BUG_ON(!rq); -+ service_to_charge = bfq_serv_to_charge(rq, bfqq); -+ -+ BUG_ON(service_to_charge > bfq_bfqq_budget_left(bfqq)); -+ -+ BUG_ON(bfqq->entity.budget < bfqq->entity.service); -+ -+ bfq_bfqq_served(bfqq, service_to_charge); -+ -+ BUG_ON(bfqq->entity.budget < bfqq->entity.service); -+ -+ bfq_dispatch_insert(bfqd->queue, rq); -+ -+ /* -+ * If weight raising has to terminate for bfqq, then next -+ * function causes an immediate update of bfqq's weight, -+ * without waiting for next activation. As a consequence, on -+ * expiration, bfqq will be timestamped as if has never been -+ * weight-raised during this service slot, even if it has -+ * received part or even most of the service as a -+ * weight-raised queue. This inflates bfqq's timestamps, which -+ * is beneficial, as bfqq is then more willing to leave the -+ * device immediately to possible other weight-raised queues. -+ */ -+ bfq_update_wr_data(bfqd, bfqq); -+ -+ bfq_log_bfqq(bfqd, bfqq, -+ "dispatched %u sec req (%llu), budg left %d", -+ blk_rq_sectors(rq), -+ (unsigned long long) blk_rq_pos(rq), -+ bfq_bfqq_budget_left(bfqq)); -+ -+ dispatched++; -+ -+ if (!bfqd->in_service_bic) { -+ atomic_long_inc(&RQ_BIC(rq)->icq.ioc->refcount); -+ bfqd->in_service_bic = RQ_BIC(rq); -+ } -+ -+ if (bfqd->busy_queues > 1 && bfq_class_idle(bfqq)) -+ goto expire; -+ -+ return dispatched; -+ -+expire: -+ bfq_bfqq_expire(bfqd, bfqq, false, BFQ_BFQQ_BUDGET_EXHAUSTED); -+ return dispatched; -+} -+ -+static int __bfq_forced_dispatch_bfqq(struct bfq_queue *bfqq) -+{ -+ int dispatched = 0; -+ -+ while (bfqq->next_rq) { -+ bfq_dispatch_insert(bfqq->bfqd->queue, bfqq->next_rq); -+ dispatched++; -+ } -+ -+ BUG_ON(!list_empty(&bfqq->fifo)); -+ return dispatched; -+} -+ -+/* -+ * Drain our current requests. -+ * Used for barriers and when switching io schedulers on-the-fly. -+ */ -+static int bfq_forced_dispatch(struct bfq_data *bfqd) -+{ -+ struct bfq_queue *bfqq, *n; -+ struct bfq_service_tree *st; -+ int dispatched = 0; -+ -+ bfqq = bfqd->in_service_queue; -+ if (bfqq) -+ __bfq_bfqq_expire(bfqd, bfqq); -+ -+ /* -+ * Loop through classes, and be careful to leave the scheduler -+ * in a consistent state, as feedback mechanisms and vtime -+ * updates cannot be disabled during the process. -+ */ -+ list_for_each_entry_safe(bfqq, n, &bfqd->active_list, bfqq_list) { -+ st = bfq_entity_service_tree(&bfqq->entity); -+ -+ dispatched += __bfq_forced_dispatch_bfqq(bfqq); -+ -+ bfqq->max_budget = bfq_max_budget(bfqd); -+ bfq_forget_idle(st); -+ } -+ -+ BUG_ON(bfqd->busy_queues != 0); -+ -+ return dispatched; -+} -+ -+static int bfq_dispatch_requests(struct request_queue *q, int force) -+{ -+ struct bfq_data *bfqd = q->elevator->elevator_data; -+ struct bfq_queue *bfqq; -+ -+ bfq_log(bfqd, "dispatch requests: %d busy queues", bfqd->busy_queues); -+ -+ if (bfqd->busy_queues == 0) -+ return 0; -+ -+ if (unlikely(force)) -+ return bfq_forced_dispatch(bfqd); -+ -+ /* -+ * Force device to serve one request at a time if -+ * strict_guarantees is true. Forcing this service scheme is -+ * currently the ONLY way to guarantee that the request -+ * service order enforced by the scheduler is respected by a -+ * queueing device. Otherwise the device is free even to make -+ * some unlucky request wait for as long as the device -+ * wishes. 
-+ * -+ * Of course, serving one request at at time may cause loss of -+ * throughput. -+ */ -+ if (bfqd->strict_guarantees && bfqd->rq_in_driver > 0) -+ return 0; -+ -+ bfqq = bfq_select_queue(bfqd); -+ if (!bfqq) -+ return 0; -+ -+ BUG_ON(bfqq->entity.budget < bfqq->entity.service); -+ -+ BUG_ON(bfq_bfqq_wait_request(bfqq)); -+ -+ if (!bfq_dispatch_request(bfqd, bfqq)) -+ return 0; -+ -+ bfq_log_bfqq(bfqd, bfqq, "dispatched %s request", -+ bfq_bfqq_sync(bfqq) ? "sync" : "async"); -+ -+ BUG_ON(bfqq->next_rq == NULL && -+ bfqq->entity.budget < bfqq->entity.service); -+ return 1; -+} -+ -+/* -+ * Task holds one reference to the queue, dropped when task exits. Each rq -+ * in-flight on this queue also holds a reference, dropped when rq is freed. -+ * -+ * Queue lock must be held here. Recall not to use bfqq after calling -+ * this function on it. -+ */ -+static void bfq_put_queue(struct bfq_queue *bfqq) -+{ -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+ struct bfq_group *bfqg = bfqq_group(bfqq); -+#endif -+ -+ BUG_ON(bfqq->ref <= 0); -+ -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d", bfqq, bfqq->ref); -+ bfqq->ref--; -+ if (bfqq->ref) -+ return; -+ -+ BUG_ON(rb_first(&bfqq->sort_list)); -+ BUG_ON(bfqq->allocated[READ] + bfqq->allocated[WRITE] != 0); -+ BUG_ON(bfqq->entity.tree); -+ BUG_ON(bfq_bfqq_busy(bfqq)); -+ -+ if (bfq_bfqq_sync(bfqq)) -+ /* -+ * The fact that this queue is being destroyed does not -+ * invalidate the fact that this queue may have been -+ * activated during the current burst. As a consequence, -+ * although the queue does not exist anymore, and hence -+ * needs to be removed from the burst list if there, -+ * the burst size has not to be decremented. -+ */ -+ hlist_del_init(&bfqq->burst_list_node); -+ -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p freed", bfqq); -+ -+ kmem_cache_free(bfq_pool, bfqq); -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+ bfqg_put(bfqg); -+#endif -+} -+ -+static void bfq_put_cooperator(struct bfq_queue *bfqq) -+{ -+ struct bfq_queue *__bfqq, *next; -+ -+ /* -+ * If this queue was scheduled to merge with another queue, be -+ * sure to drop the reference taken on that queue (and others in -+ * the merge chain). See bfq_setup_merge and bfq_merge_bfqqs. -+ */ -+ __bfqq = bfqq->new_bfqq; -+ while (__bfqq) { -+ if (__bfqq == bfqq) -+ break; -+ next = __bfqq->new_bfqq; -+ bfq_put_queue(__bfqq); -+ __bfqq = next; -+ } -+} -+ -+static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) -+{ -+ if (bfqq == bfqd->in_service_queue) { -+ __bfq_bfqq_expire(bfqd, bfqq); -+ bfq_schedule_dispatch(bfqd); -+ } -+ -+ bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, bfqq->ref); -+ -+ bfq_put_cooperator(bfqq); -+ -+ bfq_put_queue(bfqq); /* release process reference */ -+} -+ -+static void bfq_init_icq(struct io_cq *icq) -+{ -+ icq_to_bic(icq)->ttime.last_end_request = ktime_get_ns() - (1ULL<<32); -+} -+ -+static void bfq_exit_icq(struct io_cq *icq) -+{ -+ struct bfq_io_cq *bic = icq_to_bic(icq); -+ struct bfq_data *bfqd = bic_to_bfqd(bic); -+ -+ if (bic_to_bfqq(bic, false)) { -+ bfq_exit_bfqq(bfqd, bic_to_bfqq(bic, false)); -+ bic_set_bfqq(bic, NULL, false); -+ } -+ -+ if (bic_to_bfqq(bic, true)) { -+ /* -+ * If the bic is using a shared queue, put the reference -+ * taken on the io_context when the bic started using a -+ * shared bfq_queue. 
-+ */ -+ if (bfq_bfqq_coop(bic_to_bfqq(bic, true))) -+ put_io_context(icq->ioc); -+ bfq_exit_bfqq(bfqd, bic_to_bfqq(bic, true)); -+ bic_set_bfqq(bic, NULL, true); -+ } -+} -+ -+/* -+ * Update the entity prio values; note that the new values will not -+ * be used until the next (re)activation. -+ */ -+static void bfq_set_next_ioprio_data(struct bfq_queue *bfqq, -+ struct bfq_io_cq *bic) -+{ -+ struct task_struct *tsk = current; -+ int ioprio_class; -+ -+ ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio); -+ switch (ioprio_class) { -+ default: -+ dev_err(bfqq->bfqd->queue->backing_dev_info->dev, -+ "bfq: bad prio class %d\n", ioprio_class); -+ case IOPRIO_CLASS_NONE: -+ /* -+ * No prio set, inherit CPU scheduling settings. -+ */ -+ bfqq->new_ioprio = task_nice_ioprio(tsk); -+ bfqq->new_ioprio_class = task_nice_ioclass(tsk); -+ break; -+ case IOPRIO_CLASS_RT: -+ bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio); -+ bfqq->new_ioprio_class = IOPRIO_CLASS_RT; -+ break; -+ case IOPRIO_CLASS_BE: -+ bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio); -+ bfqq->new_ioprio_class = IOPRIO_CLASS_BE; -+ break; -+ case IOPRIO_CLASS_IDLE: -+ bfqq->new_ioprio_class = IOPRIO_CLASS_IDLE; -+ bfqq->new_ioprio = 7; -+ break; -+ } -+ -+ if (bfqq->new_ioprio >= IOPRIO_BE_NR) { -+ pr_crit("bfq_set_next_ioprio_data: new_ioprio %d\n", -+ bfqq->new_ioprio); -+ BUG(); -+ } -+ -+ bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->new_ioprio); -+ bfqq->entity.prio_changed = 1; -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "set_next_ioprio_data: bic_class %d prio %d class %d", -+ ioprio_class, bfqq->new_ioprio, bfqq->new_ioprio_class); -+} -+ -+static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio) -+{ -+ struct bfq_data *bfqd = bic_to_bfqd(bic); -+ struct bfq_queue *bfqq; -+ unsigned long uninitialized_var(flags); -+ int ioprio = bic->icq.ioc->ioprio; -+ -+ /* -+ * This condition may trigger on a newly created bic, be sure to -+ * drop the lock before returning. 
-+ */ -+ if (unlikely(!bfqd) || likely(bic->ioprio == ioprio)) -+ return; -+ -+ bic->ioprio = ioprio; -+ -+ bfqq = bic_to_bfqq(bic, false); -+ if (bfqq) { -+ /* release process reference on this queue */ -+ bfq_put_queue(bfqq); -+ bfqq = bfq_get_queue(bfqd, bio, BLK_RW_ASYNC, bic); -+ bic_set_bfqq(bic, bfqq, false); -+ bfq_log_bfqq(bfqd, bfqq, -+ "check_ioprio_change: bfqq %p %d", -+ bfqq, bfqq->ref); -+ } -+ -+ bfqq = bic_to_bfqq(bic, true); -+ if (bfqq) -+ bfq_set_next_ioprio_data(bfqq, bic); -+} -+ -+static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, -+ struct bfq_io_cq *bic, pid_t pid, int is_sync) -+{ -+ RB_CLEAR_NODE(&bfqq->entity.rb_node); -+ INIT_LIST_HEAD(&bfqq->fifo); -+ INIT_HLIST_NODE(&bfqq->burst_list_node); -+ BUG_ON(!hlist_unhashed(&bfqq->burst_list_node)); -+ -+ bfqq->ref = 0; -+ bfqq->bfqd = bfqd; -+ -+ if (bic) -+ bfq_set_next_ioprio_data(bfqq, bic); -+ -+ if (is_sync) { -+ /* -+ * No need to mark as has_short_ttime if in -+ * idle_class, because no device idling is performed -+ * for queues in idle class -+ */ -+ if (!bfq_class_idle(bfqq)) -+ /* tentatively mark as has_short_ttime */ -+ bfq_mark_bfqq_has_short_ttime(bfqq); -+ bfq_mark_bfqq_sync(bfqq); -+ bfq_mark_bfqq_just_created(bfqq); -+ } else -+ bfq_clear_bfqq_sync(bfqq); -+ bfq_mark_bfqq_IO_bound(bfqq); -+ -+ /* Tentative initial value to trade off between thr and lat */ -+ bfqq->max_budget = (2 * bfq_max_budget(bfqd)) / 3; -+ bfqq->pid = pid; -+ -+ bfqq->wr_coeff = 1; -+ bfqq->last_wr_start_finish = jiffies; -+ bfqq->wr_start_at_switch_to_srt = bfq_smallest_from_now(); -+ bfqq->budget_timeout = bfq_smallest_from_now(); -+ bfqq->split_time = bfq_smallest_from_now(); -+ -+ /* -+ * Set to the value for which bfqq will not be deemed as -+ * soft rt when it becomes backlogged. -+ */ -+ bfqq->soft_rt_next_start = bfq_greatest_from_now(); -+ -+ /* first request is almost certainly seeky */ -+ bfqq->seek_history = 1; -+} -+ -+static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd, -+ struct bfq_group *bfqg, -+ int ioprio_class, int ioprio) -+{ -+ switch (ioprio_class) { -+ case IOPRIO_CLASS_RT: -+ return &bfqg->async_bfqq[0][ioprio]; -+ case IOPRIO_CLASS_NONE: -+ ioprio = IOPRIO_NORM; -+ /* fall through */ -+ case IOPRIO_CLASS_BE: -+ return &bfqg->async_bfqq[1][ioprio]; -+ case IOPRIO_CLASS_IDLE: -+ return &bfqg->async_idle_bfqq; -+ default: -+ BUG(); -+ } -+} -+ -+static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd, -+ struct bio *bio, bool is_sync, -+ struct bfq_io_cq *bic) -+{ -+ const int ioprio = IOPRIO_PRIO_DATA(bic->ioprio); -+ const int ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio); -+ struct bfq_queue **async_bfqq = NULL; -+ struct bfq_queue *bfqq; -+ struct bfq_group *bfqg; -+ -+ rcu_read_lock(); -+ -+ bfqg = bfq_find_set_group(bfqd, bio_blkcg(bio)); -+ if (!bfqg) { -+ bfqq = &bfqd->oom_bfqq; -+ goto out; -+ } -+ -+ if (!is_sync) { -+ async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class, -+ ioprio); -+ bfqq = *async_bfqq; -+ if (bfqq) -+ goto out; -+ } -+ -+ bfqq = kmem_cache_alloc_node(bfq_pool, -+ GFP_NOWAIT | __GFP_ZERO | __GFP_NOWARN, -+ bfqd->queue->node); -+ -+ if (bfqq) { -+ bfq_init_bfqq(bfqd, bfqq, bic, current->pid, -+ is_sync); -+ bfq_init_entity(&bfqq->entity, bfqg); -+ bfq_log_bfqq(bfqd, bfqq, "allocated"); -+ } else { -+ bfqq = &bfqd->oom_bfqq; -+ bfq_log_bfqq(bfqd, bfqq, "using oom bfqq"); -+ goto out; -+ } -+ -+ /* -+ * Pin the queue now that it's allocated, scheduler exit will -+ * prune it. 
-+ */ -+ if (async_bfqq) { -+ bfqq->ref++; /* -+ * Extra group reference, w.r.t. sync -+ * queue. This extra reference is removed -+ * only if bfqq->bfqg disappears, to -+ * guarantee that this queue is not freed -+ * until its group goes away. -+ */ -+ bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d", -+ bfqq, bfqq->ref); -+ *async_bfqq = bfqq; -+ } -+ -+out: -+ bfqq->ref++; /* get a process reference to this queue */ -+ bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq, bfqq->ref); -+ rcu_read_unlock(); -+ return bfqq; -+} -+ -+static void bfq_update_io_thinktime(struct bfq_data *bfqd, -+ struct bfq_io_cq *bic) -+{ -+ struct bfq_ttime *ttime = &bic->ttime; -+ u64 elapsed = ktime_get_ns() - bic->ttime.last_end_request; -+ -+ elapsed = min_t(u64, elapsed, 2 * bfqd->bfq_slice_idle); -+ -+ ttime->ttime_samples = (7*bic->ttime.ttime_samples + 256) / 8; -+ ttime->ttime_total = div_u64(7*ttime->ttime_total + 256*elapsed, 8); -+ ttime->ttime_mean = div64_ul(ttime->ttime_total + 128, -+ ttime->ttime_samples); -+} -+ -+static void -+bfq_update_io_seektime(struct bfq_data *bfqd, struct bfq_queue *bfqq, -+ struct request *rq) -+{ -+ bfqq->seek_history <<= 1; -+ bfqq->seek_history |= -+ get_sdist(bfqq->last_request_pos, rq) > BFQQ_SEEK_THR && -+ (!blk_queue_nonrot(bfqd->queue) || -+ blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT); -+} -+ -+static void bfq_update_has_short_ttime(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq, -+ struct bfq_io_cq *bic) -+{ -+ bool has_short_ttime = true; -+ -+ /* -+ * No need to update has_short_ttime if bfqq is async or in -+ * idle io prio class, or if bfq_slice_idle is zero, because -+ * no device idling is performed for bfqq in this case. -+ */ -+ if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq) || -+ bfqd->bfq_slice_idle == 0) -+ return; -+ -+ /* Idle window just restored, statistics are meaningless. */ -+ if (time_is_after_eq_jiffies(bfqq->split_time + -+ bfqd->bfq_wr_min_idle_time)) -+ return; -+ -+ /* Think time is infinite if no process is linked to -+ * bfqq. Otherwise check average think time to -+ * decide whether to mark as has_short_ttime -+ */ -+ if (atomic_read(&bic->icq.ioc->active_ref) == 0 || -+ (bfq_sample_valid(bic->ttime.ttime_samples) && -+ bic->ttime.ttime_mean > bfqd->bfq_slice_idle)) -+ has_short_ttime = false; -+ -+ bfq_log_bfqq(bfqd, bfqq, "update_has_short_ttime: has_short_ttime %d", -+ has_short_ttime); -+ -+ if (has_short_ttime) -+ bfq_mark_bfqq_has_short_ttime(bfqq); -+ else -+ bfq_clear_bfqq_has_short_ttime(bfqq); -+} -+ -+/* -+ * Called when a new fs request (rq) is added to bfqq. Check if there's -+ * something we should do about it. -+ */ -+static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq, -+ struct request *rq) -+{ -+ struct bfq_io_cq *bic = RQ_BIC(rq); -+ -+ if (rq->cmd_flags & REQ_META) -+ bfqq->meta_pending++; -+ -+ bfq_update_io_thinktime(bfqd, bic); -+ bfq_update_has_short_ttime(bfqd, bfqq, bic); -+ bfq_update_io_seektime(bfqd, bfqq, rq); -+ -+ bfq_log_bfqq(bfqd, bfqq, -+ "rq_enqueued: has_short_ttime=%d (seeky %d)", -+ bfq_bfqq_has_short_ttime(bfqq), BFQQ_SEEKY(bfqq)); -+ -+ bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq); -+ -+ if (bfqq == bfqd->in_service_queue && bfq_bfqq_wait_request(bfqq)) { -+ bool small_req = bfqq->queued[rq_is_sync(rq)] == 1 && -+ blk_rq_sectors(rq) < 32; -+ bool budget_timeout = bfq_bfqq_budget_timeout(bfqq); -+ -+ /* -+ * There is just this request queued: if the request -+ * is small and the queue is not to be expired, then -+ * just exit. 
-+ * -+ * In this way, if the device is being idled to wait -+ * for a new request from the in-service queue, we -+ * avoid unplugging the device and committing the -+ * device to serve just a small request. On the -+ * contrary, we wait for the block layer to decide -+ * when to unplug the device: hopefully, new requests -+ * will be merged to this one quickly, then the device -+ * will be unplugged and larger requests will be -+ * dispatched. -+ */ -+ if (small_req && !budget_timeout) -+ return; -+ -+ /* -+ * A large enough request arrived, or the queue is to -+ * be expired: in both cases disk idling is to be -+ * stopped, so clear wait_request flag and reset -+ * timer. -+ */ -+ bfq_clear_bfqq_wait_request(bfqq); -+ hrtimer_try_to_cancel(&bfqd->idle_slice_timer); -+ bfqg_stats_update_idle_time(bfqq_group(bfqq)); -+ -+ /* -+ * The queue is not empty, because a new request just -+ * arrived. Hence we can safely expire the queue, in -+ * case of budget timeout, without risking that the -+ * timestamps of the queue are not updated correctly. -+ * See [1] for more details. -+ */ -+ if (budget_timeout) -+ bfq_bfqq_expire(bfqd, bfqq, false, -+ BFQ_BFQQ_BUDGET_TIMEOUT); -+ -+ /* -+ * Let the request rip immediately, or let a new queue be -+ * selected if bfqq has just been expired. -+ */ -+ __blk_run_queue(bfqd->queue); -+ } -+} -+ -+static void bfq_insert_request(struct request_queue *q, struct request *rq) -+{ -+ struct bfq_data *bfqd = q->elevator->elevator_data; -+ struct bfq_queue *bfqq = RQ_BFQQ(rq), *new_bfqq; -+ -+ assert_spin_locked(bfqd->queue->queue_lock); -+ -+ /* -+ * An unplug may trigger a requeue of a request from the device -+ * driver: make sure we are in process context while trying to -+ * merge two bfq_queues. -+ */ -+ if (!in_interrupt()) { -+ new_bfqq = bfq_setup_cooperator(bfqd, bfqq, rq, true); -+ if (new_bfqq) { -+ if (bic_to_bfqq(RQ_BIC(rq), 1) != bfqq) -+ new_bfqq = bic_to_bfqq(RQ_BIC(rq), 1); -+ /* -+ * Release the request's reference to the old bfqq -+ * and make sure one is taken to the shared queue. -+ */ -+ new_bfqq->allocated[rq_data_dir(rq)]++; -+ bfqq->allocated[rq_data_dir(rq)]--; -+ new_bfqq->ref++; -+ bfq_clear_bfqq_just_created(bfqq); -+ if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq) -+ bfq_merge_bfqqs(bfqd, RQ_BIC(rq), -+ bfqq, new_bfqq); -+ /* -+ * rq is about to be enqueued into new_bfqq, -+ * release rq reference on bfqq -+ */ -+ bfq_put_queue(bfqq); -+ rq->elv.priv[1] = new_bfqq; -+ bfqq = new_bfqq; -+ } -+ } -+ -+ bfq_add_request(rq); -+ -+ rq->fifo_time = ktime_get_ns() + bfqd->bfq_fifo_expire[rq_is_sync(rq)]; -+ list_add_tail(&rq->queuelist, &bfqq->fifo); -+ -+ bfq_rq_enqueued(bfqd, bfqq, rq); -+} -+ -+static void bfq_update_hw_tag(struct bfq_data *bfqd) -+{ -+ bfqd->max_rq_in_driver = max_t(int, bfqd->max_rq_in_driver, -+ bfqd->rq_in_driver); -+ -+ if (bfqd->hw_tag == 1) -+ return; -+ -+ /* -+ * This sample is valid if the number of outstanding requests -+ * is large enough to allow a queueing behavior. Note that the -+ * sum is not exact, as it's not taking into account deactivated -+ * requests. 
-+ */ -+ if (bfqd->rq_in_driver + bfqd->queued < BFQ_HW_QUEUE_THRESHOLD) -+ return; -+ -+ if (bfqd->hw_tag_samples++ < BFQ_HW_QUEUE_SAMPLES) -+ return; -+ -+ bfqd->hw_tag = bfqd->max_rq_in_driver > BFQ_HW_QUEUE_THRESHOLD; -+ bfqd->max_rq_in_driver = 0; -+ bfqd->hw_tag_samples = 0; -+} -+ -+static void bfq_completed_request(struct request_queue *q, struct request *rq) -+{ -+ struct bfq_queue *bfqq = RQ_BFQQ(rq); -+ struct bfq_data *bfqd = bfqq->bfqd; -+ u64 now_ns; -+ u32 delta_us; -+ -+ bfq_log_bfqq(bfqd, bfqq, "completed one req with %u sects left", -+ blk_rq_sectors(rq)); -+ -+ assert_spin_locked(bfqd->queue->queue_lock); -+ bfq_update_hw_tag(bfqd); -+ -+ BUG_ON(!bfqd->rq_in_driver); -+ BUG_ON(!bfqq->dispatched); -+ bfqd->rq_in_driver--; -+ bfqq->dispatched--; -+ bfqg_stats_update_completion(bfqq_group(bfqq), -+ rq_start_time_ns(rq), -+ rq_io_start_time_ns(rq), -+ rq->cmd_flags); -+ -+ if (!bfqq->dispatched && !bfq_bfqq_busy(bfqq)) { -+ BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list)); -+ /* -+ * Set budget_timeout (which we overload to store the -+ * time at which the queue remains with no backlog and -+ * no outstanding request; used by the weight-raising -+ * mechanism). -+ */ -+ bfqq->budget_timeout = jiffies; -+ -+ bfq_weights_tree_remove(bfqd, &bfqq->entity, -+ &bfqd->queue_weights_tree); -+ } -+ -+ now_ns = ktime_get_ns(); -+ -+ RQ_BIC(rq)->ttime.last_end_request = now_ns; -+ -+ /* -+ * Using us instead of ns, to get a reasonable precision in -+ * computing rate in next check. -+ */ -+ delta_us = div_u64(now_ns - bfqd->last_completion, NSEC_PER_USEC); -+ -+ bfq_log(bfqd, "rq_completed: delta %uus/%luus max_size %u rate %llu/%llu", -+ delta_us, BFQ_MIN_TT/NSEC_PER_USEC, bfqd->last_rq_max_size, -+ (USEC_PER_SEC* -+ (u64)((bfqd->last_rq_max_size<>BFQ_RATE_SHIFT, -+ (USEC_PER_SEC*(u64)(1UL<<(BFQ_RATE_SHIFT-10)))>>BFQ_RATE_SHIFT); -+ -+ /* -+ * If the request took rather long to complete, and, according -+ * to the maximum request size recorded, this completion latency -+ * implies that the request was certainly served at a very low -+ * rate (less than 1M sectors/sec), then the whole observation -+ * interval that lasts up to this time instant cannot be a -+ * valid time interval for computing a new peak rate. Invoke -+ * bfq_update_rate_reset to have the following three steps -+ * taken: -+ * - close the observation interval at the last (previous) -+ * request dispatch or completion -+ * - compute rate, if possible, for that observation interval -+ * - reset to zero samples, which will trigger a proper -+ * re-initialization of the observation interval on next -+ * dispatch -+ */ -+ if (delta_us > BFQ_MIN_TT/NSEC_PER_USEC && -+ (bfqd->last_rq_max_size<last_completion = now_ns; -+ -+ /* -+ * If we are waiting to discover whether the request pattern -+ * of the task associated with the queue is actually -+ * isochronous, and both requisites for this condition to hold -+ * are now satisfied, then compute soft_rt_next_start (see the -+ * comments on the function bfq_bfqq_softrt_next_start()). We -+ * schedule this delayed check when bfqq expires, if it still -+ * has in-flight requests. -+ */ -+ if (bfq_bfqq_softrt_update(bfqq) && bfqq->dispatched == 0 && -+ RB_EMPTY_ROOT(&bfqq->sort_list)) -+ bfqq->soft_rt_next_start = -+ bfq_bfqq_softrt_next_start(bfqd, bfqq); -+ -+ /* -+ * If this is the in-service queue, check if it needs to be expired, -+ * or if we want to idle in case it has no pending requests. 
-+ */ -+ if (bfqd->in_service_queue == bfqq) { -+ if (bfqq->dispatched == 0 && bfq_bfqq_must_idle(bfqq)) { -+ bfq_arm_slice_timer(bfqd); -+ goto out; -+ } else if (bfq_may_expire_for_budg_timeout(bfqq)) -+ bfq_bfqq_expire(bfqd, bfqq, false, -+ BFQ_BFQQ_BUDGET_TIMEOUT); -+ else if (RB_EMPTY_ROOT(&bfqq->sort_list) && -+ (bfqq->dispatched == 0 || -+ !bfq_bfqq_may_idle(bfqq))) -+ bfq_bfqq_expire(bfqd, bfqq, false, -+ BFQ_BFQQ_NO_MORE_REQUESTS); -+ } -+ -+ if (!bfqd->rq_in_driver) -+ bfq_schedule_dispatch(bfqd); -+ -+out: -+ return; -+} -+ -+static int __bfq_may_queue(struct bfq_queue *bfqq) -+{ -+ if (bfq_bfqq_wait_request(bfqq) && bfq_bfqq_must_alloc(bfqq)) { -+ bfq_clear_bfqq_must_alloc(bfqq); -+ return ELV_MQUEUE_MUST; -+ } -+ -+ return ELV_MQUEUE_MAY; -+} -+ -+static int bfq_may_queue(struct request_queue *q, unsigned int op) -+{ -+ struct bfq_data *bfqd = q->elevator->elevator_data; -+ struct task_struct *tsk = current; -+ struct bfq_io_cq *bic; -+ struct bfq_queue *bfqq; -+ -+ /* -+ * Don't force setup of a queue from here, as a call to may_queue -+ * does not necessarily imply that a request actually will be -+ * queued. So just lookup a possibly existing queue, or return -+ * 'may queue' if that fails. -+ */ -+ bic = bfq_bic_lookup(bfqd, tsk->io_context); -+ if (!bic) -+ return ELV_MQUEUE_MAY; -+ -+ bfqq = bic_to_bfqq(bic, op_is_sync(op)); -+ if (bfqq) -+ return __bfq_may_queue(bfqq); -+ -+ return ELV_MQUEUE_MAY; -+} -+ -+/* -+ * Queue lock held here. -+ */ -+static void bfq_put_request(struct request *rq) -+{ -+ struct bfq_queue *bfqq = RQ_BFQQ(rq); -+ -+ if (bfqq) { -+ const int rw = rq_data_dir(rq); -+ -+ BUG_ON(!bfqq->allocated[rw]); -+ bfqq->allocated[rw]--; -+ -+ rq->elv.priv[0] = NULL; -+ rq->elv.priv[1] = NULL; -+ -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "put_request %p, %d", -+ bfqq, bfqq->ref); -+ bfq_put_queue(bfqq); -+ } -+} -+ -+/* -+ * Returns NULL if a new bfqq should be allocated, or the old bfqq if this -+ * was the last process referring to that bfqq. -+ */ -+static struct bfq_queue * -+bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq) -+{ -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue"); -+ -+ put_io_context(bic->icq.ioc); -+ -+ if (bfqq_process_refs(bfqq) == 1) { -+ bfqq->pid = current->pid; -+ bfq_clear_bfqq_coop(bfqq); -+ bfq_clear_bfqq_split_coop(bfqq); -+ return bfqq; -+ } -+ -+ bic_set_bfqq(bic, NULL, 1); -+ -+ bfq_put_cooperator(bfqq); -+ -+ bfq_put_queue(bfqq); -+ return NULL; -+} -+ -+/* -+ * Allocate bfq data structures associated with this request. 
-+ */ -+static int bfq_set_request(struct request_queue *q, struct request *rq, -+ struct bio *bio, gfp_t gfp_mask) -+{ -+ struct bfq_data *bfqd = q->elevator->elevator_data; -+ struct bfq_io_cq *bic = icq_to_bic(rq->elv.icq); -+ const int rw = rq_data_dir(rq); -+ const int is_sync = rq_is_sync(rq); -+ struct bfq_queue *bfqq; -+ unsigned long flags; -+ bool bfqq_already_existing = false, split = false; -+ -+ spin_lock_irqsave(q->queue_lock, flags); -+ -+ if (!bic) -+ goto queue_fail; -+ -+ bfq_check_ioprio_change(bic, bio); -+ -+ bfq_bic_update_cgroup(bic, bio); -+ -+new_queue: -+ bfqq = bic_to_bfqq(bic, is_sync); -+ if (!bfqq || bfqq == &bfqd->oom_bfqq) { -+ if (bfqq) -+ bfq_put_queue(bfqq); -+ bfqq = bfq_get_queue(bfqd, bio, is_sync, bic); -+ BUG_ON(!hlist_unhashed(&bfqq->burst_list_node)); -+ -+ bic_set_bfqq(bic, bfqq, is_sync); -+ if (split && is_sync) { -+ bfq_log_bfqq(bfqd, bfqq, -+ "set_request: was_in_list %d " -+ "was_in_large_burst %d " -+ "large burst in progress %d", -+ bic->was_in_burst_list, -+ bic->saved_in_large_burst, -+ bfqd->large_burst); -+ -+ if ((bic->was_in_burst_list && bfqd->large_burst) || -+ bic->saved_in_large_burst) { -+ bfq_log_bfqq(bfqd, bfqq, -+ "set_request: marking in " -+ "large burst"); -+ bfq_mark_bfqq_in_large_burst(bfqq); -+ } else { -+ bfq_log_bfqq(bfqd, bfqq, -+ "set_request: clearing in " -+ "large burst"); -+ bfq_clear_bfqq_in_large_burst(bfqq); -+ if (bic->was_in_burst_list) -+ hlist_add_head(&bfqq->burst_list_node, -+ &bfqd->burst_list); -+ } -+ bfqq->split_time = jiffies; -+ } -+ } else { -+ /* If the queue was seeky for too long, break it apart. */ -+ if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq)) { -+ bfq_log_bfqq(bfqd, bfqq, "breaking apart bfqq"); -+ -+ /* Update bic before losing reference to bfqq */ -+ if (bfq_bfqq_in_large_burst(bfqq)) -+ bic->saved_in_large_burst = true; -+ -+ bfqq = bfq_split_bfqq(bic, bfqq); -+ split = true; -+ if (!bfqq) -+ goto new_queue; -+ else -+ bfqq_already_existing = true; -+ } -+ } -+ -+ bfqq->allocated[rw]++; -+ bfqq->ref++; -+ bfq_log_bfqq(bfqd, bfqq, "set_request: bfqq %p, %d", bfqq, bfqq->ref); -+ -+ rq->elv.priv[0] = bic; -+ rq->elv.priv[1] = bfqq; -+ -+ /* -+ * If a bfq_queue has only one process reference, it is owned -+ * by only one bfq_io_cq: we can set the bic field of the -+ * bfq_queue to the address of that structure. Also, if the -+ * queue has just been split, mark a flag so that the -+ * information is available to the other scheduler hooks. -+ */ -+ if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) { -+ bfqq->bic = bic; -+ if (split) { -+ /* -+ * If the queue has just been split from a shared -+ * queue, restore the idle window and the possible -+ * weight raising period. -+ */ -+ bfq_bfqq_resume_state(bfqq, bfqd, bic, -+ bfqq_already_existing); -+ } -+ } -+ -+ if (unlikely(bfq_bfqq_just_created(bfqq))) -+ bfq_handle_burst(bfqd, bfqq); -+ -+ spin_unlock_irqrestore(q->queue_lock, flags); -+ -+ return 0; -+ -+queue_fail: -+ bfq_schedule_dispatch(bfqd); -+ spin_unlock_irqrestore(q->queue_lock, flags); -+ -+ return 1; -+} -+ -+static void bfq_kick_queue(struct work_struct *work) -+{ -+ struct bfq_data *bfqd = -+ container_of(work, struct bfq_data, unplug_work); -+ struct request_queue *q = bfqd->queue; -+ -+ spin_lock_irq(q->queue_lock); -+ __blk_run_queue(q); -+ spin_unlock_irq(q->queue_lock); -+} -+ -+/* -+ * Handler of the expiration of the timer running if the in-service queue -+ * is idling inside its time slice. 
-+ */ -+static enum hrtimer_restart bfq_idle_slice_timer(struct hrtimer *timer) -+{ -+ struct bfq_data *bfqd = container_of(timer, struct bfq_data, -+ idle_slice_timer); -+ struct bfq_queue *bfqq; -+ unsigned long flags; -+ enum bfqq_expiration reason; -+ -+ spin_lock_irqsave(bfqd->queue->queue_lock, flags); -+ -+ bfqq = bfqd->in_service_queue; -+ /* -+ * Theoretical race here: the in-service queue can be NULL or -+ * different from the queue that was idling if the timer handler -+ * spins on the queue_lock and a new request arrives for the -+ * current queue and there is a full dispatch cycle that changes -+ * the in-service queue. This can hardly happen, but in the worst -+ * case we just expire a queue too early. -+ */ -+ if (bfqq) { -+ bfq_log_bfqq(bfqd, bfqq, "slice_timer expired"); -+ bfq_clear_bfqq_wait_request(bfqq); -+ -+ if (bfq_bfqq_budget_timeout(bfqq)) -+ /* -+ * Also here the queue can be safely expired -+ * for budget timeout without wasting -+ * guarantees -+ */ -+ reason = BFQ_BFQQ_BUDGET_TIMEOUT; -+ else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0) -+ /* -+ * The queue may not be empty upon timer expiration, -+ * because we may not disable the timer when the -+ * first request of the in-service queue arrives -+ * during disk idling. -+ */ -+ reason = BFQ_BFQQ_TOO_IDLE; -+ else -+ goto schedule_dispatch; -+ -+ bfq_bfqq_expire(bfqd, bfqq, true, reason); -+ } -+ -+schedule_dispatch: -+ bfq_schedule_dispatch(bfqd); -+ -+ spin_unlock_irqrestore(bfqd->queue->queue_lock, flags); -+ return HRTIMER_NORESTART; -+} -+ -+static void bfq_shutdown_timer_wq(struct bfq_data *bfqd) -+{ -+ hrtimer_cancel(&bfqd->idle_slice_timer); -+ cancel_work_sync(&bfqd->unplug_work); -+} -+ -+static void __bfq_put_async_bfqq(struct bfq_data *bfqd, -+ struct bfq_queue **bfqq_ptr) -+{ -+ struct bfq_group *root_group = bfqd->root_group; -+ struct bfq_queue *bfqq = *bfqq_ptr; -+ -+ bfq_log(bfqd, "put_async_bfqq: %p", bfqq); -+ if (bfqq) { -+ bfq_bfqq_move(bfqd, bfqq, root_group); -+ bfq_log_bfqq(bfqd, bfqq, "put_async_bfqq: putting %p, %d", -+ bfqq, bfqq->ref); -+ bfq_put_queue(bfqq); -+ *bfqq_ptr = NULL; -+ } -+} -+ -+/* -+ * Release all the bfqg references to its async queues. If we are -+ * deallocating the group these queues may still contain requests, so -+ * we reparent them to the root cgroup (i.e., the only one that will -+ * exist for sure until all the requests on a device are gone). 
-+ */ -+static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg) -+{ -+ int i, j; -+ -+ for (i = 0; i < 2; i++) -+ for (j = 0; j < IOPRIO_BE_NR; j++) -+ __bfq_put_async_bfqq(bfqd, &bfqg->async_bfqq[i][j]); -+ -+ __bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq); -+} -+ -+static void bfq_exit_queue(struct elevator_queue *e) -+{ -+ struct bfq_data *bfqd = e->elevator_data; -+ struct request_queue *q = bfqd->queue; -+ struct bfq_queue *bfqq, *n; -+ -+ bfq_shutdown_timer_wq(bfqd); -+ -+ spin_lock_irq(q->queue_lock); -+ -+ BUG_ON(bfqd->in_service_queue); -+ list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list) -+ bfq_deactivate_bfqq(bfqd, bfqq, false, false); -+ -+ spin_unlock_irq(q->queue_lock); -+ -+ bfq_shutdown_timer_wq(bfqd); -+ -+ BUG_ON(hrtimer_active(&bfqd->idle_slice_timer)); -+ -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+ blkcg_deactivate_policy(q, &blkcg_policy_bfq); -+#else -+ bfq_put_async_queues(bfqd, bfqd->root_group); -+ kfree(bfqd->root_group); -+#endif -+ -+ kfree(bfqd); -+} -+ -+static void bfq_init_root_group(struct bfq_group *root_group, -+ struct bfq_data *bfqd) -+{ -+ int i; -+ -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+ root_group->entity.parent = NULL; -+ root_group->my_entity = NULL; -+ root_group->bfqd = bfqd; -+#endif -+ root_group->rq_pos_tree = RB_ROOT; -+ for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) -+ root_group->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT; -+ root_group->sched_data.bfq_class_idle_last_service = jiffies; -+} -+ -+static int bfq_init_queue(struct request_queue *q, struct elevator_type *e) -+{ -+ struct bfq_data *bfqd; -+ struct elevator_queue *eq; -+ -+ eq = elevator_alloc(q, e); -+ if (!eq) -+ return -ENOMEM; -+ -+ bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node); -+ if (!bfqd) { -+ kobject_put(&eq->kobj); -+ return -ENOMEM; -+ } -+ eq->elevator_data = bfqd; -+ -+ /* -+ * Our fallback bfqq if bfq_find_alloc_queue() runs into OOM issues. -+ * Grab a permanent reference to it, so that the normal code flow -+ * will not attempt to free it. -+ */ -+ bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, NULL, 1, 0); -+ bfqd->oom_bfqq.ref++; -+ bfqd->oom_bfqq.new_ioprio = BFQ_DEFAULT_QUEUE_IOPRIO; -+ bfqd->oom_bfqq.new_ioprio_class = IOPRIO_CLASS_BE; -+ bfqd->oom_bfqq.entity.new_weight = -+ bfq_ioprio_to_weight(bfqd->oom_bfqq.new_ioprio); -+ -+ /* oom_bfqq does not participate to bursts */ -+ bfq_clear_bfqq_just_created(&bfqd->oom_bfqq); -+ /* -+ * Trigger weight initialization, according to ioprio, at the -+ * oom_bfqq's first activation. The oom_bfqq's ioprio and ioprio -+ * class won't be changed any more. 
-+ */ -+ bfqd->oom_bfqq.entity.prio_changed = 1; -+ -+ bfqd->queue = q; -+ -+ spin_lock_irq(q->queue_lock); -+ q->elevator = eq; -+ spin_unlock_irq(q->queue_lock); -+ -+ bfqd->root_group = bfq_create_group_hierarchy(bfqd, q->node); -+ if (!bfqd->root_group) -+ goto out_free; -+ bfq_init_root_group(bfqd->root_group, bfqd); -+ bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group); -+ -+ hrtimer_init(&bfqd->idle_slice_timer, CLOCK_MONOTONIC, -+ HRTIMER_MODE_REL); -+ bfqd->idle_slice_timer.function = bfq_idle_slice_timer; -+ -+ bfqd->queue_weights_tree = RB_ROOT; -+ bfqd->group_weights_tree = RB_ROOT; -+ -+ INIT_WORK(&bfqd->unplug_work, bfq_kick_queue); -+ -+ INIT_LIST_HEAD(&bfqd->active_list); -+ INIT_LIST_HEAD(&bfqd->idle_list); -+ INIT_HLIST_HEAD(&bfqd->burst_list); -+ -+ bfqd->hw_tag = -1; -+ -+ bfqd->bfq_max_budget = bfq_default_max_budget; -+ -+ bfqd->bfq_fifo_expire[0] = bfq_fifo_expire[0]; -+ bfqd->bfq_fifo_expire[1] = bfq_fifo_expire[1]; -+ bfqd->bfq_back_max = bfq_back_max; -+ bfqd->bfq_back_penalty = bfq_back_penalty; -+ bfqd->bfq_slice_idle = bfq_slice_idle; -+ bfqd->bfq_timeout = bfq_timeout; -+ -+ bfqd->bfq_requests_within_timer = 120; -+ -+ bfqd->bfq_large_burst_thresh = 8; -+ bfqd->bfq_burst_interval = msecs_to_jiffies(180); -+ -+ bfqd->low_latency = true; -+ -+ /* -+ * Trade-off between responsiveness and fairness. -+ */ -+ bfqd->bfq_wr_coeff = 30; -+ bfqd->bfq_wr_rt_max_time = msecs_to_jiffies(300); -+ bfqd->bfq_wr_max_time = 0; -+ bfqd->bfq_wr_min_idle_time = msecs_to_jiffies(2000); -+ bfqd->bfq_wr_min_inter_arr_async = msecs_to_jiffies(500); -+ bfqd->bfq_wr_max_softrt_rate = 7000; /* -+ * Approximate rate required -+ * to playback or record a -+ * high-definition compressed -+ * video. -+ */ -+ bfqd->wr_busy_queues = 0; -+ -+ /* -+ * Begin by assuming, optimistically, that the device is a -+ * high-speed one, and that its peak rate is equal to 2/3 of -+ * the highest reference rate. -+ */ -+ bfqd->RT_prod = R_fast[blk_queue_nonrot(bfqd->queue)] * -+ T_fast[blk_queue_nonrot(bfqd->queue)]; -+ bfqd->peak_rate = R_fast[blk_queue_nonrot(bfqd->queue)] * 2 / 3; -+ bfqd->device_speed = BFQ_BFQD_FAST; -+ -+ return 0; -+ -+out_free: -+ kfree(bfqd); -+ kobject_put(&eq->kobj); -+ return -ENOMEM; -+} -+ -+static void bfq_slab_kill(void) -+{ -+ kmem_cache_destroy(bfq_pool); -+} -+ -+static int __init bfq_slab_setup(void) -+{ -+ bfq_pool = KMEM_CACHE(bfq_queue, 0); -+ if (!bfq_pool) -+ return -ENOMEM; -+ return 0; -+} -+ -+static ssize_t bfq_var_show(unsigned int var, char *page) -+{ -+ return sprintf(page, "%u\n", var); -+} -+ -+static ssize_t bfq_var_store(unsigned long *var, const char *page, -+ size_t count) -+{ -+ unsigned long new_val; -+ int ret = kstrtoul(page, 10, &new_val); -+ -+ if (ret == 0) -+ *var = new_val; -+ -+ return count; -+} -+ -+static ssize_t bfq_wr_max_time_show(struct elevator_queue *e, char *page) -+{ -+ struct bfq_data *bfqd = e->elevator_data; -+ -+ return sprintf(page, "%d\n", bfqd->bfq_wr_max_time > 0 ? 
-+ jiffies_to_msecs(bfqd->bfq_wr_max_time) : -+ jiffies_to_msecs(bfq_wr_duration(bfqd))); -+} -+ -+static ssize_t bfq_weights_show(struct elevator_queue *e, char *page) -+{ -+ struct bfq_queue *bfqq; -+ struct bfq_data *bfqd = e->elevator_data; -+ ssize_t num_char = 0; -+ -+ num_char += sprintf(page + num_char, "Tot reqs queued %d\n\n", -+ bfqd->queued); -+ -+ spin_lock_irq(bfqd->queue->queue_lock); -+ -+ num_char += sprintf(page + num_char, "Active:\n"); -+ list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list) { -+ num_char += sprintf(page + num_char, -+ "pid%d: weight %hu, nr_queued %d %d, ", -+ bfqq->pid, -+ bfqq->entity.weight, -+ bfqq->queued[0], -+ bfqq->queued[1]); -+ num_char += sprintf(page + num_char, -+ "dur %d/%u\n", -+ jiffies_to_msecs( -+ jiffies - -+ bfqq->last_wr_start_finish), -+ jiffies_to_msecs(bfqq->wr_cur_max_time)); -+ } -+ -+ num_char += sprintf(page + num_char, "Idle:\n"); -+ list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list) { -+ num_char += sprintf(page + num_char, -+ "pid%d: weight %hu, dur %d/%u\n", -+ bfqq->pid, -+ bfqq->entity.weight, -+ jiffies_to_msecs(jiffies - -+ bfqq->last_wr_start_finish), -+ jiffies_to_msecs(bfqq->wr_cur_max_time)); -+ } -+ -+ spin_unlock_irq(bfqd->queue->queue_lock); -+ -+ return num_char; -+} -+ -+#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ -+static ssize_t __FUNC(struct elevator_queue *e, char *page) \ -+{ \ -+ struct bfq_data *bfqd = e->elevator_data; \ -+ u64 __data = __VAR; \ -+ if (__CONV == 1) \ -+ __data = jiffies_to_msecs(__data); \ -+ else if (__CONV == 2) \ -+ __data = div_u64(__data, NSEC_PER_MSEC); \ -+ return bfq_var_show(__data, (page)); \ -+} -+SHOW_FUNCTION(bfq_fifo_expire_sync_show, bfqd->bfq_fifo_expire[1], 2); -+SHOW_FUNCTION(bfq_fifo_expire_async_show, bfqd->bfq_fifo_expire[0], 2); -+SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0); -+SHOW_FUNCTION(bfq_back_seek_penalty_show, bfqd->bfq_back_penalty, 0); -+SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 2); -+SHOW_FUNCTION(bfq_max_budget_show, bfqd->bfq_user_max_budget, 0); -+SHOW_FUNCTION(bfq_timeout_sync_show, bfqd->bfq_timeout, 1); -+SHOW_FUNCTION(bfq_strict_guarantees_show, bfqd->strict_guarantees, 0); -+SHOW_FUNCTION(bfq_low_latency_show, bfqd->low_latency, 0); -+SHOW_FUNCTION(bfq_wr_coeff_show, bfqd->bfq_wr_coeff, 0); -+SHOW_FUNCTION(bfq_wr_rt_max_time_show, bfqd->bfq_wr_rt_max_time, 1); -+SHOW_FUNCTION(bfq_wr_min_idle_time_show, bfqd->bfq_wr_min_idle_time, 1); -+SHOW_FUNCTION(bfq_wr_min_inter_arr_async_show, bfqd->bfq_wr_min_inter_arr_async, -+ 1); -+SHOW_FUNCTION(bfq_wr_max_softrt_rate_show, bfqd->bfq_wr_max_softrt_rate, 0); -+#undef SHOW_FUNCTION -+ -+#define USEC_SHOW_FUNCTION(__FUNC, __VAR) \ -+static ssize_t __FUNC(struct elevator_queue *e, char *page) \ -+{ \ -+ struct bfq_data *bfqd = e->elevator_data; \ -+ u64 __data = __VAR; \ -+ __data = div_u64(__data, NSEC_PER_USEC); \ -+ return bfq_var_show(__data, (page)); \ -+} -+USEC_SHOW_FUNCTION(bfq_slice_idle_us_show, bfqd->bfq_slice_idle); -+#undef USEC_SHOW_FUNCTION -+ -+#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ -+static ssize_t \ -+__FUNC(struct elevator_queue *e, const char *page, size_t count) \ -+{ \ -+ struct bfq_data *bfqd = e->elevator_data; \ -+ unsigned long uninitialized_var(__data); \ -+ int ret = bfq_var_store(&__data, (page), count); \ -+ if (__data < (MIN)) \ -+ __data = (MIN); \ -+ else if (__data > (MAX)) \ -+ __data = (MAX); \ -+ if (__CONV == 1) \ -+ *(__PTR) = msecs_to_jiffies(__data); \ -+ else if (__CONV == 2) \ -+ *(__PTR) = (u64)__data 
* NSEC_PER_MSEC; \ -+ else \ -+ *(__PTR) = __data; \ -+ return ret; \ -+} -+STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1, -+ INT_MAX, 2); -+STORE_FUNCTION(bfq_fifo_expire_async_store, &bfqd->bfq_fifo_expire[0], 1, -+ INT_MAX, 2); -+STORE_FUNCTION(bfq_back_seek_max_store, &bfqd->bfq_back_max, 0, INT_MAX, 0); -+STORE_FUNCTION(bfq_back_seek_penalty_store, &bfqd->bfq_back_penalty, 1, -+ INT_MAX, 0); -+STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 2); -+STORE_FUNCTION(bfq_wr_coeff_store, &bfqd->bfq_wr_coeff, 1, INT_MAX, 0); -+STORE_FUNCTION(bfq_wr_max_time_store, &bfqd->bfq_wr_max_time, 0, INT_MAX, 1); -+STORE_FUNCTION(bfq_wr_rt_max_time_store, &bfqd->bfq_wr_rt_max_time, 0, INT_MAX, -+ 1); -+STORE_FUNCTION(bfq_wr_min_idle_time_store, &bfqd->bfq_wr_min_idle_time, 0, -+ INT_MAX, 1); -+STORE_FUNCTION(bfq_wr_min_inter_arr_async_store, -+ &bfqd->bfq_wr_min_inter_arr_async, 0, INT_MAX, 1); -+STORE_FUNCTION(bfq_wr_max_softrt_rate_store, &bfqd->bfq_wr_max_softrt_rate, 0, -+ INT_MAX, 0); -+#undef STORE_FUNCTION -+ -+#define USEC_STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \ -+static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)\ -+{ \ -+ struct bfq_data *bfqd = e->elevator_data; \ -+ unsigned long uninitialized_var(__data); \ -+ int ret = bfq_var_store(&__data, (page), count); \ -+ if (__data < (MIN)) \ -+ __data = (MIN); \ -+ else if (__data > (MAX)) \ -+ __data = (MAX); \ -+ *(__PTR) = (u64)__data * NSEC_PER_USEC; \ -+ return ret; \ -+} -+USEC_STORE_FUNCTION(bfq_slice_idle_us_store, &bfqd->bfq_slice_idle, 0, -+ UINT_MAX); -+#undef USEC_STORE_FUNCTION -+ -+/* do nothing for the moment */ -+static ssize_t bfq_weights_store(struct elevator_queue *e, -+ const char *page, size_t count) -+{ -+ return count; -+} -+ -+static ssize_t bfq_max_budget_store(struct elevator_queue *e, -+ const char *page, size_t count) -+{ -+ struct bfq_data *bfqd = e->elevator_data; -+ unsigned long uninitialized_var(__data); -+ int ret = bfq_var_store(&__data, (page), count); -+ -+ if (__data == 0) -+ bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd); -+ else { -+ if (__data > INT_MAX) -+ __data = INT_MAX; -+ bfqd->bfq_max_budget = __data; -+ } -+ -+ bfqd->bfq_user_max_budget = __data; -+ -+ return ret; -+} -+ -+/* -+ * Leaving this name to preserve name compatibility with cfq -+ * parameters, but this timeout is used for both sync and async. 
-+ */ -+static ssize_t bfq_timeout_sync_store(struct elevator_queue *e, -+ const char *page, size_t count) -+{ -+ struct bfq_data *bfqd = e->elevator_data; -+ unsigned long uninitialized_var(__data); -+ int ret = bfq_var_store(&__data, (page), count); -+ -+ if (__data < 1) -+ __data = 1; -+ else if (__data > INT_MAX) -+ __data = INT_MAX; -+ -+ bfqd->bfq_timeout = msecs_to_jiffies(__data); -+ if (bfqd->bfq_user_max_budget == 0) -+ bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd); -+ -+ return ret; -+} -+ -+static ssize_t bfq_strict_guarantees_store(struct elevator_queue *e, -+ const char *page, size_t count) -+{ -+ struct bfq_data *bfqd = e->elevator_data; -+ unsigned long uninitialized_var(__data); -+ int ret = bfq_var_store(&__data, (page), count); -+ -+ if (__data > 1) -+ __data = 1; -+ if (!bfqd->strict_guarantees && __data == 1 -+ && bfqd->bfq_slice_idle < 8 * NSEC_PER_MSEC) -+ bfqd->bfq_slice_idle = 8 * NSEC_PER_MSEC; -+ -+ bfqd->strict_guarantees = __data; -+ -+ return ret; -+} -+ -+static ssize_t bfq_low_latency_store(struct elevator_queue *e, -+ const char *page, size_t count) -+{ -+ struct bfq_data *bfqd = e->elevator_data; -+ unsigned long uninitialized_var(__data); -+ int ret = bfq_var_store(&__data, (page), count); -+ -+ if (__data > 1) -+ __data = 1; -+ if (__data == 0 && bfqd->low_latency != 0) -+ bfq_end_wr(bfqd); -+ bfqd->low_latency = __data; -+ -+ return ret; -+} -+ -+#define BFQ_ATTR(name) \ -+ __ATTR(name, S_IRUGO|S_IWUSR, bfq_##name##_show, bfq_##name##_store) -+ -+static struct elv_fs_entry bfq_attrs[] = { -+ BFQ_ATTR(fifo_expire_sync), -+ BFQ_ATTR(fifo_expire_async), -+ BFQ_ATTR(back_seek_max), -+ BFQ_ATTR(back_seek_penalty), -+ BFQ_ATTR(slice_idle), -+ BFQ_ATTR(slice_idle_us), -+ BFQ_ATTR(max_budget), -+ BFQ_ATTR(timeout_sync), -+ BFQ_ATTR(strict_guarantees), -+ BFQ_ATTR(low_latency), -+ BFQ_ATTR(wr_coeff), -+ BFQ_ATTR(wr_max_time), -+ BFQ_ATTR(wr_rt_max_time), -+ BFQ_ATTR(wr_min_idle_time), -+ BFQ_ATTR(wr_min_inter_arr_async), -+ BFQ_ATTR(wr_max_softrt_rate), -+ BFQ_ATTR(weights), -+ __ATTR_NULL -+}; -+ -+static struct elevator_type iosched_bfq = { -+ .ops.sq = { -+ .elevator_merge_fn = bfq_merge, -+ .elevator_merged_fn = bfq_merged_request, -+ .elevator_merge_req_fn = bfq_merged_requests, -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+ .elevator_bio_merged_fn = bfq_bio_merged, -+#endif -+ .elevator_allow_bio_merge_fn = bfq_allow_bio_merge, -+ .elevator_allow_rq_merge_fn = bfq_allow_rq_merge, -+ .elevator_dispatch_fn = bfq_dispatch_requests, -+ .elevator_add_req_fn = bfq_insert_request, -+ .elevator_activate_req_fn = bfq_activate_request, -+ .elevator_deactivate_req_fn = bfq_deactivate_request, -+ .elevator_completed_req_fn = bfq_completed_request, -+ .elevator_former_req_fn = elv_rb_former_request, -+ .elevator_latter_req_fn = elv_rb_latter_request, -+ .elevator_init_icq_fn = bfq_init_icq, -+ .elevator_exit_icq_fn = bfq_exit_icq, -+ .elevator_set_req_fn = bfq_set_request, -+ .elevator_put_req_fn = bfq_put_request, -+ .elevator_may_queue_fn = bfq_may_queue, -+ .elevator_init_fn = bfq_init_queue, -+ .elevator_exit_fn = bfq_exit_queue, -+ }, -+ .icq_size = sizeof(struct bfq_io_cq), -+ .icq_align = __alignof__(struct bfq_io_cq), -+ .elevator_attrs = bfq_attrs, -+ .elevator_name = "bfq-sq", -+ .elevator_owner = THIS_MODULE, -+}; -+ -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+static struct blkcg_policy blkcg_policy_bfq = { -+ .dfl_cftypes = bfq_blkg_files, -+ .legacy_cftypes = bfq_blkcg_legacy_files, -+ -+ .cpd_alloc_fn = bfq_cpd_alloc, -+ .cpd_init_fn = bfq_cpd_init, -+ 
.cpd_bind_fn = bfq_cpd_init, -+ .cpd_free_fn = bfq_cpd_free, -+ -+ .pd_alloc_fn = bfq_pd_alloc, -+ .pd_init_fn = bfq_pd_init, -+ .pd_offline_fn = bfq_pd_offline, -+ .pd_free_fn = bfq_pd_free, -+ .pd_reset_stats_fn = bfq_pd_reset_stats, -+}; -+#endif -+ -+static int __init bfq_init(void) -+{ -+ int ret; -+ char msg[60] = "BFQ I/O-scheduler: v8r12"; -+ -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+ ret = blkcg_policy_register(&blkcg_policy_bfq); -+ if (ret) -+ return ret; -+#endif -+ -+ ret = -ENOMEM; -+ if (bfq_slab_setup()) -+ goto err_pol_unreg; -+ -+ /* -+ * Times to load large popular applications for the typical -+ * systems installed on the reference devices (see the -+ * comments before the definitions of the next two -+ * arrays). Actually, we use slightly slower values, as the -+ * estimated peak rate tends to be smaller than the actual -+ * peak rate. The reason for this last fact is that estimates -+ * are computed over much shorter time intervals than the long -+ * intervals typically used for benchmarking. Why? First, to -+ * adapt more quickly to variations. Second, because an I/O -+ * scheduler cannot rely on a peak-rate-evaluation workload to -+ * be run for a long time. -+ */ -+ T_slow[0] = msecs_to_jiffies(3500); /* actually 4 sec */ -+ T_slow[1] = msecs_to_jiffies(6000); /* actually 6.5 sec */ -+ T_fast[0] = msecs_to_jiffies(7000); /* actually 8 sec */ -+ T_fast[1] = msecs_to_jiffies(2500); /* actually 3 sec */ -+ -+ /* -+ * Thresholds that determine the switch between speed classes -+ * (see the comments before the definition of the array -+ * device_speed_thresh). These thresholds are biased towards -+ * transitions to the fast class. This is safer than the -+ * opposite bias. In fact, a wrong transition to the slow -+ * class results in short weight-raising periods, because the -+ * speed of the device then tends to be higher that the -+ * reference peak rate. On the opposite end, a wrong -+ * transition to the fast class tends to increase -+ * weight-raising periods, because of the opposite reason. 
-+ */ -+ device_speed_thresh[0] = (4 * R_slow[0]) / 3; -+ device_speed_thresh[1] = (4 * R_slow[1]) / 3; -+ -+ ret = elv_register(&iosched_bfq); -+ if (ret) -+ goto err_pol_unreg; -+ -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+ strcat(msg, " (with cgroups support)"); -+#endif -+ pr_info("%s", msg); -+ -+ return 0; -+ -+err_pol_unreg: -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+ blkcg_policy_unregister(&blkcg_policy_bfq); -+#endif -+ return ret; -+} -+ -+static void __exit bfq_exit(void) -+{ -+ elv_unregister(&iosched_bfq); -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+ blkcg_policy_unregister(&blkcg_policy_bfq); -+#endif -+ bfq_slab_kill(); -+} -+ -+module_init(bfq_init); -+module_exit(bfq_exit); -+ -+MODULE_AUTHOR("Arianna Avanzini, Fabio Checconi, Paolo Valente"); -+MODULE_LICENSE("GPL"); - -From e24d2e6461479dbd13d58be2dc44b23b5e24487c Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Mon, 19 Dec 2016 17:13:39 +0100 -Subject: [PATCH 07/51] Add config and build bits for bfq-mq-iosched - -Signed-off-by: Paolo Valente ---- - block/Kconfig.iosched | 10 +++++++++ - block/Makefile | 1 + - block/bfq-cgroup-included.c | 4 ++-- - block/bfq-mq-iosched.c | 25 ++++++++++++----------- - block/bfq-sched.c | 50 ++++++++++++++++++++++----------------------- - block/bfq-sq-iosched.c | 24 +++++++++++----------- - block/bfq.h | 36 +++++++++++++++++++++----------- - 8 files changed, 88 insertions(+), 64 deletions(-) - -diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched -index 9e3f4c2f7390..2d94af3d8b0a 100644 ---- a/block/Kconfig.iosched -+++ b/block/Kconfig.iosched -@@ -96,6 +96,16 @@ config DEFAULT_IOSCHED - default "bfq-sq" if DEFAULT_BFQ_SQ - default "noop" if DEFAULT_NOOP - -+config MQ_IOSCHED_BFQ -+ tristate "BFQ-MQ I/O Scheduler" -+ default y -+ ---help--- -+ BFQ I/O scheduler for BLK-MQ. BFQ-MQ distributes bandwidth -+ among all processes according to their weights, regardless of -+ the device parameters and with any workload. It also -+ guarantees a low latency to interactive and soft real-time -+ applications. Details in Documentation/block/bfq-iosched.txt -+ - config MQ_IOSCHED_DEADLINE - tristate "MQ deadline I/O scheduler" - default y -diff --git a/block/Makefile b/block/Makefile -index 59026b425791..a571329c23f0 100644 ---- a/block/Makefile -+++ b/block/Makefile -@@ -25,6 +25,7 @@ obj-$(CONFIG_MQ_IOSCHED_KYBER) += kyber-iosched.o - bfq-y := bfq-iosched.o bfq-wf2q.o bfq-cgroup.o - obj-$(CONFIG_IOSCHED_BFQ) += bfq.o - obj-$(CONFIG_IOSCHED_BFQ_SQ) += bfq-sq-iosched.o -+obj-$(CONFIG_MQ_IOSCHED_BFQ) += bfq-mq-iosched.o - - obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o - obj-$(CONFIG_BLK_CMDLINE_PARSER) += cmdline-parser.o -diff --git a/block/bfq-cgroup-included.c b/block/bfq-cgroup-included.c -index af7c216a3540..9c483b658179 100644 ---- a/block/bfq-cgroup-included.c -+++ b/block/bfq-cgroup-included.c -@@ -15,7 +15,7 @@ - * file. 
- */ - --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - - /* bfqg stats flags */ - enum bfqg_stats_flags { -@@ -1116,7 +1116,7 @@ static struct cftype bfq_blkg_files[] = { - {} /* terminate */ - }; - --#else /* CONFIG_BFQ_SQ_GROUP_IOSCHED */ -+#else /* BFQ_GROUP_IOSCHED_ENABLED */ - - static inline void bfqg_stats_update_io_add(struct bfq_group *bfqg, - struct bfq_queue *bfqq, unsigned int op) { } -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -index 30d019fc67e0..e88e00f1e0a7 100644 ---- a/block/bfq-mq-iosched.c -+++ b/block/bfq-mq-iosched.c -@@ -82,6 +82,7 @@ - #include - #include - #include "blk.h" -+#undef CONFIG_BFQ_GROUP_IOSCHED /* cgroups support not yet functional */ - #include "bfq.h" - - /* Expiration time of sync (0) and async (1) requests, in ns. */ -@@ -387,7 +388,7 @@ static bool bfq_differentiated_weights(struct bfq_data *bfqd) - return (!RB_EMPTY_ROOT(&bfqd->queue_weights_tree) && - (bfqd->queue_weights_tree.rb_node->rb_left || - bfqd->queue_weights_tree.rb_node->rb_right) --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - ) || - (!RB_EMPTY_ROOT(&bfqd->group_weights_tree) && - (bfqd->group_weights_tree.rb_node->rb_left || -@@ -1672,7 +1673,7 @@ static void bfq_merged_request(struct request_queue *q, struct request *req, - } - } - --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - static void bfq_bio_merged(struct request_queue *q, struct request *req, - struct bio *bio) - { -@@ -3879,7 +3880,7 @@ static int bfq_dispatch_requests(struct request_queue *q, int force) - */ - static void bfq_put_queue(struct bfq_queue *bfqq) - { --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - struct bfq_group *bfqg = bfqq_group(bfqq); - #endif - -@@ -3909,7 +3910,7 @@ static void bfq_put_queue(struct bfq_queue *bfqq) - bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p freed", bfqq); - - kmem_cache_free(bfq_pool, bfqq); --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - bfqg_put(bfqg); - #endif - } -@@ -4835,7 +4836,7 @@ static void bfq_exit_queue(struct elevator_queue *e) - - BUG_ON(hrtimer_active(&bfqd->idle_slice_timer)); - --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - blkcg_deactivate_policy(q, &blkcg_policy_bfq); - #else - bfq_put_async_queues(bfqd, bfqd->root_group); -@@ -4850,7 +4851,7 @@ static void bfq_init_root_group(struct bfq_group *root_group, - { - int i; - --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - root_group->entity.parent = NULL; - root_group->my_entity = NULL; - root_group->bfqd = bfqd; -@@ -5265,7 +5266,7 @@ static struct elevator_type iosched_bfq = { - .elevator_merge_fn = bfq_merge, - .elevator_merged_fn = bfq_merged_request, - .elevator_merge_req_fn = bfq_merged_requests, --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - .elevator_bio_merged_fn = bfq_bio_merged, - #endif - .elevator_allow_bio_merge_fn = bfq_allow_bio_merge, -@@ -5292,7 +5293,7 @@ static struct elevator_type iosched_bfq = { - .elevator_owner = THIS_MODULE, - }; - --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - static struct blkcg_policy blkcg_policy_bfq = { - .dfl_cftypes = bfq_blkg_files, - .legacy_cftypes = bfq_blkcg_legacy_files, -@@ -5315,7 +5316,7 @@ static int __init bfq_init(void) - int ret; - char msg[60] = "BFQ I/O-scheduler: v8r12"; - --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - ret = blkcg_policy_register(&blkcg_policy_bfq); - if 
(ret) - return ret; -@@ -5362,7 +5363,7 @@ static int __init bfq_init(void) - if (ret) - goto err_pol_unreg; - --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - strcat(msg, " (with cgroups support)"); - #endif - pr_info("%s", msg); -@@ -5370,7 +5371,7 @@ static int __init bfq_init(void) - return 0; - - err_pol_unreg: --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - blkcg_policy_unregister(&blkcg_policy_bfq); - #endif - return ret; -@@ -5379,7 +5380,7 @@ static int __init bfq_init(void) - static void __exit bfq_exit(void) - { - elv_unregister(&iosched_bfq); --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - blkcg_policy_unregister(&blkcg_policy_bfq); - #endif - bfq_slab_kill(); -diff --git a/block/bfq-sched.c b/block/bfq-sched.c -index 5c0f9290a79c..b54a638186e3 100644 ---- a/block/bfq-sched.c -+++ b/block/bfq-sched.c -@@ -136,7 +136,7 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd, - if (bfqq) - bfq_log_bfqq(bfqq->bfqd, bfqq, - "update_next_in_service: chosen this queue"); --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - else { - struct bfq_group *bfqg = - container_of(next_in_service, -@@ -149,7 +149,7 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd, - return parent_sched_may_change; - } - --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - /* both next loops stop at one of the child entities of the root group */ - #define for_each_entity(entity) \ - for (; entity ; entity = entity->parent) -@@ -243,7 +243,7 @@ static bool bfq_no_longer_next_in_service(struct bfq_entity *entity) - return false; - } - --#else /* CONFIG_BFQ_SQ_GROUP_IOSCHED */ -+#else /* BFQ_GROUP_IOSCHED_ENABLED */ - #define for_each_entity(entity) \ - for (; entity ; entity = NULL) - -@@ -260,7 +260,7 @@ static bool bfq_no_longer_next_in_service(struct bfq_entity *entity) - return true; - } - --#endif /* CONFIG_BFQ_SQ_GROUP_IOSCHED */ -+#endif /* BFQ_GROUP_IOSCHED_ENABLED */ - - /* - * Shift for timestamp calculations. 
This actually limits the maximum -@@ -323,7 +323,7 @@ static void bfq_calc_finish(struct bfq_entity *entity, unsigned long service) - bfq_log_bfqq(bfqq->bfqd, bfqq, - "calc_finish: start %llu, finish %llu, delta %llu", - start, finish, delta); --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - } else { - struct bfq_group *bfqg = - container_of(entity, struct bfq_group, entity); -@@ -473,7 +473,7 @@ static void bfq_update_active_node(struct rb_node *node) - bfq_log_bfqq(bfqq->bfqd, bfqq, - "update_active_node: new min_start %llu", - ((entity->min_start>>10)*1000)>>12); --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - } else { - struct bfq_group *bfqg = - container_of(entity, struct bfq_group, entity); -@@ -540,7 +540,7 @@ static void bfq_active_insert(struct bfq_service_tree *st, - { - struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); - struct rb_node *node = &entity->rb_node; --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - struct bfq_sched_data *sd = NULL; - struct bfq_group *bfqg = NULL; - struct bfq_data *bfqd = NULL; -@@ -555,7 +555,7 @@ static void bfq_active_insert(struct bfq_service_tree *st, - - bfq_update_active_tree(node); - --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - sd = entity->sched_data; - bfqg = container_of(sd, struct bfq_group, sched_data); - BUG_ON(!bfqg); -@@ -563,7 +563,7 @@ static void bfq_active_insert(struct bfq_service_tree *st, - #endif - if (bfqq) - list_add(&bfqq->bfqq_list, &bfqq->bfqd->active_list); --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - else { /* bfq_group */ - BUG_ON(!bfqd); - bfq_weights_tree_add(bfqd, entity, &bfqd->group_weights_tree); -@@ -652,7 +652,7 @@ static void bfq_active_extract(struct bfq_service_tree *st, - { - struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); - struct rb_node *node; --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - struct bfq_sched_data *sd = NULL; - struct bfq_group *bfqg = NULL; - struct bfq_data *bfqd = NULL; -@@ -664,7 +664,7 @@ static void bfq_active_extract(struct bfq_service_tree *st, - if (node) - bfq_update_active_tree(node); - --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - sd = entity->sched_data; - bfqg = container_of(sd, struct bfq_group, sched_data); - BUG_ON(!bfqg); -@@ -672,7 +672,7 @@ static void bfq_active_extract(struct bfq_service_tree *st, - #endif - if (bfqq) - list_del(&bfqq->bfqq_list); --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - else { /* bfq_group */ - BUG_ON(!bfqd); - bfq_weights_tree_remove(bfqd, entity, -@@ -809,14 +809,14 @@ __bfq_entity_update_weight_prio(struct bfq_service_tree *old_st, - unsigned int prev_weight, new_weight; - struct bfq_data *bfqd = NULL; - struct rb_root *root; --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - struct bfq_sched_data *sd; - struct bfq_group *bfqg; - #endif - - if (bfqq) - bfqd = bfqq->bfqd; --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - else { - sd = entity->my_sched_data; - bfqg = container_of(sd, struct bfq_group, sched_data); -@@ -907,7 +907,7 @@ __bfq_entity_update_weight_prio(struct bfq_service_tree *old_st, - return new_st; - } - --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - static void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg); - #endif - -@@ -936,7 +936,7 @@ static void bfq_bfqq_served(struct bfq_queue *bfqq, int served) - st->vtime += 
bfq_delta(served, st->wsum); - bfq_forget_idle(st); - } --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - bfqg_stats_set_start_empty_time(bfqq_group(bfqq)); - #endif - st = bfq_entity_service_tree(&bfqq->entity); -@@ -1060,7 +1060,7 @@ static void bfq_update_fin_time_enqueue(struct bfq_entity *entity, - bfq_log_bfqq(bfqq->bfqd, bfqq, - "__activate_entity: new queue finish %llu", - ((entity->finish>>10)*1000)>>12); --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - } else { - struct bfq_group *bfqg = - container_of(entity, struct bfq_group, entity); -@@ -1078,7 +1078,7 @@ static void bfq_update_fin_time_enqueue(struct bfq_entity *entity, - bfq_log_bfqq(bfqq->bfqd, bfqq, - "__activate_entity: queue %seligible in st %p", - entity->start <= st->vtime ? "" : "non ", st); --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - } else { - struct bfq_group *bfqg = - container_of(entity, struct bfq_group, entity); -@@ -1153,7 +1153,7 @@ static void __bfq_activate_entity(struct bfq_entity *entity, - - BUG_ON(entity->on_st && bfqq); - --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - if (entity->on_st && !bfqq) { - struct bfq_group *bfqg = - container_of(entity, struct bfq_group, -@@ -1485,7 +1485,7 @@ static void bfq_deactivate_entity(struct bfq_entity *entity, - if (bfqq) - bfq_log_bfqq(bfqq->bfqd, bfqq, - "invoking udpdate_next for this queue"); --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - else { - struct bfq_group *bfqg = - container_of(entity, -@@ -1525,7 +1525,7 @@ static u64 bfq_calc_vtime_jump(struct bfq_service_tree *st) - bfq_log_bfqq(bfqq->bfqd, bfqq, - "calc_vtime_jump: new value %llu", - root_entity->min_start); --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - else { - struct bfq_group *bfqg = - container_of(root_entity, struct bfq_group, -@@ -1661,7 +1661,7 @@ __bfq_lookup_next_entity(struct bfq_service_tree *st, bool in_service - "__lookup_next: start %llu vtime %llu st %p", - ((entity->start>>10)*1000)>>12, - ((new_vtime>>10)*1000)>>12, st); --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - else { - struct bfq_group *bfqg = - container_of(entity, struct bfq_group, entity); -@@ -1735,7 +1735,7 @@ static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd) - if (bfqq) - bfq_log_bfqq(bfqq->bfqd, bfqq, "chosen from st %p %d", - st + class_idx, class_idx); --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - else { - struct bfq_group *bfqg = - container_of(entity, struct bfq_group, entity); -@@ -1777,7 +1777,7 @@ static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd) - */ - sd = &bfqd->root_group->sched_data; - for (; sd ; sd = entity->my_sched_data) { --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - if (entity) { - struct bfq_group *bfqg = - container_of(entity, struct bfq_group, entity); -@@ -1867,7 +1867,7 @@ static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd) - bfq_log_bfqq(bfqd, bfqq, - "get_next_queue: this queue, finish %llu", - (((entity->finish>>10)*1000)>>10)>>2); --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - else { - struct bfq_group *bfqg = - container_of(entity, struct bfq_group, entity); -diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c -index 30d019fc67e0..25da0d1c0622 100644 ---- a/block/bfq-sq-iosched.c -+++ b/block/bfq-sq-iosched.c -@@ -387,7 +387,7 @@ static bool 
bfq_differentiated_weights(struct bfq_data *bfqd) - return (!RB_EMPTY_ROOT(&bfqd->queue_weights_tree) && - (bfqd->queue_weights_tree.rb_node->rb_left || - bfqd->queue_weights_tree.rb_node->rb_right) --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - ) || - (!RB_EMPTY_ROOT(&bfqd->group_weights_tree) && - (bfqd->group_weights_tree.rb_node->rb_left || -@@ -1672,7 +1672,7 @@ static void bfq_merged_request(struct request_queue *q, struct request *req, - } - } - --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - static void bfq_bio_merged(struct request_queue *q, struct request *req, - struct bio *bio) - { -@@ -3879,7 +3879,7 @@ static int bfq_dispatch_requests(struct request_queue *q, int force) - */ - static void bfq_put_queue(struct bfq_queue *bfqq) - { --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - struct bfq_group *bfqg = bfqq_group(bfqq); - #endif - -@@ -3909,7 +3909,7 @@ static void bfq_put_queue(struct bfq_queue *bfqq) - bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p freed", bfqq); - - kmem_cache_free(bfq_pool, bfqq); --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - bfqg_put(bfqg); - #endif - } -@@ -4835,7 +4835,7 @@ static void bfq_exit_queue(struct elevator_queue *e) - - BUG_ON(hrtimer_active(&bfqd->idle_slice_timer)); - --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - blkcg_deactivate_policy(q, &blkcg_policy_bfq); - #else - bfq_put_async_queues(bfqd, bfqd->root_group); -@@ -4850,7 +4850,7 @@ static void bfq_init_root_group(struct bfq_group *root_group, - { - int i; - --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - root_group->entity.parent = NULL; - root_group->my_entity = NULL; - root_group->bfqd = bfqd; -@@ -5265,7 +5265,7 @@ static struct elevator_type iosched_bfq = { - .elevator_merge_fn = bfq_merge, - .elevator_merged_fn = bfq_merged_request, - .elevator_merge_req_fn = bfq_merged_requests, --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - .elevator_bio_merged_fn = bfq_bio_merged, - #endif - .elevator_allow_bio_merge_fn = bfq_allow_bio_merge, -@@ -5292,7 +5292,7 @@ static struct elevator_type iosched_bfq = { - .elevator_owner = THIS_MODULE, - }; - --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - static struct blkcg_policy blkcg_policy_bfq = { - .dfl_cftypes = bfq_blkg_files, - .legacy_cftypes = bfq_blkcg_legacy_files, -@@ -5315,7 +5315,7 @@ static int __init bfq_init(void) - int ret; - char msg[60] = "BFQ I/O-scheduler: v8r12"; - --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - ret = blkcg_policy_register(&blkcg_policy_bfq); - if (ret) - return ret; -@@ -5362,7 +5362,7 @@ static int __init bfq_init(void) - if (ret) - goto err_pol_unreg; - --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - strcat(msg, " (with cgroups support)"); - #endif - pr_info("%s", msg); -@@ -5370,7 +5370,7 @@ static int __init bfq_init(void) - return 0; - - err_pol_unreg: --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - blkcg_policy_unregister(&blkcg_policy_bfq); - #endif - return ret; -@@ -5379,7 +5379,7 @@ static int __init bfq_init(void) - static void __exit bfq_exit(void) - { - elv_unregister(&iosched_bfq); --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - blkcg_policy_unregister(&blkcg_policy_bfq); - #endif - bfq_slab_kill(); -diff --git a/block/bfq.h b/block/bfq.h -index 34fc4697fd89..53954d1b87f8 100644 ---- 
a/block/bfq.h -+++ b/block/bfq.h -@@ -19,6 +19,18 @@ - #include - #include - -+/* -+ * Define an alternative macro to compile cgroups support. This is one -+ * of the steps needed to let bfq-mq share the files bfq-sched.c and -+ * bfq-cgroup.c with bfq-sq. For bfq-mq, the macro -+ * BFQ_GROUP_IOSCHED_ENABLED will be defined as a function of whether -+ * the configuration option CONFIG_BFQ_MQ_GROUP_IOSCHED, and not -+ * CONFIG_BFQ_GROUP_IOSCHED, is defined. -+ */ -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#define BFQ_GROUP_IOSCHED_ENABLED -+#endif -+ - #define BFQ_IOPRIO_CLASSES 3 - #define BFQ_CL_IDLE_TIMEOUT (HZ/5) - -@@ -344,7 +356,7 @@ struct bfq_io_cq { - struct bfq_ttime ttime; - /* per (request_queue, blkcg) ioprio */ - int ioprio; --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - uint64_t blkcg_serial_nr; /* the current blkcg serial */ - #endif - -@@ -671,7 +683,7 @@ static const char *checked_dev_name(const struct device *dev) - return nodev; - } - --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - static struct bfq_group *bfqq_group(struct bfq_queue *bfqq); - static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg); - -@@ -696,7 +708,7 @@ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg); - __pbuf, ##args); \ - } while (0) - --#else /* CONFIG_BFQ_SQ_GROUP_IOSCHED */ -+#else /* BFQ_GROUP_IOSCHED_ENABLED */ - - #define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \ - pr_crit("%s bfq%d%c " fmt "\n", \ -@@ -705,7 +717,7 @@ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg); - ##args) - #define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do {} while (0) - --#endif /* CONFIG_BFQ_SQ_GROUP_IOSCHED */ -+#endif /* BFQ_GROUP_IOSCHED_ENABLED */ - - #define bfq_log(bfqd, fmt, args...) \ - pr_crit("%s bfq " fmt "\n", \ -@@ -713,7 +725,7 @@ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg); - ##args) - - #else /* CONFIG_BFQ_REDIRECT_TO_CONSOLE */ --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - static struct bfq_group *bfqq_group(struct bfq_queue *bfqq); - static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg); - -@@ -735,7 +747,7 @@ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg); - blk_add_trace_msg((bfqd)->queue, "%s " fmt, __pbuf, ##args); \ - } while (0) - --#else /* CONFIG_BFQ_SQ_GROUP_IOSCHED */ -+#else /* BFQ_GROUP_IOSCHED_ENABLED */ - - #define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \ - blk_add_trace_msg((bfqd)->queue, "bfq%d%c " fmt, (bfqq)->pid, \ -@@ -743,7 +755,7 @@ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg); - ##args) - #define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do {} while (0) - --#endif /* CONFIG_BFQ_SQ_GROUP_IOSCHED */ -+#endif /* BFQ_GROUP_IOSCHED_ENABLED */ - - #define bfq_log(bfqd, fmt, args...) \ - blk_add_trace_msg((bfqd)->queue, "bfq " fmt, ##args) -@@ -763,7 +775,7 @@ enum bfqq_expiration { - - - struct bfqg_stats { --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - /* number of ios merged */ - struct blkg_rwstat merged; - /* total time spent on device in ns, may not be accurate w/ queueing */ -@@ -794,7 +806,7 @@ struct bfqg_stats { - #endif - }; - --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - /* - * struct bfq_group_data - per-blkcg storage for the blkio subsystem. 
- * -@@ -895,7 +907,7 @@ bfq_entity_service_tree(struct bfq_entity *entity) - bfq_log_bfqq(bfqq->bfqd, bfqq, - "entity_service_tree %p %d", - sched_data->service_tree + idx, idx); --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - else { - struct bfq_group *bfqg = - container_of(entity, struct bfq_group, entity); -@@ -924,7 +936,7 @@ static struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic) - return bic->icq.q->elevator->elevator_data; - } - --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - - static struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq) - { -@@ -953,7 +965,7 @@ static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd, - struct bfq_io_cq *bic); - static void bfq_end_wr_async_queues(struct bfq_data *bfqd, - struct bfq_group *bfqg); --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg); - #endif - static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq); - -From add91dbd756cf8ca3aa3add9a19eef742d5fca6b Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Fri, 20 Jan 2017 09:18:25 +0100 -Subject: [PATCH 08/51] Increase max policies for io controller - -To let bfq-mq policy be plugged too (however cgroups -suppport is not yet functional in bfq-mq). - -Signed-off-by: Paolo Valente ---- - include/linux/blkdev.h | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h -index bf000c58644b..10f892ca585d 100644 ---- a/include/linux/blkdev.h -+++ b/include/linux/blkdev.h -@@ -54,7 +54,7 @@ struct blk_stat_callback; - * Maximum number of blkcg policies allowed to be registered concurrently. - * Defined here to simplify include dependency. - */ --#define BLKCG_MAX_POLS 4 -+#define BLKCG_MAX_POLS 5 - - typedef void (rq_end_io_fn)(struct request *, blk_status_t); - - -From 2c39a1d9ab4516d44e01e96f19f578b927e7f2e9 Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Mon, 19 Dec 2016 18:11:33 +0100 -Subject: [PATCH 09/51] Copy header file bfq.h as bfq-mq.h - -This commit introduces the header file bfq-mq.h, that will play -for bfq-mq-iosched.c the same role that bfq.h plays for bfq-iosched.c. - -For the moment, the file bfq-mq.h is just a copy of bfq.h. - -Signed-off-by: Paolo Valente ---- - block/bfq-mq-iosched.c | 2 +- - block/bfq-mq.h | 973 +++++++++++++++++++++++++++++++++++++++++++++++++ - 2 files changed, 974 insertions(+), 1 deletion(-) - create mode 100644 block/bfq-mq.h - -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -index e88e00f1e0a7..d1125aee658c 100644 ---- a/block/bfq-mq-iosched.c -+++ b/block/bfq-mq-iosched.c -@@ -83,7 +83,7 @@ - #include - #include "blk.h" - #undef CONFIG_BFQ_GROUP_IOSCHED /* cgroups support not yet functional */ --#include "bfq.h" -+#include "bfq-mq.h" - - /* Expiration time of sync (0) and async (1) requests, in ns. */ - static const u64 bfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 }; -diff --git a/block/bfq-mq.h b/block/bfq-mq.h -new file mode 100644 -index 000000000000..53954d1b87f8 ---- /dev/null -+++ b/block/bfq-mq.h -@@ -0,0 +1,973 @@ -+/* -+ * BFQ v8r12 for 4.11.0: data structures and common functions prototypes. 
-+ * -+ * Based on ideas and code from CFQ: -+ * Copyright (C) 2003 Jens Axboe -+ * -+ * Copyright (C) 2008 Fabio Checconi -+ * Paolo Valente -+ * -+ * Copyright (C) 2015 Paolo Valente -+ * -+ * Copyright (C) 2017 Paolo Valente -+ */ -+ -+#ifndef _BFQ_H -+#define _BFQ_H -+ -+#include -+#include -+#include -+ -+/* -+ * Define an alternative macro to compile cgroups support. This is one -+ * of the steps needed to let bfq-mq share the files bfq-sched.c and -+ * bfq-cgroup.c with bfq-sq. For bfq-mq, the macro -+ * BFQ_GROUP_IOSCHED_ENABLED will be defined as a function of whether -+ * the configuration option CONFIG_BFQ_MQ_GROUP_IOSCHED, and not -+ * CONFIG_BFQ_GROUP_IOSCHED, is defined. -+ */ -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#define BFQ_GROUP_IOSCHED_ENABLED -+#endif -+ -+#define BFQ_IOPRIO_CLASSES 3 -+#define BFQ_CL_IDLE_TIMEOUT (HZ/5) -+ -+#define BFQ_MIN_WEIGHT 1 -+#define BFQ_MAX_WEIGHT 1000 -+#define BFQ_WEIGHT_CONVERSION_COEFF 10 -+ -+#define BFQ_DEFAULT_QUEUE_IOPRIO 4 -+ -+#define BFQ_WEIGHT_LEGACY_DFL 100 -+#define BFQ_DEFAULT_GRP_IOPRIO 0 -+#define BFQ_DEFAULT_GRP_CLASS IOPRIO_CLASS_BE -+ -+/* -+ * Soft real-time applications are extremely more latency sensitive -+ * than interactive ones. Over-raise the weight of the former to -+ * privilege them against the latter. -+ */ -+#define BFQ_SOFTRT_WEIGHT_FACTOR 100 -+ -+struct bfq_entity; -+ -+/** -+ * struct bfq_service_tree - per ioprio_class service tree. -+ * -+ * Each service tree represents a B-WF2Q+ scheduler on its own. Each -+ * ioprio_class has its own independent scheduler, and so its own -+ * bfq_service_tree. All the fields are protected by the queue lock -+ * of the containing bfqd. -+ */ -+struct bfq_service_tree { -+ /* tree for active entities (i.e., those backlogged) */ -+ struct rb_root active; -+ /* tree for idle entities (i.e., not backlogged, with V <= F_i)*/ -+ struct rb_root idle; -+ -+ struct bfq_entity *first_idle; /* idle entity with minimum F_i */ -+ struct bfq_entity *last_idle; /* idle entity with maximum F_i */ -+ -+ u64 vtime; /* scheduler virtual time */ -+ /* scheduler weight sum; active and idle entities contribute to it */ -+ unsigned long wsum; -+}; -+ -+/** -+ * struct bfq_sched_data - multi-class scheduler. -+ * -+ * bfq_sched_data is the basic scheduler queue. It supports three -+ * ioprio_classes, and can be used either as a toplevel queue or as an -+ * intermediate queue in a hierarchical setup. -+ * -+ * The supported ioprio_classes are the same as in CFQ, in descending -+ * priority order, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE. -+ * Requests from higher priority queues are served before all the -+ * requests from lower priority queues; among requests of the same -+ * queue requests are served according to B-WF2Q+. -+ * -+ * The schedule is implemented by the service trees, plus the field -+ * @next_in_service, which points to the entity on the active trees -+ * that will be served next, if 1) no changes in the schedule occurs -+ * before the current in-service entity is expired, 2) the in-service -+ * queue becomes idle when it expires, and 3) if the entity pointed by -+ * in_service_entity is not a queue, then the in-service child entity -+ * of the entity pointed by in_service_entity becomes idle on -+ * expiration. 
This peculiar definition allows for the following -+ * optimization, not yet exploited: while a given entity is still in -+ * service, we already know which is the best candidate for next -+ * service among the other active entitities in the same parent -+ * entity. We can then quickly compare the timestamps of the -+ * in-service entity with those of such best candidate. -+ * -+ * All the fields are protected by the queue lock of the containing -+ * bfqd. -+ */ -+struct bfq_sched_data { -+ struct bfq_entity *in_service_entity; /* entity in service */ -+ /* head-of-the-line entity in the scheduler (see comments above) */ -+ struct bfq_entity *next_in_service; -+ /* array of service trees, one per ioprio_class */ -+ struct bfq_service_tree service_tree[BFQ_IOPRIO_CLASSES]; -+ /* last time CLASS_IDLE was served */ -+ unsigned long bfq_class_idle_last_service; -+ -+}; -+ -+/** -+ * struct bfq_weight_counter - counter of the number of all active entities -+ * with a given weight. -+ */ -+struct bfq_weight_counter { -+ unsigned int weight; /* weight of the entities this counter refers to */ -+ unsigned int num_active; /* nr of active entities with this weight */ -+ /* -+ * Weights tree member (see bfq_data's @queue_weights_tree and -+ * @group_weights_tree) -+ */ -+ struct rb_node weights_node; -+}; -+ -+/** -+ * struct bfq_entity - schedulable entity. -+ * -+ * A bfq_entity is used to represent either a bfq_queue (leaf node in the -+ * cgroup hierarchy) or a bfq_group into the upper level scheduler. Each -+ * entity belongs to the sched_data of the parent group in the cgroup -+ * hierarchy. Non-leaf entities have also their own sched_data, stored -+ * in @my_sched_data. -+ * -+ * Each entity stores independently its priority values; this would -+ * allow different weights on different devices, but this -+ * functionality is not exported to userspace by now. Priorities and -+ * weights are updated lazily, first storing the new values into the -+ * new_* fields, then setting the @prio_changed flag. As soon as -+ * there is a transition in the entity state that allows the priority -+ * update to take place the effective and the requested priority -+ * values are synchronized. -+ * -+ * Unless cgroups are used, the weight value is calculated from the -+ * ioprio to export the same interface as CFQ. When dealing with -+ * ``well-behaved'' queues (i.e., queues that do not spend too much -+ * time to consume their budget and have true sequential behavior, and -+ * when there are no external factors breaking anticipation) the -+ * relative weights at each level of the cgroups hierarchy should be -+ * guaranteed. All the fields are protected by the queue lock of the -+ * containing bfqd. -+ */ -+struct bfq_entity { -+ struct rb_node rb_node; /* service_tree member */ -+ /* pointer to the weight counter associated with this entity */ -+ struct bfq_weight_counter *weight_counter; -+ -+ /* -+ * Flag, true if the entity is on a tree (either the active or -+ * the idle one of its service_tree) or is in service. 
-+ */ -+ bool on_st; -+ -+ u64 finish; /* B-WF2Q+ finish timestamp (aka F_i) */ -+ u64 start; /* B-WF2Q+ start timestamp (aka S_i) */ -+ -+ /* tree the entity is enqueued into; %NULL if not on a tree */ -+ struct rb_root *tree; -+ -+ /* -+ * minimum start time of the (active) subtree rooted at this -+ * entity; used for O(log N) lookups into active trees -+ */ -+ u64 min_start; -+ -+ /* amount of service received during the last service slot */ -+ int service; -+ -+ /* budget, used also to calculate F_i: F_i = S_i + @budget / @weight */ -+ int budget; -+ -+ unsigned int weight; /* weight of the queue */ -+ unsigned int new_weight; /* next weight if a change is in progress */ -+ -+ /* original weight, used to implement weight boosting */ -+ unsigned int orig_weight; -+ -+ /* parent entity, for hierarchical scheduling */ -+ struct bfq_entity *parent; -+ -+ /* -+ * For non-leaf nodes in the hierarchy, the associated -+ * scheduler queue, %NULL on leaf nodes. -+ */ -+ struct bfq_sched_data *my_sched_data; -+ /* the scheduler queue this entity belongs to */ -+ struct bfq_sched_data *sched_data; -+ -+ /* flag, set to request a weight, ioprio or ioprio_class change */ -+ int prio_changed; -+}; -+ -+struct bfq_group; -+ -+/** -+ * struct bfq_queue - leaf schedulable entity. -+ * -+ * A bfq_queue is a leaf request queue; it can be associated with an -+ * io_context or more, if it is async or shared between cooperating -+ * processes. @cgroup holds a reference to the cgroup, to be sure that it -+ * does not disappear while a bfqq still references it (mostly to avoid -+ * races between request issuing and task migration followed by cgroup -+ * destruction). -+ * All the fields are protected by the queue lock of the containing bfqd. -+ */ -+struct bfq_queue { -+ /* reference counter */ -+ int ref; -+ /* parent bfq_data */ -+ struct bfq_data *bfqd; -+ -+ /* current ioprio and ioprio class */ -+ unsigned short ioprio, ioprio_class; -+ /* next ioprio and ioprio class if a change is in progress */ -+ unsigned short new_ioprio, new_ioprio_class; -+ -+ /* -+ * Shared bfq_queue if queue is cooperating with one or more -+ * other queues. 
-+ */ -+ struct bfq_queue *new_bfqq; -+ /* request-position tree member (see bfq_group's @rq_pos_tree) */ -+ struct rb_node pos_node; -+ /* request-position tree root (see bfq_group's @rq_pos_tree) */ -+ struct rb_root *pos_root; -+ -+ /* sorted list of pending requests */ -+ struct rb_root sort_list; -+ /* if fifo isn't expired, next request to serve */ -+ struct request *next_rq; -+ /* number of sync and async requests queued */ -+ int queued[2]; -+ /* number of sync and async requests currently allocated */ -+ int allocated[2]; -+ /* number of pending metadata requests */ -+ int meta_pending; -+ /* fifo list of requests in sort_list */ -+ struct list_head fifo; -+ -+ /* entity representing this queue in the scheduler */ -+ struct bfq_entity entity; -+ -+ /* maximum budget allowed from the feedback mechanism */ -+ int max_budget; -+ /* budget expiration (in jiffies) */ -+ unsigned long budget_timeout; -+ -+ /* number of requests on the dispatch list or inside driver */ -+ int dispatched; -+ -+ unsigned int flags; /* status flags.*/ -+ -+ /* node for active/idle bfqq list inside parent bfqd */ -+ struct list_head bfqq_list; -+ -+ /* bit vector: a 1 for each seeky requests in history */ -+ u32 seek_history; -+ -+ /* node for the device's burst list */ -+ struct hlist_node burst_list_node; -+ -+ /* position of the last request enqueued */ -+ sector_t last_request_pos; -+ -+ /* Number of consecutive pairs of request completion and -+ * arrival, such that the queue becomes idle after the -+ * completion, but the next request arrives within an idle -+ * time slice; used only if the queue's IO_bound flag has been -+ * cleared. -+ */ -+ unsigned int requests_within_timer; -+ -+ /* pid of the process owning the queue, used for logging purposes */ -+ pid_t pid; -+ -+ /* -+ * Pointer to the bfq_io_cq owning the bfq_queue, set to %NULL -+ * if the queue is shared. -+ */ -+ struct bfq_io_cq *bic; -+ -+ /* current maximum weight-raising time for this queue */ -+ unsigned long wr_cur_max_time; -+ /* -+ * Minimum time instant such that, only if a new request is -+ * enqueued after this time instant in an idle @bfq_queue with -+ * no outstanding requests, then the task associated with the -+ * queue it is deemed as soft real-time (see the comments on -+ * the function bfq_bfqq_softrt_next_start()) -+ */ -+ unsigned long soft_rt_next_start; -+ /* -+ * Start time of the current weight-raising period if -+ * the @bfq-queue is being weight-raised, otherwise -+ * finish time of the last weight-raising period. -+ */ -+ unsigned long last_wr_start_finish; -+ /* factor by which the weight of this queue is multiplied */ -+ unsigned int wr_coeff; -+ /* -+ * Time of the last transition of the @bfq_queue from idle to -+ * backlogged. -+ */ -+ unsigned long last_idle_bklogged; -+ /* -+ * Cumulative service received from the @bfq_queue since the -+ * last transition from idle to backlogged. -+ */ -+ unsigned long service_from_backlogged; -+ /* -+ * Value of wr start time when switching to soft rt -+ */ -+ unsigned long wr_start_at_switch_to_srt; -+ -+ unsigned long split_time; /* time of last split */ -+}; -+ -+/** -+ * struct bfq_ttime - per process thinktime stats. -+ */ -+struct bfq_ttime { -+ u64 last_end_request; /* completion time of last request */ -+ -+ u64 ttime_total; /* total process thinktime */ -+ unsigned long ttime_samples; /* number of thinktime samples */ -+ u64 ttime_mean; /* average process thinktime */ -+ -+}; -+ -+/** -+ * struct bfq_io_cq - per (request_queue, io_context) structure. 
-+ */ -+struct bfq_io_cq { -+ /* associated io_cq structure */ -+ struct io_cq icq; /* must be the first member */ -+ /* array of two process queues, the sync and the async */ -+ struct bfq_queue *bfqq[2]; -+ /* associated @bfq_ttime struct */ -+ struct bfq_ttime ttime; -+ /* per (request_queue, blkcg) ioprio */ -+ int ioprio; -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ uint64_t blkcg_serial_nr; /* the current blkcg serial */ -+#endif -+ -+ /* -+ * Snapshot of the has_short_time flag before merging; taken -+ * to remember its value while the queue is merged, so as to -+ * be able to restore it in case of split. -+ */ -+ bool saved_has_short_ttime; -+ /* -+ * Same purpose as the previous two fields for the I/O bound -+ * classification of a queue. -+ */ -+ bool saved_IO_bound; -+ -+ /* -+ * Same purpose as the previous fields for the value of the -+ * field keeping the queue's belonging to a large burst -+ */ -+ bool saved_in_large_burst; -+ /* -+ * True if the queue belonged to a burst list before its merge -+ * with another cooperating queue. -+ */ -+ bool was_in_burst_list; -+ -+ /* -+ * Similar to previous fields: save wr information. -+ */ -+ unsigned long saved_wr_coeff; -+ unsigned long saved_last_wr_start_finish; -+ unsigned long saved_wr_start_at_switch_to_srt; -+ unsigned int saved_wr_cur_max_time; -+}; -+ -+enum bfq_device_speed { -+ BFQ_BFQD_FAST, -+ BFQ_BFQD_SLOW, -+}; -+ -+/** -+ * struct bfq_data - per-device data structure. -+ * -+ * All the fields are protected by the @queue lock. -+ */ -+struct bfq_data { -+ /* request queue for the device */ -+ struct request_queue *queue; -+ -+ /* root bfq_group for the device */ -+ struct bfq_group *root_group; -+ -+ /* -+ * rbtree of weight counters of @bfq_queues, sorted by -+ * weight. Used to keep track of whether all @bfq_queues have -+ * the same weight. The tree contains one counter for each -+ * distinct weight associated to some active and not -+ * weight-raised @bfq_queue (see the comments to the functions -+ * bfq_weights_tree_[add|remove] for further details). -+ */ -+ struct rb_root queue_weights_tree; -+ /* -+ * rbtree of non-queue @bfq_entity weight counters, sorted by -+ * weight. Used to keep track of whether all @bfq_groups have -+ * the same weight. The tree contains one counter for each -+ * distinct weight associated to some active @bfq_group (see -+ * the comments to the functions bfq_weights_tree_[add|remove] -+ * for further details). -+ */ -+ struct rb_root group_weights_tree; -+ -+ /* -+ * Number of bfq_queues containing requests (including the -+ * queue in service, even if it is idling). -+ */ -+ int busy_queues; -+ /* number of weight-raised busy @bfq_queues */ -+ int wr_busy_queues; -+ /* number of queued requests */ -+ int queued; -+ /* number of requests dispatched and waiting for completion */ -+ int rq_in_driver; -+ -+ /* -+ * Maximum number of requests in driver in the last -+ * @hw_tag_samples completed requests. -+ */ -+ int max_rq_in_driver; -+ /* number of samples used to calculate hw_tag */ -+ int hw_tag_samples; -+ /* flag set to one if the driver is showing a queueing behavior */ -+ int hw_tag; -+ -+ /* number of budgets assigned */ -+ int budgets_assigned; -+ -+ /* -+ * Timer set when idling (waiting) for the next request from -+ * the queue in service. 
-+ */ -+ struct hrtimer idle_slice_timer; -+ /* delayed work to restart dispatching on the request queue */ -+ struct work_struct unplug_work; -+ -+ /* bfq_queue in service */ -+ struct bfq_queue *in_service_queue; -+ /* bfq_io_cq (bic) associated with the @in_service_queue */ -+ struct bfq_io_cq *in_service_bic; -+ -+ /* on-disk position of the last served request */ -+ sector_t last_position; -+ -+ /* time of last request completion (ns) */ -+ u64 last_completion; -+ -+ /* time of first rq dispatch in current observation interval (ns) */ -+ u64 first_dispatch; -+ /* time of last rq dispatch in current observation interval (ns) */ -+ u64 last_dispatch; -+ -+ /* beginning of the last budget */ -+ ktime_t last_budget_start; -+ /* beginning of the last idle slice */ -+ ktime_t last_idling_start; -+ -+ /* number of samples in current observation interval */ -+ int peak_rate_samples; -+ /* num of samples of seq dispatches in current observation interval */ -+ u32 sequential_samples; -+ /* total num of sectors transferred in current observation interval */ -+ u64 tot_sectors_dispatched; -+ /* max rq size seen during current observation interval (sectors) */ -+ u32 last_rq_max_size; -+ /* time elapsed from first dispatch in current observ. interval (us) */ -+ u64 delta_from_first; -+ /* current estimate of device peak rate */ -+ u32 peak_rate; -+ -+ /* maximum budget allotted to a bfq_queue before rescheduling */ -+ int bfq_max_budget; -+ -+ /* list of all the bfq_queues active on the device */ -+ struct list_head active_list; -+ /* list of all the bfq_queues idle on the device */ -+ struct list_head idle_list; -+ -+ /* -+ * Timeout for async/sync requests; when it fires, requests -+ * are served in fifo order. -+ */ -+ u64 bfq_fifo_expire[2]; -+ /* weight of backward seeks wrt forward ones */ -+ unsigned int bfq_back_penalty; -+ /* maximum allowed backward seek */ -+ unsigned int bfq_back_max; -+ /* maximum idling time */ -+ u32 bfq_slice_idle; -+ -+ /* user-configured max budget value (0 for auto-tuning) */ -+ int bfq_user_max_budget; -+ /* -+ * Timeout for bfq_queues to consume their budget; used to -+ * prevent seeky queues from imposing long latencies to -+ * sequential or quasi-sequential ones (this also implies that -+ * seeky queues cannot receive guarantees in the service -+ * domain; after a timeout they are charged for the time they -+ * have been in service, to preserve fairness among them, but -+ * without service-domain guarantees). -+ */ -+ unsigned int bfq_timeout; -+ -+ /* -+ * Number of consecutive requests that must be issued within -+ * the idle time slice to set again idling to a queue which -+ * was marked as non-I/O-bound (see the definition of the -+ * IO_bound flag for further details). -+ */ -+ unsigned int bfq_requests_within_timer; -+ -+ /* -+ * Force device idling whenever needed to provide accurate -+ * service guarantees, without caring about throughput -+ * issues. CAVEAT: this may even increase latencies, in case -+ * of useless idling for processes that did stop doing I/O. -+ */ -+ bool strict_guarantees; -+ -+ /* -+ * Last time at which a queue entered the current burst of -+ * queues being activated shortly after each other; for more -+ * details about this and the following parameters related to -+ * a burst of activations, see the comments on the function -+ * bfq_handle_burst. -+ */ -+ unsigned long last_ins_in_burst; -+ /* -+ * Reference time interval used to decide whether a queue has -+ * been activated shortly after @last_ins_in_burst. 
-+ */ -+ unsigned long bfq_burst_interval; -+ /* number of queues in the current burst of queue activations */ -+ int burst_size; -+ -+ /* common parent entity for the queues in the burst */ -+ struct bfq_entity *burst_parent_entity; -+ /* Maximum burst size above which the current queue-activation -+ * burst is deemed as 'large'. -+ */ -+ unsigned long bfq_large_burst_thresh; -+ /* true if a large queue-activation burst is in progress */ -+ bool large_burst; -+ /* -+ * Head of the burst list (as for the above fields, more -+ * details in the comments on the function bfq_handle_burst). -+ */ -+ struct hlist_head burst_list; -+ -+ /* if set to true, low-latency heuristics are enabled */ -+ bool low_latency; -+ /* -+ * Maximum factor by which the weight of a weight-raised queue -+ * is multiplied. -+ */ -+ unsigned int bfq_wr_coeff; -+ /* maximum duration of a weight-raising period (jiffies) */ -+ unsigned int bfq_wr_max_time; -+ -+ /* Maximum weight-raising duration for soft real-time processes */ -+ unsigned int bfq_wr_rt_max_time; -+ /* -+ * Minimum idle period after which weight-raising may be -+ * reactivated for a queue (in jiffies). -+ */ -+ unsigned int bfq_wr_min_idle_time; -+ /* -+ * Minimum period between request arrivals after which -+ * weight-raising may be reactivated for an already busy async -+ * queue (in jiffies). -+ */ -+ unsigned long bfq_wr_min_inter_arr_async; -+ -+ /* Max service-rate for a soft real-time queue, in sectors/sec */ -+ unsigned int bfq_wr_max_softrt_rate; -+ /* -+ * Cached value of the product R*T, used for computing the -+ * maximum duration of weight raising automatically. -+ */ -+ u64 RT_prod; -+ /* device-speed class for the low-latency heuristic */ -+ enum bfq_device_speed device_speed; -+ -+ /* fallback dummy bfqq for extreme OOM conditions */ -+ struct bfq_queue oom_bfqq; -+}; -+ -+enum bfqq_state_flags { -+ BFQ_BFQQ_FLAG_just_created = 0, /* queue just allocated */ -+ BFQ_BFQQ_FLAG_busy, /* has requests or is in service */ -+ BFQ_BFQQ_FLAG_wait_request, /* waiting for a request */ -+ BFQ_BFQQ_FLAG_non_blocking_wait_rq, /* -+ * waiting for a request -+ * without idling the device -+ */ -+ BFQ_BFQQ_FLAG_must_alloc, /* must be allowed rq alloc */ -+ BFQ_BFQQ_FLAG_fifo_expire, /* FIFO checked in this slice */ -+ BFQ_BFQQ_FLAG_has_short_ttime, /* queue has a short think time */ -+ BFQ_BFQQ_FLAG_sync, /* synchronous queue */ -+ BFQ_BFQQ_FLAG_IO_bound, /* -+ * bfqq has timed-out at least once -+ * having consumed at most 2/10 of -+ * its budget -+ */ -+ BFQ_BFQQ_FLAG_in_large_burst, /* -+ * bfqq activated in a large burst, -+ * see comments to bfq_handle_burst. 
-+ */ -+ BFQ_BFQQ_FLAG_softrt_update, /* -+ * may need softrt-next-start -+ * update -+ */ -+ BFQ_BFQQ_FLAG_coop, /* bfqq is shared */ -+ BFQ_BFQQ_FLAG_split_coop /* shared bfqq will be split */ -+}; -+ -+#define BFQ_BFQQ_FNS(name) \ -+static void bfq_mark_bfqq_##name(struct bfq_queue *bfqq) \ -+{ \ -+ (bfqq)->flags |= (1 << BFQ_BFQQ_FLAG_##name); \ -+} \ -+static void bfq_clear_bfqq_##name(struct bfq_queue *bfqq) \ -+{ \ -+ (bfqq)->flags &= ~(1 << BFQ_BFQQ_FLAG_##name); \ -+} \ -+static int bfq_bfqq_##name(const struct bfq_queue *bfqq) \ -+{ \ -+ return ((bfqq)->flags & (1 << BFQ_BFQQ_FLAG_##name)) != 0; \ -+} -+ -+BFQ_BFQQ_FNS(just_created); -+BFQ_BFQQ_FNS(busy); -+BFQ_BFQQ_FNS(wait_request); -+BFQ_BFQQ_FNS(non_blocking_wait_rq); -+BFQ_BFQQ_FNS(must_alloc); -+BFQ_BFQQ_FNS(fifo_expire); -+BFQ_BFQQ_FNS(has_short_ttime); -+BFQ_BFQQ_FNS(sync); -+BFQ_BFQQ_FNS(IO_bound); -+BFQ_BFQQ_FNS(in_large_burst); -+BFQ_BFQQ_FNS(coop); -+BFQ_BFQQ_FNS(split_coop); -+BFQ_BFQQ_FNS(softrt_update); -+#undef BFQ_BFQQ_FNS -+ -+/* Logging facilities. */ -+#ifdef CONFIG_BFQ_REDIRECT_TO_CONSOLE -+ -+static const char *checked_dev_name(const struct device *dev) -+{ -+ static const char nodev[] = "nodev"; -+ -+ if (dev) -+ return dev_name(dev); -+ -+ return nodev; -+} -+ -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+static struct bfq_group *bfqq_group(struct bfq_queue *bfqq); -+static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg); -+ -+#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \ -+ char __pbuf[128]; \ -+ \ -+ assert_spin_locked((bfqd)->queue->queue_lock); \ -+ blkg_path(bfqg_to_blkg(bfqq_group(bfqq)), __pbuf, sizeof(__pbuf)); \ -+ pr_crit("%s bfq%d%c %s " fmt "\n", \ -+ checked_dev_name((bfqd)->queue->backing_dev_info->dev), \ -+ (bfqq)->pid, \ -+ bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \ -+ __pbuf, ##args); \ -+} while (0) -+ -+#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do { \ -+ char __pbuf[128]; \ -+ \ -+ blkg_path(bfqg_to_blkg(bfqg), __pbuf, sizeof(__pbuf)); \ -+ pr_crit("%s %s " fmt "\n", \ -+ checked_dev_name((bfqd)->queue->backing_dev_info->dev), \ -+ __pbuf, ##args); \ -+} while (0) -+ -+#else /* BFQ_GROUP_IOSCHED_ENABLED */ -+ -+#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \ -+ pr_crit("%s bfq%d%c " fmt "\n", \ -+ checked_dev_name((bfqd)->queue->backing_dev_info->dev), \ -+ (bfqq)->pid, bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \ -+ ##args) -+#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do {} while (0) -+ -+#endif /* BFQ_GROUP_IOSCHED_ENABLED */ -+ -+#define bfq_log(bfqd, fmt, args...) \ -+ pr_crit("%s bfq " fmt "\n", \ -+ checked_dev_name((bfqd)->queue->backing_dev_info->dev), \ -+ ##args) -+ -+#else /* CONFIG_BFQ_REDIRECT_TO_CONSOLE */ -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+static struct bfq_group *bfqq_group(struct bfq_queue *bfqq); -+static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg); -+ -+#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \ -+ char __pbuf[128]; \ -+ \ -+ assert_spin_locked((bfqd)->queue->queue_lock); \ -+ blkg_path(bfqg_to_blkg(bfqq_group(bfqq)), __pbuf, sizeof(__pbuf)); \ -+ blk_add_trace_msg((bfqd)->queue, "bfq%d%c %s " fmt, \ -+ (bfqq)->pid, \ -+ bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \ -+ __pbuf, ##args); \ -+} while (0) -+ -+#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do { \ -+ char __pbuf[128]; \ -+ \ -+ blkg_path(bfqg_to_blkg(bfqg), __pbuf, sizeof(__pbuf)); \ -+ blk_add_trace_msg((bfqd)->queue, "%s " fmt, __pbuf, ##args); \ -+} while (0) -+ -+#else /* BFQ_GROUP_IOSCHED_ENABLED */ -+ -+#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) 
\ -+ blk_add_trace_msg((bfqd)->queue, "bfq%d%c " fmt, (bfqq)->pid, \ -+ bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \ -+ ##args) -+#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do {} while (0) -+ -+#endif /* BFQ_GROUP_IOSCHED_ENABLED */ -+ -+#define bfq_log(bfqd, fmt, args...) \ -+ blk_add_trace_msg((bfqd)->queue, "bfq " fmt, ##args) -+#endif /* CONFIG_BFQ_REDIRECT_TO_CONSOLE */ -+ -+/* Expiration reasons. */ -+enum bfqq_expiration { -+ BFQ_BFQQ_TOO_IDLE = 0, /* -+ * queue has been idling for -+ * too long -+ */ -+ BFQ_BFQQ_BUDGET_TIMEOUT, /* budget took too long to be used */ -+ BFQ_BFQQ_BUDGET_EXHAUSTED, /* budget consumed */ -+ BFQ_BFQQ_NO_MORE_REQUESTS, /* the queue has no more requests */ -+ BFQ_BFQQ_PREEMPTED /* preemption in progress */ -+}; -+ -+ -+struct bfqg_stats { -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ /* number of ios merged */ -+ struct blkg_rwstat merged; -+ /* total time spent on device in ns, may not be accurate w/ queueing */ -+ struct blkg_rwstat service_time; -+ /* total time spent waiting in scheduler queue in ns */ -+ struct blkg_rwstat wait_time; -+ /* number of IOs queued up */ -+ struct blkg_rwstat queued; -+ /* total disk time and nr sectors dispatched by this group */ -+ struct blkg_stat time; -+ /* sum of number of ios queued across all samples */ -+ struct blkg_stat avg_queue_size_sum; -+ /* count of samples taken for average */ -+ struct blkg_stat avg_queue_size_samples; -+ /* how many times this group has been removed from service tree */ -+ struct blkg_stat dequeue; -+ /* total time spent waiting for it to be assigned a timeslice. */ -+ struct blkg_stat group_wait_time; -+ /* time spent idling for this blkcg_gq */ -+ struct blkg_stat idle_time; -+ /* total time with empty current active q with other requests queued */ -+ struct blkg_stat empty_time; -+ /* fields after this shouldn't be cleared on stat reset */ -+ uint64_t start_group_wait_time; -+ uint64_t start_idle_time; -+ uint64_t start_empty_time; -+ uint16_t flags; -+#endif -+}; -+ -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+/* -+ * struct bfq_group_data - per-blkcg storage for the blkio subsystem. -+ * -+ * @ps: @blkcg_policy_storage that this structure inherits -+ * @weight: weight of the bfq_group -+ */ -+struct bfq_group_data { -+ /* must be the first member */ -+ struct blkcg_policy_data pd; -+ -+ unsigned int weight; -+}; -+ -+/** -+ * struct bfq_group - per (device, cgroup) data structure. -+ * @entity: schedulable entity to insert into the parent group sched_data. -+ * @sched_data: own sched_data, to contain child entities (they may be -+ * both bfq_queues and bfq_groups). -+ * @bfqd: the bfq_data for the device this group acts upon. -+ * @async_bfqq: array of async queues for all the tasks belonging to -+ * the group, one queue per ioprio value per ioprio_class, -+ * except for the idle class that has only one queue. -+ * @async_idle_bfqq: async queue for the idle class (ioprio is ignored). -+ * @my_entity: pointer to @entity, %NULL for the toplevel group; used -+ * to avoid too many special cases during group creation/ -+ * migration. -+ * @active_entities: number of active entities belonging to the group; -+ * unused for the root group. Used to know whether there -+ * are groups with more than one active @bfq_entity -+ * (see the comments to the function -+ * bfq_bfqq_may_idle()). -+ * @rq_pos_tree: rbtree sorted by next_request position, used when -+ * determining if two or more queues have interleaving -+ * requests (see bfq_find_close_cooperator()). 
-+ * -+ * Each (device, cgroup) pair has its own bfq_group, i.e., for each cgroup -+ * there is a set of bfq_groups, each one collecting the lower-level -+ * entities belonging to the group that are acting on the same device. -+ * -+ * Locking works as follows: -+ * o @bfqd is protected by the queue lock, RCU is used to access it -+ * from the readers. -+ * o All the other fields are protected by the @bfqd queue lock. -+ */ -+struct bfq_group { -+ /* must be the first member */ -+ struct blkg_policy_data pd; -+ -+ struct bfq_entity entity; -+ struct bfq_sched_data sched_data; -+ -+ void *bfqd; -+ -+ struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR]; -+ struct bfq_queue *async_idle_bfqq; -+ -+ struct bfq_entity *my_entity; -+ -+ int active_entities; -+ -+ struct rb_root rq_pos_tree; -+ -+ struct bfqg_stats stats; -+}; -+ -+#else -+struct bfq_group { -+ struct bfq_sched_data sched_data; -+ -+ struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR]; -+ struct bfq_queue *async_idle_bfqq; -+ -+ struct rb_root rq_pos_tree; -+}; -+#endif -+ -+static struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity); -+ -+static unsigned int bfq_class_idx(struct bfq_entity *entity) -+{ -+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); -+ -+ return bfqq ? bfqq->ioprio_class - 1 : -+ BFQ_DEFAULT_GRP_CLASS - 1; -+} -+ -+static struct bfq_service_tree * -+bfq_entity_service_tree(struct bfq_entity *entity) -+{ -+ struct bfq_sched_data *sched_data = entity->sched_data; -+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); -+ unsigned int idx = bfq_class_idx(entity); -+ -+ BUG_ON(idx >= BFQ_IOPRIO_CLASSES); -+ BUG_ON(sched_data == NULL); -+ -+ if (bfqq) -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "entity_service_tree %p %d", -+ sched_data->service_tree + idx, idx); -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ else { -+ struct bfq_group *bfqg = -+ container_of(entity, struct bfq_group, entity); -+ -+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg, -+ "entity_service_tree %p %d", -+ sched_data->service_tree + idx, idx); -+ } -+#endif -+ return sched_data->service_tree + idx; -+} -+ -+static struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync) -+{ -+ return bic->bfqq[is_sync]; -+} -+ -+static void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, -+ bool is_sync) -+{ -+ bic->bfqq[is_sync] = bfqq; -+} -+ -+static struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic) -+{ -+ return bic->icq.q->elevator->elevator_data; -+} -+ -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ -+static struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq) -+{ -+ struct bfq_entity *group_entity = bfqq->entity.parent; -+ -+ if (!group_entity) -+ group_entity = &bfqq->bfqd->root_group->entity; -+ -+ return container_of(group_entity, struct bfq_group, entity); -+} -+ -+#else -+ -+static struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq) -+{ -+ return bfqq->bfqd->root_group; -+} -+ -+#endif -+ -+static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio); -+static void bfq_put_queue(struct bfq_queue *bfqq); -+static void bfq_dispatch_insert(struct request_queue *q, struct request *rq); -+static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd, -+ struct bio *bio, bool is_sync, -+ struct bfq_io_cq *bic); -+static void bfq_end_wr_async_queues(struct bfq_data *bfqd, -+ struct bfq_group *bfqg); -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg); -+#endif -+static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq); -+ -+#endif /* _BFQ_H 
*/ - -From 0bd96428e086fd28800efdf5f0a5f62869af6e30 Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Sat, 21 Jan 2017 12:41:14 +0100 -Subject: [PATCH 10/51] Move thinktime from bic to bfqq - -Prep change to make it possible to protect this field with a -scheduler lock. - -Signed-off-by: Paolo Valente ---- - block/bfq-mq-iosched.c | 28 ++++++++++++++-------------- - block/bfq-mq.h | 30 ++++++++++++++++-------------- - 2 files changed, 30 insertions(+), 28 deletions(-) - -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -index d1125aee658c..65f5dfb79417 100644 ---- a/block/bfq-mq-iosched.c -+++ b/block/bfq-mq-iosched.c -@@ -698,6 +698,7 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd, - if (unlikely(busy)) - old_wr_coeff = bfqq->wr_coeff; - -+ bfqq->ttime = bic->saved_ttime; - bfqq->wr_coeff = bic->saved_wr_coeff; - bfqq->wr_start_at_switch_to_srt = bic->saved_wr_start_at_switch_to_srt; - BUG_ON(time_is_after_jiffies(bfqq->wr_start_at_switch_to_srt)); -@@ -1287,7 +1288,7 @@ static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd, - * details on the usage of the next variable. - */ - arrived_in_time = ktime_get_ns() <= -- RQ_BIC(rq)->ttime.last_end_request + -+ bfqq->ttime.last_end_request + - bfqd->bfq_slice_idle * 3; - - bfq_log_bfqq(bfqd, bfqq, -@@ -2048,6 +2049,7 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq) - if (!bic) - return; - -+ bic->saved_ttime = bfqq->ttime; - bic->saved_has_short_ttime = bfq_bfqq_has_short_ttime(bfqq); - bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq); - bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq); -@@ -3948,11 +3950,6 @@ static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) - bfq_put_queue(bfqq); /* release process reference */ - } - --static void bfq_init_icq(struct io_cq *icq) --{ -- icq_to_bic(icq)->ttime.last_end_request = ktime_get_ns() - (1ULL<<32); --} -- - static void bfq_exit_icq(struct io_cq *icq) - { - struct bfq_io_cq *bic = icq_to_bic(icq); -@@ -4084,6 +4081,9 @@ static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, - bfq_mark_bfqq_just_created(bfqq); - } else - bfq_clear_bfqq_sync(bfqq); -+ -+ bfqq->ttime.last_end_request = ktime_get_ns() - (1ULL<<32); -+ - bfq_mark_bfqq_IO_bound(bfqq); - - /* Tentative initial value to trade off between thr and lat */ -@@ -4191,14 +4191,14 @@ static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd, - } - - static void bfq_update_io_thinktime(struct bfq_data *bfqd, -- struct bfq_io_cq *bic) -+ struct bfq_queue *bfqq) - { -- struct bfq_ttime *ttime = &bic->ttime; -- u64 elapsed = ktime_get_ns() - bic->ttime.last_end_request; -+ struct bfq_ttime *ttime = &bfqq->ttime; -+ u64 elapsed = ktime_get_ns() - bfqq->ttime.last_end_request; - - elapsed = min_t(u64, elapsed, 2 * bfqd->bfq_slice_idle); - -- ttime->ttime_samples = (7*bic->ttime.ttime_samples + 256) / 8; -+ ttime->ttime_samples = (7*bfqq->ttime.ttime_samples + 256) / 8; - ttime->ttime_total = div_u64(7*ttime->ttime_total + 256*elapsed, 8); - ttime->ttime_mean = div64_ul(ttime->ttime_total + 128, - ttime->ttime_samples); -@@ -4240,8 +4240,8 @@ static void bfq_update_has_short_ttime(struct bfq_data *bfqd, - * decide whether to mark as has_short_ttime - */ - if (atomic_read(&bic->icq.ioc->active_ref) == 0 || -- (bfq_sample_valid(bic->ttime.ttime_samples) && -- bic->ttime.ttime_mean > bfqd->bfq_slice_idle)) -+ (bfq_sample_valid(bfqq->ttime.ttime_samples) && -+ bfqq->ttime.ttime_mean > bfqd->bfq_slice_idle)) - has_short_ttime = false; - - 
bfq_log_bfqq(bfqd, bfqq, "update_has_short_ttime: has_short_ttime %d", -@@ -4265,7 +4265,7 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq, - if (rq->cmd_flags & REQ_META) - bfqq->meta_pending++; - -- bfq_update_io_thinktime(bfqd, bic); -+ bfq_update_io_thinktime(bfqd, bfqq); - bfq_update_has_short_ttime(bfqd, bfqq, bic); - bfq_update_io_seektime(bfqd, bfqq, rq); - -@@ -4436,7 +4436,7 @@ static void bfq_completed_request(struct request_queue *q, struct request *rq) - - now_ns = ktime_get_ns(); - -- RQ_BIC(rq)->ttime.last_end_request = now_ns; -+ bfqq->ttime.last_end_request = now_ns; - - /* - * Using us instead of ns, to get a reasonable precision in -diff --git a/block/bfq-mq.h b/block/bfq-mq.h -index 53954d1b87f8..0f51f270469c 100644 ---- a/block/bfq-mq.h -+++ b/block/bfq-mq.h -@@ -210,6 +210,18 @@ struct bfq_entity { - struct bfq_group; - - /** -+ * struct bfq_ttime - per process thinktime stats. -+ */ -+struct bfq_ttime { -+ u64 last_end_request; /* completion time of last request */ -+ -+ u64 ttime_total; /* total process thinktime */ -+ unsigned long ttime_samples; /* number of thinktime samples */ -+ u64 ttime_mean; /* average process thinktime */ -+ -+}; -+ -+/** - * struct bfq_queue - leaf schedulable entity. - * - * A bfq_queue is a leaf request queue; it can be associated with an -@@ -270,6 +282,9 @@ struct bfq_queue { - /* node for active/idle bfqq list inside parent bfqd */ - struct list_head bfqq_list; - -+ /* associated @bfq_ttime struct */ -+ struct bfq_ttime ttime; -+ - /* bit vector: a 1 for each seeky requests in history */ - u32 seek_history; - -@@ -333,18 +348,6 @@ struct bfq_queue { - }; - - /** -- * struct bfq_ttime - per process thinktime stats. -- */ --struct bfq_ttime { -- u64 last_end_request; /* completion time of last request */ -- -- u64 ttime_total; /* total process thinktime */ -- unsigned long ttime_samples; /* number of thinktime samples */ -- u64 ttime_mean; /* average process thinktime */ -- --}; -- --/** - * struct bfq_io_cq - per (request_queue, io_context) structure. - */ - struct bfq_io_cq { -@@ -352,8 +355,6 @@ struct bfq_io_cq { - struct io_cq icq; /* must be the first member */ - /* array of two process queues, the sync and the async */ - struct bfq_queue *bfqq[2]; -- /* associated @bfq_ttime struct */ -- struct bfq_ttime ttime; - /* per (request_queue, blkcg) ioprio */ - int ioprio; - #ifdef BFQ_GROUP_IOSCHED_ENABLED -@@ -390,6 +391,7 @@ struct bfq_io_cq { - unsigned long saved_last_wr_start_finish; - unsigned long saved_wr_start_at_switch_to_srt; - unsigned int saved_wr_cur_max_time; -+ struct bfq_ttime saved_ttime; - }; - - enum bfq_device_speed { - -From 351a9aea7c0c9c30edacdbf2a3c0d089470de1e8 Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Wed, 18 Jan 2017 11:42:22 +0100 -Subject: [PATCH 11/51] Embed bfq-ioc.c and add locking on request queue - -The version of bfq-ioc.c for bfq-iosched.c is not correct any more for -bfq-mq, because, in bfq-mq, the request queue lock is not being held -when bfq_bic_lookup is invoked. That function must then take that look -on its own. This commit removes the inclusion of bfq-ioc.c, copies the -content of bfq-ioc.c into bfq-mq-iosched.c, and adds the grabbing of -the lock. 
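In short, the lookup now brackets the icq search with the queue lock on its own (condensed from the hunk below, which adds bfq_bic_lookup() and icq_to_bic() to bfq-mq-iosched.c):

    static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
                                            struct io_context *ioc,
                                            struct request_queue *q)
    {
            struct bfq_io_cq *icq;

            if (!ioc)
                    return NULL;

            /* no caller holds q->queue_lock for us in bfq-mq */
            spin_lock_irq(q->queue_lock);
            icq = icq_to_bic(ioc_lookup_icq(ioc, q));
            spin_unlock_irq(q->queue_lock);

            return icq;
    }

The next patch in the series switches this pair to the irqsave/irqrestore variants.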
- -Signed-off-by: Paolo Valente ---- - block/bfq-mq-iosched.c | 39 ++++++++++++++++++++++++++++++++++++--- - 1 file changed, 36 insertions(+), 3 deletions(-) - -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -index 65f5dfb79417..756a618d5902 100644 ---- a/block/bfq-mq-iosched.c -+++ b/block/bfq-mq-iosched.c -@@ -195,7 +195,39 @@ static int device_speed_thresh[2]; - - static void bfq_schedule_dispatch(struct bfq_data *bfqd); - --#include "bfq-ioc.c" -+/** -+ * icq_to_bic - convert iocontext queue structure to bfq_io_cq. -+ * @icq: the iocontext queue. -+ */ -+static struct bfq_io_cq *icq_to_bic(struct io_cq *icq) -+{ -+ /* bic->icq is the first member, %NULL will convert to %NULL */ -+ return container_of(icq, struct bfq_io_cq, icq); -+} -+ -+/** -+ * bfq_bic_lookup - search into @ioc a bic associated to @bfqd. -+ * @bfqd: the lookup key. -+ * @ioc: the io_context of the process doing I/O. -+ * @q: the request queue. -+ */ -+static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd, -+ struct io_context *ioc, -+ struct request_queue *q) -+{ -+ if (ioc) { -+ struct bfq_io_cq *icq; -+ -+ spin_lock_irq(q->queue_lock); -+ icq = icq_to_bic(ioc_lookup_icq(ioc, q)); -+ spin_unlock_irq(q->queue_lock); -+ -+ return icq; -+ } -+ -+ return NULL; -+} -+ - #include "bfq-sched.c" - #include "bfq-cgroup-included.c" - -@@ -1520,13 +1552,14 @@ static void bfq_add_request(struct request *rq) - } - - static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd, -- struct bio *bio) -+ struct bio *bio, -+ struct request_queue *q) - { - struct task_struct *tsk = current; - struct bfq_io_cq *bic; - struct bfq_queue *bfqq; - -- bic = bfq_bic_lookup(bfqd, tsk->io_context); -+ bic = bfq_bic_lookup(bfqd, tsk->io_context, q); - if (!bic) - return NULL; - - -From ed0d64e27b2308813a2a846139e405e0479f0849 Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Tue, 20 Dec 2016 09:07:19 +0100 -Subject: [PATCH 12/51] Modify interface and operation to comply with - blk-mq-sched - -As for modifications of the operation, the major changes are the introduction -of a scheduler lock, and the moving to deferred work of the body of the hook -exit_icq. The latter change has been made to avoid deadlocks caused by the -combination of the following facts: 1) such a body takes the scheduler lock, -and, if not deferred, 2) it does so from inside the exit_icq hook, which is -invoked with the queue lock held, and 3) there is at least one code path, -namely that starting from bfq_bio_merge, which takes these locks in the -opposite order. 
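Condensed from the hunks below, the resulting shape of the hook is: exit_icq only queues a work item, and the body that takes the new scheduler lock runs later from the workqueue, so bfqd->lock is never acquired while the queue lock is held:

    static void bfq_exit_icq_body(struct work_struct *work)
    {
            struct bfq_io_cq *bic =
                    container_of(work, struct bfq_io_cq, exit_icq_work);

            /* bfq_exit_icq_bfqq() takes bfqd->lock internally */
            bfq_exit_icq_bfqq(bic, true);
            bfq_exit_icq_bfqq(bic, false);
    }

    static void bfq_init_icq(struct io_cq *icq)
    {
            INIT_WORK(&icq_to_bic(icq)->exit_icq_work, bfq_exit_icq_body);
    }

    static void bfq_exit_icq(struct io_cq *icq)
    {
            /* called with the queue lock held: defer, do not lock here */
            kblockd_schedule_work(&icq_to_bic(icq)->exit_icq_work);
    }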
- -Signed-off-by: Paolo Valente ---- - block/bfq-cgroup-included.c | 4 - - block/bfq-mq-iosched.c | 695 ++++++++++++++++++++++++-------------------- - block/bfq-mq.h | 35 +-- - 3 files changed, 394 insertions(+), 340 deletions(-) - -diff --git a/block/bfq-cgroup-included.c b/block/bfq-cgroup-included.c -index 9c483b658179..8a73de76f32b 100644 ---- a/block/bfq-cgroup-included.c -+++ b/block/bfq-cgroup-included.c -@@ -472,8 +472,6 @@ static struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd, - struct bfq_group *bfqg, *parent; - struct bfq_entity *entity; - -- assert_spin_locked(bfqd->queue->queue_lock); -- - bfqg = bfq_lookup_bfqg(bfqd, blkcg); - - if (unlikely(!bfqg)) -@@ -602,8 +600,6 @@ static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd, - struct bfq_group *bfqg; - struct bfq_entity *entity; - -- lockdep_assert_held(bfqd->queue->queue_lock); -- - bfqg = bfq_find_set_group(bfqd, blkcg); - - if (unlikely(!bfqg)) -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -index 756a618d5902..c963d92a32c2 100644 ---- a/block/bfq-mq-iosched.c -+++ b/block/bfq-mq-iosched.c -@@ -81,7 +81,13 @@ - #include - #include - #include -+#include -+#include -+ - #include "blk.h" -+#include "blk-mq.h" -+#include "blk-mq-tag.h" -+#include "blk-mq-sched.h" - #undef CONFIG_BFQ_GROUP_IOSCHED /* cgroups support not yet functional */ - #include "bfq-mq.h" - -@@ -193,8 +199,6 @@ static int device_speed_thresh[2]; - #define RQ_BIC(rq) ((struct bfq_io_cq *) (rq)->elv.priv[0]) - #define RQ_BFQQ(rq) ((rq)->elv.priv[1]) - --static void bfq_schedule_dispatch(struct bfq_data *bfqd); -- - /** - * icq_to_bic - convert iocontext queue structure to bfq_io_cq. - * @icq: the iocontext queue. -@@ -216,11 +220,12 @@ static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd, - struct request_queue *q) - { - if (ioc) { -+ unsigned long flags; - struct bfq_io_cq *icq; - -- spin_lock_irq(q->queue_lock); -+ spin_lock_irqsave(q->queue_lock, flags); - icq = icq_to_bic(ioc_lookup_icq(ioc, q)); -- spin_unlock_irq(q->queue_lock); -+ spin_unlock_irqrestore(q->queue_lock, flags); - - return icq; - } -@@ -244,7 +249,7 @@ static void bfq_schedule_dispatch(struct bfq_data *bfqd) - { - if (bfqd->queued != 0) { - bfq_log(bfqd, "schedule dispatch"); -- kblockd_schedule_work(&bfqd->unplug_work); -+ blk_mq_run_hw_queues(bfqd->queue, true); - } - } - -@@ -768,9 +773,7 @@ static int bfqq_process_refs(struct bfq_queue *bfqq) - { - int process_refs, io_refs; - -- lockdep_assert_held(bfqq->bfqd->queue->queue_lock); -- -- io_refs = bfqq->allocated[READ] + bfqq->allocated[WRITE]; -+ io_refs = bfqq->allocated; - process_refs = bfqq->ref - io_refs - bfqq->entity.on_st; - BUG_ON(process_refs < 0); - return process_refs; -@@ -1584,6 +1587,7 @@ static sector_t get_sdist(sector_t last_pos, struct request *rq) - return sdist; - } - -+#if 0 /* Still not clear if we can do without next two functions */ - static void bfq_activate_request(struct request_queue *q, struct request *rq) - { - struct bfq_data *bfqd = q->elevator->elevator_data; -@@ -1597,8 +1601,10 @@ static void bfq_deactivate_request(struct request_queue *q, struct request *rq) - BUG_ON(bfqd->rq_in_driver == 0); - bfqd->rq_in_driver--; - } -+#endif - --static void bfq_remove_request(struct request *rq) -+static void bfq_remove_request(struct request_queue *q, -+ struct request *rq) - { - struct bfq_queue *bfqq = RQ_BFQQ(rq); - struct bfq_data *bfqd = bfqq->bfqd; -@@ -1619,6 +1625,10 @@ static void bfq_remove_request(struct request *rq) - bfqd->queued--; - 
elv_rb_del(&bfqq->sort_list, rq); - -+ elv_rqhash_del(q, rq); -+ if (q->last_merge == rq) -+ q->last_merge = NULL; -+ - if (RB_EMPTY_ROOT(&bfqq->sort_list)) { - bfqq->next_rq = NULL; - -@@ -1659,13 +1669,36 @@ static void bfq_remove_request(struct request *rq) - bfqg_stats_update_io_remove(bfqq_group(bfqq), rq->cmd_flags); - } - --static enum elv_merge bfq_merge(struct request_queue *q, struct request **req, -- struct bio *bio) -+static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio) -+{ -+ struct request_queue *q = hctx->queue; -+ struct bfq_data *bfqd = q->elevator->elevator_data; -+ struct request *free = NULL; -+ bool ret; -+ -+ spin_lock_irq(&bfqd->lock); -+ ret = blk_mq_sched_try_merge(q, bio, &free); -+ -+ /* -+ * XXX Not yet freeing without lock held, to avoid an -+ * inconsistency with respect to the lock-protected invocation -+ * of blk_mq_sched_try_insert_merge in bfq_bio_merge. Waiting -+ * for clarifications from Jens. -+ */ -+ if (free) -+ blk_mq_free_request(free); -+ spin_unlock_irq(&bfqd->lock); -+ -+ return ret; -+} -+ -+static int bfq_request_merge(struct request_queue *q, struct request **req, -+ struct bio *bio) - { - struct bfq_data *bfqd = q->elevator->elevator_data; - struct request *__rq; - -- __rq = bfq_find_rq_fmerge(bfqd, bio); -+ __rq = bfq_find_rq_fmerge(bfqd, bio, q); - if (__rq && elv_bio_merge_ok(__rq, bio)) { - *req = __rq; - return ELEVATOR_FRONT_MERGE; -@@ -1674,7 +1707,7 @@ static enum elv_merge bfq_merge(struct request_queue *q, struct request **req, - return ELEVATOR_NO_MERGE; - } - --static void bfq_merged_request(struct request_queue *q, struct request *req, -+static void bfq_request_merged(struct request_queue *q, struct request *req, - enum elv_merge type) - { - if (type == ELEVATOR_FRONT_MERGE && -@@ -1689,6 +1722,8 @@ static void bfq_merged_request(struct request_queue *q, struct request *req, - /* Reposition request in its sort_list */ - elv_rb_del(&bfqq->sort_list, req); - elv_rb_add(&bfqq->sort_list, req); -+ -+ spin_lock_irq(&bfqd->lock); - /* Choose next request to be served for bfqq */ - prev = bfqq->next_rq; - next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req, -@@ -1704,22 +1739,19 @@ static void bfq_merged_request(struct request_queue *q, struct request *req, - bfq_updated_next_req(bfqd, bfqq); - bfq_pos_tree_add_move(bfqd, bfqq); - } -+ spin_unlock_irq(&bfqd->lock); - } - } - --#ifdef BFQ_GROUP_IOSCHED_ENABLED --static void bfq_bio_merged(struct request_queue *q, struct request *req, -- struct bio *bio) --{ -- bfqg_stats_update_io_merged(bfqq_group(RQ_BFQQ(req)), bio->bi_opf); --} --#endif -- --static void bfq_merged_requests(struct request_queue *q, struct request *rq, -+static void bfq_requests_merged(struct request_queue *q, struct request *rq, - struct request *next) - { - struct bfq_queue *bfqq = RQ_BFQQ(rq), *next_bfqq = RQ_BFQQ(next); - -+ if (!RB_EMPTY_NODE(&rq->rb_node)) -+ goto end; -+ spin_lock_irq(&bfqq->bfqd->lock); -+ - /* - * If next and rq belong to the same bfq_queue and next is older - * than rq, then reposition rq in the fifo (by substituting next -@@ -1740,7 +1772,10 @@ static void bfq_merged_requests(struct request_queue *q, struct request *rq, - if (bfqq->next_rq == next) - bfqq->next_rq = rq; - -- bfq_remove_request(next); -+ bfq_remove_request(q, next); -+ -+ spin_unlock_irq(&bfqq->bfqd->lock); -+end: - bfqg_stats_update_io_merged(bfqq_group(bfqq), next->cmd_flags); - } - -@@ -1786,7 +1821,7 @@ static void bfq_end_wr(struct bfq_data *bfqd) - { - struct bfq_queue *bfqq; - -- 
spin_lock_irq(bfqd->queue->queue_lock); -+ spin_lock_irq(&bfqd->lock); - - list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list) - bfq_bfqq_end_wr(bfqq); -@@ -1794,7 +1829,7 @@ static void bfq_end_wr(struct bfq_data *bfqd) - bfq_bfqq_end_wr(bfqq); - bfq_end_wr_async(bfqd); - -- spin_unlock_irq(bfqd->queue->queue_lock); -+ spin_unlock_irq(&bfqd->lock); - } - - static sector_t bfq_io_struct_pos(void *io_struct, bool request) -@@ -2184,8 +2219,8 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic, - bfq_put_queue(bfqq); - } - --static int bfq_allow_bio_merge(struct request_queue *q, struct request *rq, -- struct bio *bio) -+static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq, -+ struct bio *bio) - { - struct bfq_data *bfqd = q->elevator->elevator_data; - bool is_sync = op_is_sync(bio->bi_opf); -@@ -2203,7 +2238,7 @@ static int bfq_allow_bio_merge(struct request_queue *q, struct request *rq, - * merge only if rq is queued there. - * Queue lock is held here. - */ -- bic = bfq_bic_lookup(bfqd, current->io_context); -+ bic = bfq_bic_lookup(bfqd, current->io_context, q); - if (!bic) - return false; - -@@ -2228,12 +2263,6 @@ static int bfq_allow_bio_merge(struct request_queue *q, struct request *rq, - return bfqq == RQ_BFQQ(rq); - } - --static int bfq_allow_rq_merge(struct request_queue *q, struct request *rq, -- struct request *next) --{ -- return RQ_BFQQ(rq) == RQ_BFQQ(next); --} -- - /* - * Set the maximum time for the in-service queue to consume its - * budget. This prevents seeky processes from lowering the throughput. -@@ -2264,7 +2293,6 @@ static void __bfq_set_in_service_queue(struct bfq_data *bfqd, - { - if (bfqq) { - bfqg_stats_update_avg_queue_size(bfqq_group(bfqq)); -- bfq_mark_bfqq_must_alloc(bfqq); - bfq_clear_bfqq_fifo_expire(bfqq); - - bfqd->budgets_assigned = (bfqd->budgets_assigned*7 + 256) / 8; -@@ -2703,27 +2731,28 @@ static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq) - } - - /* -- * Move request from internal lists to the dispatch list of the request queue -+ * Remove request from internal lists. - */ --static void bfq_dispatch_insert(struct request_queue *q, struct request *rq) -+static void bfq_dispatch_remove(struct request_queue *q, struct request *rq) - { - struct bfq_queue *bfqq = RQ_BFQQ(rq); - - /* -- * For consistency, the next instruction should have been executed -- * after removing the request from the queue and dispatching it. -- * We execute instead this instruction before bfq_remove_request() -- * (and hence introduce a temporary inconsistency), for efficiency. -- * In fact, in a forced_dispatch, this prevents two counters related -- * to bfqq->dispatched to risk to be uselessly decremented if bfqq -- * is not in service, and then to be incremented again after -- * incrementing bfqq->dispatched. -+ * For consistency, the next instruction should have been -+ * executed after removing the request from the queue and -+ * dispatching it. We execute instead this instruction before -+ * bfq_remove_request() (and hence introduce a temporary -+ * inconsistency), for efficiency. In fact, should this -+ * dispatch occur for a non in-service bfqq, this anticipated -+ * increment prevents two counters related to bfqq->dispatched -+ * from risking to be, first, uselessly decremented, and then -+ * incremented again when the (new) value of bfqq->dispatched -+ * happens to be taken into account. 
- */ - bfqq->dispatched++; - bfq_update_peak_rate(q->elevator->elevator_data, rq); - -- bfq_remove_request(rq); -- elv_dispatch_sort(q, rq); -+ bfq_remove_request(q, rq); - } - - static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq) -@@ -3605,7 +3634,7 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd) - bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue"); - - if (bfq_may_expire_for_budg_timeout(bfqq) && -- !hrtimer_active(&bfqd->idle_slice_timer) && -+ !bfq_bfqq_wait_request(bfqq) && - !bfq_bfqq_must_idle(bfqq)) - goto expire; - -@@ -3641,7 +3670,6 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd) - * arrives. - */ - if (bfq_bfqq_wait_request(bfqq)) { -- BUG_ON(!hrtimer_active(&bfqd->idle_slice_timer)); - /* - * If we get here: 1) at least a new request - * has arrived but we have not disabled the -@@ -3668,7 +3696,7 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd) - * for a new request, or has requests waiting for a completion and - * may idle after their completion, then keep it anyway. - */ -- if (hrtimer_active(&bfqd->idle_slice_timer) || -+ if (bfq_bfqq_wait_request(bfqq) || - (bfqq->dispatched != 0 && bfq_bfqq_may_idle(bfqq))) { - bfqq = NULL; - goto keep_queue; -@@ -3753,13 +3781,11 @@ static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq) - } - - /* -- * Dispatch one request from bfqq, moving it to the request queue -- * dispatch list. -+ * Dispatch next request from bfqq. - */ --static int bfq_dispatch_request(struct bfq_data *bfqd, -- struct bfq_queue *bfqq) -+static struct request *bfq_dispatch_rq_from_bfqq(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq) - { -- int dispatched = 0; - struct request *rq = bfqq->next_rq; - unsigned long service_to_charge; - -@@ -3775,7 +3801,7 @@ static int bfq_dispatch_request(struct bfq_data *bfqd, - - BUG_ON(bfqq->entity.budget < bfqq->entity.service); - -- bfq_dispatch_insert(bfqd->queue, rq); -+ bfq_dispatch_remove(bfqd->queue, rq); - - /* - * If weight raising has to terminate for bfqq, then next -@@ -3791,86 +3817,61 @@ static int bfq_dispatch_request(struct bfq_data *bfqd, - bfq_update_wr_data(bfqd, bfqq); - - bfq_log_bfqq(bfqd, bfqq, -- "dispatched %u sec req (%llu), budg left %d", -+ "dispatched %u sec req (%llu), budg left %d, new disp_nr %d", - blk_rq_sectors(rq), - (unsigned long long) blk_rq_pos(rq), -- bfq_bfqq_budget_left(bfqq)); -- -- dispatched++; -+ bfq_bfqq_budget_left(bfqq), -+ bfqq->dispatched); - - if (!bfqd->in_service_bic) { - atomic_long_inc(&RQ_BIC(rq)->icq.ioc->refcount); - bfqd->in_service_bic = RQ_BIC(rq); - } - -+ /* -+ * Expire bfqq, pretending that its budget expired, if bfqq -+ * belongs to CLASS_IDLE and other queues are waiting for -+ * service. -+ */ - if (bfqd->busy_queues > 1 && bfq_class_idle(bfqq)) - goto expire; - -- return dispatched; -+ return rq; - - expire: - bfq_bfqq_expire(bfqd, bfqq, false, BFQ_BFQQ_BUDGET_EXHAUSTED); -- return dispatched; --} -- --static int __bfq_forced_dispatch_bfqq(struct bfq_queue *bfqq) --{ -- int dispatched = 0; -- -- while (bfqq->next_rq) { -- bfq_dispatch_insert(bfqq->bfqd->queue, bfqq->next_rq); -- dispatched++; -- } -- -- BUG_ON(!list_empty(&bfqq->fifo)); -- return dispatched; -+ return rq; - } - --/* -- * Drain our current requests. -- * Used for barriers and when switching io schedulers on-the-fly. 
-- */ --static int bfq_forced_dispatch(struct bfq_data *bfqd) -+static bool bfq_has_work(struct blk_mq_hw_ctx *hctx) - { -- struct bfq_queue *bfqq, *n; -- struct bfq_service_tree *st; -- int dispatched = 0; -- -- bfqq = bfqd->in_service_queue; -- if (bfqq) -- __bfq_bfqq_expire(bfqd, bfqq); -+ struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; - - /* -- * Loop through classes, and be careful to leave the scheduler -- * in a consistent state, as feedback mechanisms and vtime -- * updates cannot be disabled during the process. -+ * Avoiding lock: a race on bfqd->busy_queues should cause at -+ * most a call to dispatch for nothing - */ -- list_for_each_entry_safe(bfqq, n, &bfqd->active_list, bfqq_list) { -- st = bfq_entity_service_tree(&bfqq->entity); -- -- dispatched += __bfq_forced_dispatch_bfqq(bfqq); -- -- bfqq->max_budget = bfq_max_budget(bfqd); -- bfq_forget_idle(st); -- } -- -- BUG_ON(bfqd->busy_queues != 0); -- -- return dispatched; -+ return !list_empty_careful(&bfqd->dispatch) || -+ bfqd->busy_queues > 0; - } - --static int bfq_dispatch_requests(struct request_queue *q, int force) -+static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) - { -- struct bfq_data *bfqd = q->elevator->elevator_data; -- struct bfq_queue *bfqq; -+ struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; -+ struct request *rq = NULL; -+ struct bfq_queue *bfqq = NULL; -+ -+ if (!list_empty(&bfqd->dispatch)) { -+ rq = list_first_entry(&bfqd->dispatch, struct request, -+ queuelist); -+ list_del_init(&rq->queuelist); -+ goto exit; -+ } - - bfq_log(bfqd, "dispatch requests: %d busy queues", bfqd->busy_queues); - - if (bfqd->busy_queues == 0) -- return 0; -- -- if (unlikely(force)) -- return bfq_forced_dispatch(bfqd); -+ goto exit; - - /* - * Force device to serve one request at a time if -@@ -3885,25 +3886,39 @@ static int bfq_dispatch_requests(struct request_queue *q, int force) - * throughput. - */ - if (bfqd->strict_guarantees && bfqd->rq_in_driver > 0) -- return 0; -+ goto exit; - - bfqq = bfq_select_queue(bfqd); - if (!bfqq) -- return 0; -+ goto exit; - - BUG_ON(bfqq->entity.budget < bfqq->entity.service); - - BUG_ON(bfq_bfqq_wait_request(bfqq)); - -- if (!bfq_dispatch_request(bfqd, bfqq)) -- return 0; -- -- bfq_log_bfqq(bfqd, bfqq, "dispatched %s request", -- bfq_bfqq_sync(bfqq) ? 
"sync" : "async"); -+ rq = bfq_dispatch_rq_from_bfqq(bfqd, bfqq); - - BUG_ON(bfqq->next_rq == NULL && - bfqq->entity.budget < bfqq->entity.service); -- return 1; -+exit: -+ if (rq) { -+ rq->rq_flags |= RQF_STARTED; -+ bfqd->rq_in_driver++; -+ } -+ -+ return rq; -+} -+ -+static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) -+{ -+ struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; -+ struct request *rq; -+ -+ spin_lock_irq(&bfqd->lock); -+ rq = __bfq_dispatch_request(hctx); -+ spin_unlock_irq(&bfqd->lock); -+ -+ return rq; - } - - /* -@@ -3921,13 +3936,14 @@ static void bfq_put_queue(struct bfq_queue *bfqq) - - BUG_ON(bfqq->ref <= 0); - -- bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d", bfqq, bfqq->ref); -+ if (bfqq->bfqd) -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d", bfqq, bfqq->ref); -+ - bfqq->ref--; - if (bfqq->ref) - return; - - BUG_ON(rb_first(&bfqq->sort_list)); -- BUG_ON(bfqq->allocated[READ] + bfqq->allocated[WRITE] != 0); - BUG_ON(bfqq->entity.tree); - BUG_ON(bfq_bfqq_busy(bfqq)); - -@@ -3942,7 +3958,8 @@ static void bfq_put_queue(struct bfq_queue *bfqq) - */ - hlist_del_init(&bfqq->burst_list_node); - -- bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p freed", bfqq); -+ if (bfqq->bfqd) -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p freed", bfqq); - - kmem_cache_free(bfq_pool, bfqq); - #ifdef BFQ_GROUP_IOSCHED_ENABLED -@@ -3983,29 +4000,52 @@ static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) - bfq_put_queue(bfqq); /* release process reference */ - } - --static void bfq_exit_icq(struct io_cq *icq) -+static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync) - { -- struct bfq_io_cq *bic = icq_to_bic(icq); -- struct bfq_data *bfqd = bic_to_bfqd(bic); -+ struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync); -+ struct bfq_data *bfqd; - -- if (bic_to_bfqq(bic, false)) { -- bfq_exit_bfqq(bfqd, bic_to_bfqq(bic, false)); -- bic_set_bfqq(bic, NULL, false); -- } -+ if (bfqq) -+ bfqd = bfqq->bfqd; /* NULL if scheduler already exited */ - -- if (bic_to_bfqq(bic, true)) { -+ if (bfqq && bfqd) { -+ spin_lock_irq(&bfqd->lock); - /* - * If the bic is using a shared queue, put the reference - * taken on the io_context when the bic started using a - * shared bfq_queue. - */ -- if (bfq_bfqq_coop(bic_to_bfqq(bic, true))) -- put_io_context(icq->ioc); -- bfq_exit_bfqq(bfqd, bic_to_bfqq(bic, true)); -- bic_set_bfqq(bic, NULL, true); -+ if (is_sync && bfq_bfqq_coop(bfqq)) -+ put_io_context(bic->icq.ioc); -+ bfq_exit_bfqq(bfqd, bfqq); -+ bic_set_bfqq(bic, NULL, is_sync); -+ spin_unlock_irq(&bfqd->lock); - } - } - -+static void bfq_exit_icq_body(struct work_struct *work) -+{ -+ struct bfq_io_cq *bic = -+ container_of(work, struct bfq_io_cq, exit_icq_work); -+ -+ bfq_exit_icq_bfqq(bic, true); -+ bfq_exit_icq_bfqq(bic, false); -+} -+ -+static void bfq_init_icq(struct io_cq *icq) -+{ -+ struct bfq_io_cq *bic = icq_to_bic(icq); -+ -+ INIT_WORK(&bic->exit_icq_work, bfq_exit_icq_body); -+} -+ -+static void bfq_exit_icq(struct io_cq *icq) -+{ -+ struct bfq_io_cq *bic = icq_to_bic(icq); -+ -+ kblockd_schedule_work(&bic->exit_icq_work); -+} -+ - /* - * Update the entity prio values; note that the new values will not - * be used until the next (re)activation. 
-@@ -4015,6 +4055,10 @@ static void bfq_set_next_ioprio_data(struct bfq_queue *bfqq, - { - struct task_struct *tsk = current; - int ioprio_class; -+ struct bfq_data *bfqd = bfqq->bfqd; -+ -+ if (!bfqd) -+ return; - - ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio); - switch (ioprio_class) { -@@ -4095,6 +4139,8 @@ static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, - INIT_HLIST_NODE(&bfqq->burst_list_node); - BUG_ON(!hlist_unhashed(&bfqq->burst_list_node)); - -+ spin_lock_init(&bfqq->lock); -+ - bfqq->ref = 0; - bfqq->bfqd = bfqd; - -@@ -4351,22 +4397,13 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq, - if (budget_timeout) - bfq_bfqq_expire(bfqd, bfqq, false, - BFQ_BFQQ_BUDGET_TIMEOUT); -- -- /* -- * Let the request rip immediately, or let a new queue be -- * selected if bfqq has just been expired. -- */ -- __blk_run_queue(bfqd->queue); - } - } - --static void bfq_insert_request(struct request_queue *q, struct request *rq) -+static void __bfq_insert_request(struct bfq_data *bfqd, struct request *rq) - { -- struct bfq_data *bfqd = q->elevator->elevator_data; - struct bfq_queue *bfqq = RQ_BFQQ(rq), *new_bfqq; - -- assert_spin_locked(bfqd->queue->queue_lock); -- - /* - * An unplug may trigger a requeue of a request from the device - * driver: make sure we are in process context while trying to -@@ -4381,8 +4418,8 @@ static void bfq_insert_request(struct request_queue *q, struct request *rq) - * Release the request's reference to the old bfqq - * and make sure one is taken to the shared queue. - */ -- new_bfqq->allocated[rq_data_dir(rq)]++; -- bfqq->allocated[rq_data_dir(rq)]--; -+ new_bfqq->allocated++; -+ bfqq->allocated--; - new_bfqq->ref++; - bfq_clear_bfqq_just_created(bfqq); - if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq) -@@ -4406,6 +4443,55 @@ static void bfq_insert_request(struct request_queue *q, struct request *rq) - bfq_rq_enqueued(bfqd, bfqq, rq); - } - -+static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, -+ bool at_head) -+{ -+ struct request_queue *q = hctx->queue; -+ struct bfq_data *bfqd = q->elevator->elevator_data; -+ -+ spin_lock_irq(&bfqd->lock); -+ if (blk_mq_sched_try_insert_merge(q, rq)) -+ goto done; -+ spin_unlock_irq(&bfqd->lock); -+ -+ blk_mq_sched_request_inserted(rq); -+ -+ spin_lock_irq(&bfqd->lock); -+ if (at_head || blk_rq_is_passthrough(rq)) { -+ struct bfq_queue *bfqq = RQ_BFQQ(rq); -+ -+ if (at_head) -+ list_add(&rq->queuelist, &bfqd->dispatch); -+ else -+ list_add_tail(&rq->queuelist, &bfqd->dispatch); -+ -+ if (bfqq) -+ bfqq->dispatched++; -+ } else { -+ __bfq_insert_request(bfqd, rq); -+ -+ if (rq_mergeable(rq)) { -+ elv_rqhash_add(q, rq); -+ if (!q->last_merge) -+ q->last_merge = rq; -+ } -+ } -+done: -+ spin_unlock_irq(&bfqd->lock); -+} -+ -+static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx, -+ struct list_head *list, bool at_head) -+{ -+ while (!list_empty(list)) { -+ struct request *rq; -+ -+ rq = list_first_entry(list, struct request, queuelist); -+ list_del_init(&rq->queuelist); -+ bfq_insert_request(hctx, rq, at_head); -+ } -+} -+ - static void bfq_update_hw_tag(struct bfq_data *bfqd) - { - bfqd->max_rq_in_driver = max_t(int, bfqd->max_rq_in_driver, -@@ -4431,27 +4517,17 @@ static void bfq_update_hw_tag(struct bfq_data *bfqd) - bfqd->hw_tag_samples = 0; - } - --static void bfq_completed_request(struct request_queue *q, struct request *rq) -+static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd) - { -- struct bfq_queue *bfqq = RQ_BFQQ(rq); -- 
struct bfq_data *bfqd = bfqq->bfqd; - u64 now_ns; - u32 delta_us; - -- bfq_log_bfqq(bfqd, bfqq, "completed one req with %u sects left", -- blk_rq_sectors(rq)); -- -- assert_spin_locked(bfqd->queue->queue_lock); - bfq_update_hw_tag(bfqd); - - BUG_ON(!bfqd->rq_in_driver); - BUG_ON(!bfqq->dispatched); - bfqd->rq_in_driver--; - bfqq->dispatched--; -- bfqg_stats_update_completion(bfqq_group(bfqq), -- rq_start_time_ns(rq), -- rq_io_start_time_ns(rq), -- rq->cmd_flags); - - if (!bfqq->dispatched && !bfq_bfqq_busy(bfqq)) { - BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list)); -@@ -4477,7 +4553,8 @@ static void bfq_completed_request(struct request_queue *q, struct request *rq) - */ - delta_us = div_u64(now_ns - bfqd->last_completion, NSEC_PER_USEC); - -- bfq_log(bfqd, "rq_completed: delta %uus/%luus max_size %u rate %llu/%llu", -+ bfq_log_bfqq(bfqd, bfqq, -+ "rq_completed: delta %uus/%luus max_size %u rate %llu/%llu", - delta_us, BFQ_MIN_TT/NSEC_PER_USEC, bfqd->last_rq_max_size, - (USEC_PER_SEC* - (u64)((bfqd->last_rq_max_size<in_service_queue == bfqq) { - if (bfqq->dispatched == 0 && bfq_bfqq_must_idle(bfqq)) { - bfq_arm_slice_timer(bfqd); -- goto out; -+ return; - } else if (bfq_may_expire_for_budg_timeout(bfqq)) - bfq_bfqq_expire(bfqd, bfqq, false, - BFQ_BFQQ_BUDGET_TIMEOUT); -@@ -4537,68 +4614,55 @@ static void bfq_completed_request(struct request_queue *q, struct request *rq) - bfq_bfqq_expire(bfqd, bfqq, false, - BFQ_BFQQ_NO_MORE_REQUESTS); - } -- -- if (!bfqd->rq_in_driver) -- bfq_schedule_dispatch(bfqd); -- --out: -- return; - } - --static int __bfq_may_queue(struct bfq_queue *bfqq) -+static void bfq_put_rq_priv_body(struct bfq_queue *bfqq) - { -- if (bfq_bfqq_wait_request(bfqq) && bfq_bfqq_must_alloc(bfqq)) { -- bfq_clear_bfqq_must_alloc(bfqq); -- return ELV_MQUEUE_MUST; -- } -+ bfqq->allocated--; - -- return ELV_MQUEUE_MAY; -+ bfq_put_queue(bfqq); - } - --static int bfq_may_queue(struct request_queue *q, unsigned int op) -+static void bfq_put_rq_private(struct request_queue *q, struct request *rq) - { -- struct bfq_data *bfqd = q->elevator->elevator_data; -- struct task_struct *tsk = current; -- struct bfq_io_cq *bic; -- struct bfq_queue *bfqq; -- -- /* -- * Don't force setup of a queue from here, as a call to may_queue -- * does not necessarily imply that a request actually will be -- * queued. So just lookup a possibly existing queue, or return -- * 'may queue' if that fails. -- */ -- bic = bfq_bic_lookup(bfqd, tsk->io_context); -- if (!bic) -- return ELV_MQUEUE_MAY; -- -- bfqq = bic_to_bfqq(bic, op_is_sync(op)); -- if (bfqq) -- return __bfq_may_queue(bfqq); -+ struct bfq_queue *bfqq = RQ_BFQQ(rq); -+ struct bfq_data *bfqd = bfqq->bfqd; - -- return ELV_MQUEUE_MAY; --} -+ if (rq->rq_flags & RQF_STARTED) -+ bfqg_stats_update_completion(bfqq_group(bfqq), -+ rq_start_time_ns(rq), -+ rq_io_start_time_ns(rq), -+ rq->cmd_flags); - --/* -- * Queue lock held here. -- */ --static void bfq_put_request(struct request *rq) --{ -- struct bfq_queue *bfqq = RQ_BFQQ(rq); -+ if (likely(rq->rq_flags & RQF_STARTED)) { -+ unsigned long flags; - -- if (bfqq) { -- const int rw = rq_data_dir(rq); -+ spin_lock_irqsave(&bfqd->lock, flags); - -- BUG_ON(!bfqq->allocated[rw]); -- bfqq->allocated[rw]--; -+ bfq_completed_request(bfqq, bfqd); -+ bfq_put_rq_priv_body(bfqq); - -- rq->elv.priv[0] = NULL; -- rq->elv.priv[1] = NULL; -+ spin_unlock_irqrestore(&bfqd->lock, flags); -+ } else { -+ /* -+ * Request rq may be still/already in the scheduler, -+ * in which case we need to remove it. 
And we cannot -+ * defer such a check and removal, to avoid -+ * inconsistencies in the time interval from the end -+ * of this function to the start of the deferred work. -+ * Fortunately, this situation occurs only in process -+ * context, so taking the scheduler lock does not -+ * cause any deadlock, even if other locks are already -+ * (correctly) held by this process. -+ */ - -- bfq_log_bfqq(bfqq->bfqd, bfqq, "put_request %p, %d", -- bfqq, bfqq->ref); -- bfq_put_queue(bfqq); -+ if (!RB_EMPTY_NODE(&rq->rb_node)) -+ bfq_remove_request(q, rq); -+ bfq_put_rq_priv_body(bfqq); - } -+ -+ rq->elv.priv[0] = NULL; -+ rq->elv.priv[1] = NULL; - } - - /* -@@ -4630,18 +4694,16 @@ bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq) - /* - * Allocate bfq data structures associated with this request. - */ --static int bfq_set_request(struct request_queue *q, struct request *rq, -- struct bio *bio, gfp_t gfp_mask) -+static int bfq_get_rq_private(struct request_queue *q, struct request *rq, -+ struct bio *bio) - { - struct bfq_data *bfqd = q->elevator->elevator_data; - struct bfq_io_cq *bic = icq_to_bic(rq->elv.icq); -- const int rw = rq_data_dir(rq); - const int is_sync = rq_is_sync(rq); - struct bfq_queue *bfqq; -- unsigned long flags; - bool bfqq_already_existing = false, split = false; - -- spin_lock_irqsave(q->queue_lock, flags); -+ spin_lock_irq(&bfqd->lock); - - if (!bic) - goto queue_fail; -@@ -4661,7 +4723,7 @@ static int bfq_set_request(struct request_queue *q, struct request *rq, - bic_set_bfqq(bic, bfqq, is_sync); - if (split && is_sync) { - bfq_log_bfqq(bfqd, bfqq, -- "set_request: was_in_list %d " -+ "get_request: was_in_list %d " - "was_in_large_burst %d " - "large burst in progress %d", - bic->was_in_burst_list, -@@ -4671,12 +4733,12 @@ static int bfq_set_request(struct request_queue *q, struct request *rq, - if ((bic->was_in_burst_list && bfqd->large_burst) || - bic->saved_in_large_burst) { - bfq_log_bfqq(bfqd, bfqq, -- "set_request: marking in " -+ "get_request: marking in " - "large burst"); - bfq_mark_bfqq_in_large_burst(bfqq); - } else { - bfq_log_bfqq(bfqd, bfqq, -- "set_request: clearing in " -+ "get_request: clearing in " - "large burst"); - bfq_clear_bfqq_in_large_burst(bfqq); - if (bic->was_in_burst_list) -@@ -4703,9 +4765,12 @@ static int bfq_set_request(struct request_queue *q, struct request *rq, - } - } - -- bfqq->allocated[rw]++; -+ bfqq->allocated++; -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "get_request: new allocated %d", bfqq->allocated); -+ - bfqq->ref++; -- bfq_log_bfqq(bfqd, bfqq, "set_request: bfqq %p, %d", bfqq, bfqq->ref); -+ bfq_log_bfqq(bfqd, bfqq, "get_request: bfqq %p, %d", bfqq, bfqq->ref); - - rq->elv.priv[0] = bic; - rq->elv.priv[1] = bfqq; -@@ -4733,26 +4798,53 @@ static int bfq_set_request(struct request_queue *q, struct request *rq, - if (unlikely(bfq_bfqq_just_created(bfqq))) - bfq_handle_burst(bfqd, bfqq); - -- spin_unlock_irqrestore(q->queue_lock, flags); -+ spin_unlock_irq(&bfqd->lock); - - return 0; - - queue_fail: -- bfq_schedule_dispatch(bfqd); -- spin_unlock_irqrestore(q->queue_lock, flags); -+ spin_unlock_irq(&bfqd->lock); - - return 1; - } - --static void bfq_kick_queue(struct work_struct *work) -+static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq) - { -- struct bfq_data *bfqd = -- container_of(work, struct bfq_data, unplug_work); -- struct request_queue *q = bfqd->queue; -+ struct bfq_data *bfqd = bfqq->bfqd; -+ enum bfqq_expiration reason; -+ unsigned long flags; -+ -+ spin_lock_irqsave(&bfqd->lock, flags); -+ 
bfq_clear_bfqq_wait_request(bfqq); - -- spin_lock_irq(q->queue_lock); -- __blk_run_queue(q); -- spin_unlock_irq(q->queue_lock); -+ if (bfqq != bfqd->in_service_queue) { -+ spin_unlock_irqrestore(&bfqd->lock, flags); -+ return; -+ } -+ -+ if (bfq_bfqq_budget_timeout(bfqq)) -+ /* -+ * Also here the queue can be safely expired -+ * for budget timeout without wasting -+ * guarantees -+ */ -+ reason = BFQ_BFQQ_BUDGET_TIMEOUT; -+ else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0) -+ /* -+ * The queue may not be empty upon timer expiration, -+ * because we may not disable the timer when the -+ * first request of the in-service queue arrives -+ * during disk idling. -+ */ -+ reason = BFQ_BFQQ_TOO_IDLE; -+ else -+ goto schedule_dispatch; -+ -+ bfq_bfqq_expire(bfqd, bfqq, true, reason); -+ -+schedule_dispatch: -+ spin_unlock_irqrestore(&bfqd->lock, flags); -+ bfq_schedule_dispatch(bfqd); - } - - /* -@@ -4763,59 +4855,22 @@ static enum hrtimer_restart bfq_idle_slice_timer(struct hrtimer *timer) - { - struct bfq_data *bfqd = container_of(timer, struct bfq_data, - idle_slice_timer); -- struct bfq_queue *bfqq; -- unsigned long flags; -- enum bfqq_expiration reason; -- -- spin_lock_irqsave(bfqd->queue->queue_lock, flags); -+ struct bfq_queue *bfqq = bfqd->in_service_queue; - -- bfqq = bfqd->in_service_queue; - /* - * Theoretical race here: the in-service queue can be NULL or -- * different from the queue that was idling if the timer handler -- * spins on the queue_lock and a new request arrives for the -- * current queue and there is a full dispatch cycle that changes -- * the in-service queue. This can hardly happen, but in the worst -- * case we just expire a queue too early. -+ * different from the queue that was idling if a new request -+ * arrives for the current queue and there is a full dispatch -+ * cycle that changes the in-service queue. This can hardly -+ * happen, but in the worst case we just expire a queue too -+ * early. - */ -- if (bfqq) { -- bfq_log_bfqq(bfqd, bfqq, "slice_timer expired"); -- bfq_clear_bfqq_wait_request(bfqq); -- -- if (bfq_bfqq_budget_timeout(bfqq)) -- /* -- * Also here the queue can be safely expired -- * for budget timeout without wasting -- * guarantees -- */ -- reason = BFQ_BFQQ_BUDGET_TIMEOUT; -- else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0) -- /* -- * The queue may not be empty upon timer expiration, -- * because we may not disable the timer when the -- * first request of the in-service queue arrives -- * during disk idling. 
-- */ -- reason = BFQ_BFQQ_TOO_IDLE; -- else -- goto schedule_dispatch; -- -- bfq_bfqq_expire(bfqd, bfqq, true, reason); -- } -- --schedule_dispatch: -- bfq_schedule_dispatch(bfqd); -+ if (bfqq) -+ bfq_idle_slice_timer_body(bfqq); - -- spin_unlock_irqrestore(bfqd->queue->queue_lock, flags); - return HRTIMER_NORESTART; - } - --static void bfq_shutdown_timer_wq(struct bfq_data *bfqd) --{ -- hrtimer_cancel(&bfqd->idle_slice_timer); -- cancel_work_sync(&bfqd->unplug_work); --} -- - static void __bfq_put_async_bfqq(struct bfq_data *bfqd, - struct bfq_queue **bfqq_ptr) - { -@@ -4852,28 +4907,40 @@ static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg) - static void bfq_exit_queue(struct elevator_queue *e) - { - struct bfq_data *bfqd = e->elevator_data; -- struct request_queue *q = bfqd->queue; - struct bfq_queue *bfqq, *n; - -- bfq_shutdown_timer_wq(bfqd); -- -- spin_lock_irq(q->queue_lock); -+ hrtimer_cancel(&bfqd->idle_slice_timer); - - BUG_ON(bfqd->in_service_queue); -- list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list) -- bfq_deactivate_bfqq(bfqd, bfqq, false, false); - -- spin_unlock_irq(q->queue_lock); -+ list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list) { -+ if (bfqq->bic) /* bfqqs without bic are handled below */ -+ cancel_work_sync(&bfqq->bic->exit_icq_work); -+ } -+ -+ spin_lock_irq(&bfqd->lock); -+ list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list) { -+ bfq_deactivate_bfqq(bfqd, bfqq, false, false); -+ /* -+ * Make sure that deferred exit_icq_work completes -+ * without errors for bfq_queues without bic -+ */ -+ if (!bfqq->bic) -+ bfqq->bfqd = NULL; -+ } -+ spin_unlock_irq(&bfqd->lock); - -- bfq_shutdown_timer_wq(bfqd); -+ hrtimer_cancel(&bfqd->idle_slice_timer); - - BUG_ON(hrtimer_active(&bfqd->idle_slice_timer)); - - #ifdef BFQ_GROUP_IOSCHED_ENABLED -- blkcg_deactivate_policy(q, &blkcg_policy_bfq); -+ blkcg_deactivate_policy(bfqd->queue, &blkcg_policy_bfq); - #else -+ spin_lock_irq(&bfqd->lock); - bfq_put_async_queues(bfqd, bfqd->root_group); - kfree(bfqd->root_group); -+ spin_unlock_irq(&bfqd->lock); - #endif - - kfree(bfqd); -@@ -4934,10 +5001,6 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e) - - bfqd->queue = q; - -- spin_lock_irq(q->queue_lock); -- q->elevator = eq; -- spin_unlock_irq(q->queue_lock); -- - bfqd->root_group = bfq_create_group_hierarchy(bfqd, q->node); - if (!bfqd->root_group) - goto out_free; -@@ -4951,8 +5014,6 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e) - bfqd->queue_weights_tree = RB_ROOT; - bfqd->group_weights_tree = RB_ROOT; - -- INIT_WORK(&bfqd->unplug_work, bfq_kick_queue); -- - INIT_LIST_HEAD(&bfqd->active_list); - INIT_LIST_HEAD(&bfqd->idle_list); - INIT_HLIST_HEAD(&bfqd->burst_list); -@@ -5001,6 +5062,11 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e) - bfqd->peak_rate = R_fast[blk_queue_nonrot(bfqd->queue)] * 2 / 3; - bfqd->device_speed = BFQ_BFQD_FAST; - -+ spin_lock_init(&bfqd->lock); -+ INIT_LIST_HEAD(&bfqd->dispatch); -+ -+ q->elevator = eq; -+ - return 0; - - out_free: -@@ -5057,7 +5123,7 @@ static ssize_t bfq_weights_show(struct elevator_queue *e, char *page) - num_char += sprintf(page + num_char, "Tot reqs queued %d\n\n", - bfqd->queued); - -- spin_lock_irq(bfqd->queue->queue_lock); -+ spin_lock_irq(&bfqd->lock); - - num_char += sprintf(page + num_char, "Active:\n"); - list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list) { -@@ -5086,7 +5152,7 @@ static ssize_t bfq_weights_show(struct 
elevator_queue *e, char *page) - jiffies_to_msecs(bfqq->wr_cur_max_time)); - } - -- spin_unlock_irq(bfqd->queue->queue_lock); -+ spin_unlock_irq(&bfqd->lock); - - return num_char; - } -@@ -5294,35 +5360,31 @@ static struct elv_fs_entry bfq_attrs[] = { - __ATTR_NULL - }; - --static struct elevator_type iosched_bfq = { -- .ops.sq = { -- .elevator_merge_fn = bfq_merge, -- .elevator_merged_fn = bfq_merged_request, -- .elevator_merge_req_fn = bfq_merged_requests, --#ifdef BFQ_GROUP_IOSCHED_ENABLED -- .elevator_bio_merged_fn = bfq_bio_merged, --#endif -- .elevator_allow_bio_merge_fn = bfq_allow_bio_merge, -- .elevator_allow_rq_merge_fn = bfq_allow_rq_merge, -- .elevator_dispatch_fn = bfq_dispatch_requests, -- .elevator_add_req_fn = bfq_insert_request, -- .elevator_activate_req_fn = bfq_activate_request, -- .elevator_deactivate_req_fn = bfq_deactivate_request, -- .elevator_completed_req_fn = bfq_completed_request, -- .elevator_former_req_fn = elv_rb_former_request, -- .elevator_latter_req_fn = elv_rb_latter_request, -- .elevator_init_icq_fn = bfq_init_icq, -- .elevator_exit_icq_fn = bfq_exit_icq, -- .elevator_set_req_fn = bfq_set_request, -- .elevator_put_req_fn = bfq_put_request, -- .elevator_may_queue_fn = bfq_may_queue, -- .elevator_init_fn = bfq_init_queue, -- .elevator_exit_fn = bfq_exit_queue, -+static struct elevator_type iosched_bfq_mq = { -+ .ops.mq = { -+ .get_rq_priv = bfq_get_rq_private, -+ .put_rq_priv = bfq_put_rq_private, -+ .init_icq = bfq_init_icq, -+ .exit_icq = bfq_exit_icq, -+ .insert_requests = bfq_insert_requests, -+ .dispatch_request = bfq_dispatch_request, -+ .next_request = elv_rb_latter_request, -+ .former_request = elv_rb_former_request, -+ .allow_merge = bfq_allow_bio_merge, -+ .bio_merge = bfq_bio_merge, -+ .request_merge = bfq_request_merge, -+ .requests_merged = bfq_requests_merged, -+ .request_merged = bfq_request_merged, -+ .has_work = bfq_has_work, -+ .init_sched = bfq_init_queue, -+ .exit_sched = bfq_exit_queue, - }, -+ -+ .uses_mq = true, - .icq_size = sizeof(struct bfq_io_cq), - .icq_align = __alignof__(struct bfq_io_cq), - .elevator_attrs = bfq_attrs, -- .elevator_name = "bfq-sq", -+ .elevator_name = "bfq-mq", - .elevator_owner = THIS_MODULE, - }; - -@@ -5392,7 +5454,7 @@ static int __init bfq_init(void) - device_speed_thresh[0] = (4 * R_slow[0]) / 3; - device_speed_thresh[1] = (4 * R_slow[1]) / 3; - -- ret = elv_register(&iosched_bfq); -+ ret = elv_register(&iosched_bfq_mq); - if (ret) - goto err_pol_unreg; - -@@ -5412,8 +5474,8 @@ static int __init bfq_init(void) - - static void __exit bfq_exit(void) - { -- elv_unregister(&iosched_bfq); --#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ elv_unregister(&iosched_bfq_mq); -+#ifdef CONFIG_BFQ_GROUP_ENABLED - blkcg_policy_unregister(&blkcg_policy_bfq); - #endif - bfq_slab_kill(); -@@ -5422,5 +5484,6 @@ static void __exit bfq_exit(void) - module_init(bfq_init); - module_exit(bfq_exit); - --MODULE_AUTHOR("Arianna Avanzini, Fabio Checconi, Paolo Valente"); -+MODULE_AUTHOR("Paolo Valente"); - MODULE_LICENSE("GPL"); -+MODULE_DESCRIPTION("MQ Budget Fair Queueing I/O Scheduler"); -diff --git a/block/bfq-mq.h b/block/bfq-mq.h -index 0f51f270469c..c3fcd5ebd735 100644 ---- a/block/bfq-mq.h -+++ b/block/bfq-mq.h -@@ -19,15 +19,8 @@ - #include - #include - --/* -- * Define an alternative macro to compile cgroups support. This is one -- * of the steps needed to let bfq-mq share the files bfq-sched.c and -- * bfq-cgroup.c with bfq-sq. 
For bfq-mq, the macro -- * BFQ_GROUP_IOSCHED_ENABLED will be defined as a function of whether -- * the configuration option CONFIG_BFQ_MQ_GROUP_IOSCHED, and not -- * CONFIG_BFQ_GROUP_IOSCHED, is defined. -- */ --#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+/* see comments on CONFIG_BFQ_GROUP_IOSCHED in bfq.h */ -+#ifdef CONFIG_BFQ_MQ_GROUP_IOSCHED - #define BFQ_GROUP_IOSCHED_ENABLED - #endif - -@@ -259,8 +252,8 @@ struct bfq_queue { - struct request *next_rq; - /* number of sync and async requests queued */ - int queued[2]; -- /* number of sync and async requests currently allocated */ -- int allocated[2]; -+ /* number of requests currently allocated */ -+ int allocated; - /* number of pending metadata requests */ - int meta_pending; - /* fifo list of requests in sort_list */ -@@ -345,6 +338,8 @@ struct bfq_queue { - unsigned long wr_start_at_switch_to_srt; - - unsigned long split_time; /* time of last split */ -+ -+ spinlock_t lock; - }; - - /** -@@ -361,6 +356,9 @@ struct bfq_io_cq { - uint64_t blkcg_serial_nr; /* the current blkcg serial */ - #endif - -+ /* delayed work to exec the body of the the exit_icq handler */ -+ struct work_struct exit_icq_work; -+ - /* - * Snapshot of the has_short_time flag before merging; taken - * to remember its value while the queue is merged, so as to -@@ -402,11 +400,13 @@ enum bfq_device_speed { - /** - * struct bfq_data - per-device data structure. - * -- * All the fields are protected by the @queue lock. -+ * All the fields are protected by @lock. - */ - struct bfq_data { -- /* request queue for the device */ -+ /* device request queue */ - struct request_queue *queue; -+ /* dispatch queue */ -+ struct list_head dispatch; - - /* root bfq_group for the device */ - struct bfq_group *root_group; -@@ -460,8 +460,6 @@ struct bfq_data { - * the queue in service. - */ - struct hrtimer idle_slice_timer; -- /* delayed work to restart dispatching on the request queue */ -- struct work_struct unplug_work; - - /* bfq_queue in service */ - struct bfq_queue *in_service_queue; -@@ -612,6 +610,8 @@ struct bfq_data { - - /* fallback dummy bfqq for extreme OOM conditions */ - struct bfq_queue oom_bfqq; -+ -+ spinlock_t lock; - }; - - enum bfqq_state_flags { -@@ -622,7 +622,6 @@ enum bfqq_state_flags { - * waiting for a request - * without idling the device - */ -- BFQ_BFQQ_FLAG_must_alloc, /* must be allowed rq alloc */ - BFQ_BFQQ_FLAG_fifo_expire, /* FIFO checked in this slice */ - BFQ_BFQQ_FLAG_has_short_ttime, /* queue has a short think time */ - BFQ_BFQQ_FLAG_sync, /* synchronous queue */ -@@ -661,7 +660,6 @@ BFQ_BFQQ_FNS(just_created); - BFQ_BFQQ_FNS(busy); - BFQ_BFQQ_FNS(wait_request); - BFQ_BFQQ_FNS(non_blocking_wait_rq); --BFQ_BFQQ_FNS(must_alloc); - BFQ_BFQQ_FNS(fifo_expire); - BFQ_BFQQ_FNS(has_short_ttime); - BFQ_BFQQ_FNS(sync); -@@ -692,7 +690,6 @@ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg); - #define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \ - char __pbuf[128]; \ - \ -- assert_spin_locked((bfqd)->queue->queue_lock); \ - blkg_path(bfqg_to_blkg(bfqq_group(bfqq)), __pbuf, sizeof(__pbuf)); \ - pr_crit("%s bfq%d%c %s " fmt "\n", \ - checked_dev_name((bfqd)->queue->backing_dev_info->dev), \ -@@ -734,7 +731,6 @@ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg); - #define bfq_log_bfqq(bfqd, bfqq, fmt, args...) 
do { \ - char __pbuf[128]; \ - \ -- assert_spin_locked((bfqd)->queue->queue_lock); \ - blkg_path(bfqg_to_blkg(bfqq_group(bfqq)), __pbuf, sizeof(__pbuf)); \ - blk_add_trace_msg((bfqd)->queue, "bfq%d%c %s " fmt, \ - (bfqq)->pid, \ -@@ -961,7 +957,6 @@ static struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq) - - static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio); - static void bfq_put_queue(struct bfq_queue *bfqq); --static void bfq_dispatch_insert(struct request_queue *q, struct request *rq); - static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd, - struct bio *bio, bool is_sync, - struct bfq_io_cq *bic); - -From bde5235de2241502c1c00337bd51c96d9b60b6df Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Fri, 3 Mar 2017 08:52:40 +0100 -Subject: [PATCH 13/51] Add checks and extra log messages - Part I - -Signed-off-by: Paolo Valente ---- - block/bfq-mq-iosched.c | 112 +++++++++++++++++++++++++++++++++++++++++++++++-- - 1 file changed, 109 insertions(+), 3 deletions(-) - -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -index c963d92a32c2..40eadb3f7073 100644 ---- a/block/bfq-mq-iosched.c -+++ b/block/bfq-mq-iosched.c -@@ -773,6 +773,8 @@ static int bfqq_process_refs(struct bfq_queue *bfqq) - { - int process_refs, io_refs; - -+ lockdep_assert_held(&bfqq->bfqd->lock); -+ - io_refs = bfqq->allocated; - process_refs = bfqq->ref - io_refs - bfqq->entity.on_st; - BUG_ON(process_refs < 0); -@@ -1483,6 +1485,8 @@ static void bfq_add_request(struct request *rq) - bfqq->queued[rq_is_sync(rq)]++; - bfqd->queued++; - -+ BUG_ON(!RQ_BFQQ(rq)); -+ BUG_ON(RQ_BFQQ(rq) != bfqq); - elv_rb_add(&bfqq->sort_list, rq); - - /* -@@ -1491,6 +1495,8 @@ static void bfq_add_request(struct request *rq) - prev = bfqq->next_rq; - next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position); - BUG_ON(!next_rq); -+ BUG_ON(!RQ_BFQQ(next_rq)); -+ BUG_ON(RQ_BFQQ(next_rq) != bfqq); - bfqq->next_rq = next_rq; - - /* -@@ -1615,6 +1621,19 @@ static void bfq_remove_request(struct request_queue *q, - - if (bfqq->next_rq == rq) { - bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq); -+ if (bfqq->next_rq && !RQ_BFQQ(bfqq->next_rq)) { -+ pr_crit("no bfqq! for next rq %p bfqq %p\n", -+ bfqq->next_rq, bfqq); -+ } -+ -+ BUG_ON(bfqq->next_rq && !RQ_BFQQ(bfqq->next_rq)); -+ if (bfqq->next_rq && RQ_BFQQ(bfqq->next_rq) != bfqq) { -+ pr_crit( -+ "wrong bfqq! 
for next rq %p, rq_bfqq %p bfqq %p\n", -+ bfqq->next_rq, RQ_BFQQ(bfqq->next_rq), bfqq); -+ } -+ BUG_ON(bfqq->next_rq && RQ_BFQQ(bfqq->next_rq) != bfqq); -+ - bfq_updated_next_req(bfqd, bfqq); - } - -@@ -1701,6 +1720,8 @@ static int bfq_request_merge(struct request_queue *q, struct request **req, - __rq = bfq_find_rq_fmerge(bfqd, bio, q); - if (__rq && elv_bio_merge_ok(__rq, bio)) { - *req = __rq; -+ bfq_log(bfqd, "request_merge: req %p", __rq); -+ - return ELEVATOR_FRONT_MERGE; - } - -@@ -1721,6 +1742,8 @@ static void bfq_request_merged(struct request_queue *q, struct request *req, - - /* Reposition request in its sort_list */ - elv_rb_del(&bfqq->sort_list, req); -+ BUG_ON(!RQ_BFQQ(req)); -+ BUG_ON(RQ_BFQQ(req) != bfqq); - elv_rb_add(&bfqq->sort_list, req); - - spin_lock_irq(&bfqd->lock); -@@ -1729,7 +1752,13 @@ static void bfq_request_merged(struct request_queue *q, struct request *req, - next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req, - bfqd->last_position); - BUG_ON(!next_rq); -+ - bfqq->next_rq = next_rq; -+ -+ bfq_log_bfqq(bfqd, bfqq, -+ "requests_merged: req %p prev %p next_rq %p bfqq %p", -+ req, prev, next_rq, bfqq); -+ - /* - * If next_rq changes, update both the queue's budget to - * fit the new request and the queue's position in its -@@ -1748,8 +1777,16 @@ static void bfq_requests_merged(struct request_queue *q, struct request *rq, - { - struct bfq_queue *bfqq = RQ_BFQQ(rq), *next_bfqq = RQ_BFQQ(next); - -+ BUG_ON(!RQ_BFQQ(rq)); -+ BUG_ON(!RQ_BFQQ(next)); -+ - if (!RB_EMPTY_NODE(&rq->rb_node)) - goto end; -+ -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "requests_merged: rq %p next %p bfqq %p next_bfqq %p", -+ rq, next, bfqq, next_bfqq); -+ - spin_lock_irq(&bfqq->bfqd->lock); - - /* -@@ -3847,6 +3884,9 @@ static bool bfq_has_work(struct blk_mq_hw_ctx *hctx) - { - struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; - -+ bfq_log(bfqd, "has_work, dispatch_non_empty %d busy_queues %d", -+ !list_empty_careful(&bfqd->dispatch), bfqd->busy_queues > 0); -+ - /* - * Avoiding lock: a race on bfqd->busy_queues should cause at - * most a call to dispatch for nothing -@@ -3865,6 +3905,8 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) - rq = list_first_entry(&bfqd->dispatch, struct request, - queuelist); - list_del_init(&rq->queuelist); -+ bfq_log(bfqd, -+ "dispatch requests: picked %p from dispatch list", rq); - goto exit; - } - -@@ -3904,7 +3946,20 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) - if (rq) { - rq->rq_flags |= RQF_STARTED; - bfqd->rq_in_driver++; -- } -+ if (bfqq) -+ bfq_log_bfqq(bfqd, bfqq, -+ "dispatched %s request %p, rq_in_driver %d", -+ bfq_bfqq_sync(bfqq) ? 
"sync" : "async", -+ rq, -+ bfqd->rq_in_driver); -+ else -+ bfq_log(bfqd, -+ "dispatched request %p from dispatch list, rq_in_driver %d", -+ rq, bfqd->rq_in_driver); -+ } else -+ bfq_log(bfqd, -+ "returned NULL request, rq_in_driver %d", -+ bfqd->rq_in_driver); - - return rq; - } -@@ -3944,6 +3999,7 @@ static void bfq_put_queue(struct bfq_queue *bfqq) - return; - - BUG_ON(rb_first(&bfqq->sort_list)); -+ BUG_ON(bfqq->allocated != 0); - BUG_ON(bfqq->entity.tree); - BUG_ON(bfq_bfqq_busy(bfqq)); - -@@ -4043,6 +4099,7 @@ static void bfq_exit_icq(struct io_cq *icq) - { - struct bfq_io_cq *bic = icq_to_bic(icq); - -+ BUG_ON(!bic); - kblockd_schedule_work(&bic->exit_icq_work); - } - -@@ -4057,6 +4114,7 @@ static void bfq_set_next_ioprio_data(struct bfq_queue *bfqq, - int ioprio_class; - struct bfq_data *bfqd = bfqq->bfqd; - -+ WARN_ON(!bfqd); - if (!bfqd) - return; - -@@ -4404,6 +4462,10 @@ static void __bfq_insert_request(struct bfq_data *bfqd, struct request *rq) - { - struct bfq_queue *bfqq = RQ_BFQQ(rq), *new_bfqq; - -+ assert_spin_locked(&bfqd->lock); -+ -+ bfq_log_bfqq(bfqd, bfqq, "__insert_req: rq %p bfqq %p", rq, bfqq); -+ - /* - * An unplug may trigger a requeue of a request from the device - * driver: make sure we are in process context while trying to -@@ -4420,6 +4482,12 @@ static void __bfq_insert_request(struct bfq_data *bfqd, struct request *rq) - */ - new_bfqq->allocated++; - bfqq->allocated--; -+ bfq_log_bfqq(bfqd, bfqq, -+ "insert_request: new allocated %d", bfqq->allocated); -+ bfq_log_bfqq(bfqd, new_bfqq, -+ "insert_request: new_bfqq new allocated %d", -+ bfqq->allocated); -+ - new_bfqq->ref++; - bfq_clear_bfqq_just_created(bfqq); - if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq) -@@ -4529,6 +4597,10 @@ static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd) - bfqd->rq_in_driver--; - bfqq->dispatched--; - -+ bfq_log_bfqq(bfqd, bfqq, -+ "completed_requests: new disp %d, new rq_in_driver %d", -+ bfqq->dispatched, bfqd->rq_in_driver); -+ - if (!bfqq->dispatched && !bfq_bfqq_busy(bfqq)) { - BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list)); - /* -@@ -4618,6 +4690,9 @@ static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd) - - static void bfq_put_rq_priv_body(struct bfq_queue *bfqq) - { -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "put_request_body: allocated %d", bfqq->allocated); -+ BUG_ON(!bfqq->allocated); - bfqq->allocated--; - - bfq_put_queue(bfqq); -@@ -4625,8 +4700,27 @@ static void bfq_put_rq_priv_body(struct bfq_queue *bfqq) - - static void bfq_put_rq_private(struct request_queue *q, struct request *rq) - { -- struct bfq_queue *bfqq = RQ_BFQQ(rq); -- struct bfq_data *bfqd = bfqq->bfqd; -+ struct bfq_queue *bfqq; -+ struct bfq_data *bfqd; -+ struct bfq_io_cq *bic; -+ -+ BUG_ON(!rq); -+ bfqq = RQ_BFQQ(rq); -+ BUG_ON(!bfqq); -+ -+ bic = RQ_BIC(rq); -+ BUG_ON(!bic); -+ -+ bfqd = bfqq->bfqd; -+ BUG_ON(!bfqd); -+ -+ BUG_ON(rq->rq_flags & RQF_QUEUED); -+ BUG_ON(!(rq->rq_flags & RQF_ELVPRIV)); -+ -+ bfq_log_bfqq(bfqd, bfqq, -+ "putting rq %p with %u sects left, STARTED %d", -+ rq, blk_rq_sectors(rq), -+ rq->rq_flags & RQF_STARTED); - - if (rq->rq_flags & RQF_STARTED) - bfqg_stats_update_completion(bfqq_group(bfqq), -@@ -4634,6 +4728,8 @@ static void bfq_put_rq_private(struct request_queue *q, struct request *rq) - rq_io_start_time_ns(rq), - rq->cmd_flags); - -+ BUG_ON(blk_rq_sectors(rq) == 0 && !(rq->rq_flags & RQF_STARTED)); -+ - if (likely(rq->rq_flags & RQF_STARTED)) { - unsigned long flags; - -@@ -4655,7 +4751,9 @@ static void 
bfq_put_rq_private(struct request_queue *q, struct request *rq) - * cause any deadlock, even if other locks are already - * (correctly) held by this process. - */ -+ BUG_ON(in_interrupt()); - -+ assert_spin_locked(&bfqd->lock); - if (!RB_EMPTY_NODE(&rq->rb_node)) - bfq_remove_request(q, rq); - bfq_put_rq_priv_body(bfqq); -@@ -4814,7 +4912,9 @@ static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq) - enum bfqq_expiration reason; - unsigned long flags; - -+ BUG_ON(!bfqd); - spin_lock_irqsave(&bfqd->lock, flags); -+ bfq_log_bfqq(bfqd, bfqq, "handling slice_timer expiration"); - bfq_clear_bfqq_wait_request(bfqq); - - if (bfqq != bfqd->in_service_queue) { -@@ -4857,6 +4957,8 @@ static enum hrtimer_restart bfq_idle_slice_timer(struct hrtimer *timer) - idle_slice_timer); - struct bfq_queue *bfqq = bfqd->in_service_queue; - -+ bfq_log(bfqd, "slice_timer expired"); -+ - /* - * Theoretical race here: the in-service queue can be NULL or - * different from the queue that was idling if a new request -@@ -4909,9 +5011,12 @@ static void bfq_exit_queue(struct elevator_queue *e) - struct bfq_data *bfqd = e->elevator_data; - struct bfq_queue *bfqq, *n; - -+ bfq_log(bfqd, "exit_queue: starting ..."); -+ - hrtimer_cancel(&bfqd->idle_slice_timer); - - BUG_ON(bfqd->in_service_queue); -+ BUG_ON(!list_empty(&bfqd->active_list)); - - list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list) { - if (bfqq->bic) /* bfqqs without bic are handled below */ -@@ -4943,6 +5048,7 @@ static void bfq_exit_queue(struct elevator_queue *e) - spin_unlock_irq(&bfqd->lock); - #endif - -+ bfq_log(bfqd, "exit_queue: finished ..."); - kfree(bfqd); - } - - -From 7f59486861e368d25f59d4136cf8e51a75b7edf9 Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Thu, 9 Feb 2017 10:36:27 +0100 -Subject: [PATCH 14/51] Add lock check in bfq_allow_bio_merge - -Signed-off-by: Paolo Valente ---- - block/bfq-mq-iosched.c | 1 + - 1 file changed, 1 insertion(+) - -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -index 40eadb3f7073..21b876aeba16 100644 ---- a/block/bfq-mq-iosched.c -+++ b/block/bfq-mq-iosched.c -@@ -2279,6 +2279,7 @@ static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq, - if (!bic) - return false; - -+ assert_spin_locked(&bfqd->lock); - bfqq = bic_to_bfqq(bic, is_sync); - /* - * We take advantage of this function to perform an early merge - -From a2dd19a4d95cf401268c144c79ce549c7fc4bbca Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Tue, 7 Feb 2017 15:14:29 +0100 -Subject: [PATCH 15/51] bfq-mq: execute exit_icq operations immediately - -Exploting Omar's patch that removes the taking of the queue lock in -put_io_context_active, this patch moves back the operation of the bfq_exit_icq -hook from a deferred work to the body of the function. 
- -Signed-off-by: Paolo Valente ---- - block/bfq-mq-iosched.c | 34 +++------------------------------- - block/bfq-mq.h | 3 --- - 2 files changed, 3 insertions(+), 34 deletions(-) - -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -index 21b876aeba16..1deb79a47181 100644 ---- a/block/bfq-mq-iosched.c -+++ b/block/bfq-mq-iosched.c -@@ -4080,28 +4080,13 @@ static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync) - } - } - --static void bfq_exit_icq_body(struct work_struct *work) --{ -- struct bfq_io_cq *bic = -- container_of(work, struct bfq_io_cq, exit_icq_work); -- -- bfq_exit_icq_bfqq(bic, true); -- bfq_exit_icq_bfqq(bic, false); --} -- --static void bfq_init_icq(struct io_cq *icq) --{ -- struct bfq_io_cq *bic = icq_to_bic(icq); -- -- INIT_WORK(&bic->exit_icq_work, bfq_exit_icq_body); --} -- - static void bfq_exit_icq(struct io_cq *icq) - { - struct bfq_io_cq *bic = icq_to_bic(icq); - - BUG_ON(!bic); -- kblockd_schedule_work(&bic->exit_icq_work); -+ bfq_exit_icq_bfqq(bic, true); -+ bfq_exit_icq_bfqq(bic, false); - } - - /* -@@ -5019,21 +5004,9 @@ static void bfq_exit_queue(struct elevator_queue *e) - BUG_ON(bfqd->in_service_queue); - BUG_ON(!list_empty(&bfqd->active_list)); - -- list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list) { -- if (bfqq->bic) /* bfqqs without bic are handled below */ -- cancel_work_sync(&bfqq->bic->exit_icq_work); -- } -- - spin_lock_irq(&bfqd->lock); -- list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list) { -+ list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list) - bfq_deactivate_bfqq(bfqd, bfqq, false, false); -- /* -- * Make sure that deferred exit_icq_work completes -- * without errors for bfq_queues without bic -- */ -- if (!bfqq->bic) -- bfqq->bfqd = NULL; -- } - spin_unlock_irq(&bfqd->lock); - - hrtimer_cancel(&bfqd->idle_slice_timer); -@@ -5471,7 +5444,6 @@ static struct elevator_type iosched_bfq_mq = { - .ops.mq = { - .get_rq_priv = bfq_get_rq_private, - .put_rq_priv = bfq_put_rq_private, -- .init_icq = bfq_init_icq, - .exit_icq = bfq_exit_icq, - .insert_requests = bfq_insert_requests, - .dispatch_request = bfq_dispatch_request, -diff --git a/block/bfq-mq.h b/block/bfq-mq.h -index c3fcd5ebd735..23744b246db6 100644 ---- a/block/bfq-mq.h -+++ b/block/bfq-mq.h -@@ -356,9 +356,6 @@ struct bfq_io_cq { - uint64_t blkcg_serial_nr; /* the current blkcg serial */ - #endif - -- /* delayed work to exec the body of the the exit_icq handler */ -- struct work_struct exit_icq_work; -- - /* - * Snapshot of the has_short_time flag before merging; taken - * to remember its value while the queue is merged, so as to - -From ab7e78a0ff095101de74e700f8743295a500bb20 Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Tue, 21 Feb 2017 10:26:22 +0100 -Subject: [PATCH 16/51] Unnest request-queue and ioc locks from scheduler locks - -In some bio-merging functions, the request-queue lock needs to be -taken, to lookup for the bic associated with the process that issued -the bio that may need to be merged. In addition, put_io_context must -be invoked in some other functions, and put_io_context may cause the -lock of the involved ioc to be taken. In both cases, these extra -request-queue or ioc locks are taken, or might be taken, while the -scheduler lock is being held. In this respect, there are other code -paths, in part external to bfq-mq, in which the same locks are taken -(nested) in the opposite order, i.e., it is the scheduler lock to be -taken while the request-queue or the ioc lock is being held. 
This -leads to circular deadlocks. - -This commit addresses this issue by modifying the logic of the above -functions, so as to let the lookup and put_io_context be performed, -and thus the extra locks be taken, outside the critical sections -protected by the scheduler lock. - -Signed-off-by: Paolo Valente ---- - block/bfq-cgroup-included.c | 9 ++ - block/bfq-mq-iosched.c | 264 ++++++++++++++++++++++++++++---------------- - block/bfq-mq.h | 25 ++++- - block/bfq-sched.c | 11 ++ - 4 files changed, 213 insertions(+), 96 deletions(-) - -diff --git a/block/bfq-cgroup-included.c b/block/bfq-cgroup-included.c -index 8a73de76f32b..cf59eeb7f08e 100644 ---- a/block/bfq-cgroup-included.c -+++ b/block/bfq-cgroup-included.c -@@ -716,6 +716,9 @@ static void bfq_pd_offline(struct blkg_policy_data *pd) - struct bfq_group *bfqg; - struct bfq_data *bfqd; - struct bfq_entity *entity; -+#ifdef BFQ_MQ -+ unsigned long flags; -+#endif - int i; - - BUG_ON(!pd); -@@ -729,6 +732,9 @@ static void bfq_pd_offline(struct blkg_policy_data *pd) - if (!entity) /* root group */ - return; - -+#ifdef BFQ_MQ -+ spin_lock_irqsave(&bfqd->lock, flags); -+#endif - /* - * Empty all service_trees belonging to this group before - * deactivating the group itself. -@@ -766,6 +772,9 @@ static void bfq_pd_offline(struct blkg_policy_data *pd) - __bfq_deactivate_entity(entity, false); - bfq_put_async_queues(bfqd, bfqg); - -+#ifdef BFQ_MQ -+ bfq_unlock_put_ioc_restore(bfqd, flags); -+#endif - /* - * @blkg is going offline and will be ignored by - * blkg_[rw]stat_recursive_sum(). Transfer stats to the parent so -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -index 1deb79a47181..69ef3761c95d 100644 ---- a/block/bfq-mq-iosched.c -+++ b/block/bfq-mq-iosched.c -@@ -233,6 +233,7 @@ static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd, - return NULL; - } - -+#define BFQ_MQ - #include "bfq-sched.c" - #include "bfq-cgroup-included.c" - -@@ -1564,15 +1565,9 @@ static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd, - struct bio *bio, - struct request_queue *q) - { -- struct task_struct *tsk = current; -- struct bfq_io_cq *bic; -- struct bfq_queue *bfqq; -+ struct bfq_queue *bfqq = bfqd->bio_bfqq; - -- bic = bfq_bic_lookup(bfqd, tsk->io_context, q); -- if (!bic) -- return NULL; - -- bfqq = bic_to_bfqq(bic, op_is_sync(bio->bi_opf)); - if (bfqq) - return elv_rb_find(&bfqq->sort_list, bio_end_sector(bio)); - -@@ -1693,9 +1688,26 @@ static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio) - struct request_queue *q = hctx->queue; - struct bfq_data *bfqd = q->elevator->elevator_data; - struct request *free = NULL; -+ /* -+ * bfq_bic_lookup grabs the queue_lock: invoke it now and -+ * store its return value for later use, to avoid nesting -+ * queue_lock inside the bfqd->lock. We assume that the bic -+ * returned by bfq_bic_lookup does not go away before -+ * bfqd->lock is taken. 
-+ */ -+ struct bfq_io_cq *bic = bfq_bic_lookup(bfqd, current->io_context, q); - bool ret; - - spin_lock_irq(&bfqd->lock); -+ -+ if (bic) -+ bfqd->bio_bfqq = bic_to_bfqq(bic, op_is_sync(bio->bi_opf)); -+ else -+ bfqd->bio_bfqq = NULL; -+ bfqd->bio_bic = bic; -+ /* Set next flag just for testing purposes */ -+ bfqd->bio_bfqq_set = true; -+ - ret = blk_mq_sched_try_merge(q, bio, &free); - - /* -@@ -1706,6 +1718,7 @@ static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio) - */ - if (free) - blk_mq_free_request(free); -+ bfqd->bio_bfqq_set = false; - spin_unlock_irq(&bfqd->lock); - - return ret; -@@ -2261,8 +2274,7 @@ static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq, - { - struct bfq_data *bfqd = q->elevator->elevator_data; - bool is_sync = op_is_sync(bio->bi_opf); -- struct bfq_io_cq *bic; -- struct bfq_queue *bfqq, *new_bfqq; -+ struct bfq_queue *bfqq = bfqd->bio_bfqq, *new_bfqq; - - /* - * Disallow merge of a sync bio into an async request. -@@ -2273,31 +2285,40 @@ static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq, - /* - * Lookup the bfqq that this bio will be queued with. Allow - * merge only if rq is queued there. -- * Queue lock is held here. - */ -- bic = bfq_bic_lookup(bfqd, current->io_context, q); -- if (!bic) -+ if (!bfqq) - return false; - -- assert_spin_locked(&bfqd->lock); -- bfqq = bic_to_bfqq(bic, is_sync); - /* - * We take advantage of this function to perform an early merge - * of the queues of possible cooperating processes. - */ -- if (bfqq) { -- new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false); -- if (new_bfqq) { -- bfq_merge_bfqqs(bfqd, bic, bfqq, new_bfqq); -- /* -- * If we get here, the bio will be queued in the -- * shared queue, i.e., new_bfqq, so use new_bfqq -- * to decide whether bio and rq can be merged. -- */ -- bfqq = new_bfqq; -- } -- } -+ new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false); -+ if (new_bfqq) { -+ /* -+ * bic still points to bfqq, then it has not yet been -+ * redirected to some other bfq_queue, and a queue -+ * merge beween bfqq and new_bfqq can be safely -+ * fulfillled, i.e., bic can be redirected to new_bfqq -+ * and bfqq can be put. -+ */ -+ bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq, -+ new_bfqq); -+ /* -+ * If we get here, bio will be queued into new_queue, -+ * so use new_bfqq to decide whether bio and rq can be -+ * merged. -+ */ -+ bfqq = new_bfqq; - -+ /* -+ * Change also bqfd->bio_bfqq, as -+ * bfqd->bio_bic now points to new_bfqq, and -+ * this function may be invoked again (and then may -+ * use again bqfd->bio_bfqq). -+ */ -+ bfqd->bio_bfqq = bfqq; -+ } - return bfqq == RQ_BFQQ(rq); - } - -@@ -3965,14 +3986,43 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) - return rq; - } - -+/* -+ * Next two functions release bfqd->lock and put the io context -+ * pointed by bfqd->ioc_to_put. This delayed put is used to not risk -+ * to take an ioc->lock while the scheduler lock is being held. 
-+ */ -+static void bfq_unlock_put_ioc(struct bfq_data *bfqd) -+{ -+ struct io_context *ioc_to_put = bfqd->ioc_to_put; -+ -+ bfqd->ioc_to_put = NULL; -+ spin_unlock_irq(&bfqd->lock); -+ -+ if (ioc_to_put) -+ put_io_context(ioc_to_put); -+} -+ -+static void bfq_unlock_put_ioc_restore(struct bfq_data *bfqd, -+ unsigned long flags) -+{ -+ struct io_context *ioc_to_put = bfqd->ioc_to_put; -+ -+ bfqd->ioc_to_put = NULL; -+ spin_unlock_irqrestore(&bfqd->lock, flags); -+ -+ if (ioc_to_put) -+ put_io_context(ioc_to_put); -+} -+ - static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) - { - struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; - struct request *rq; - - spin_lock_irq(&bfqd->lock); -+ - rq = __bfq_dispatch_request(hctx); -- spin_unlock_irq(&bfqd->lock); -+ bfq_unlock_put_ioc(bfqd); - - return rq; - } -@@ -3981,7 +4031,7 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) - * Task holds one reference to the queue, dropped when task exits. Each rq - * in-flight on this queue also holds a reference, dropped when rq is freed. - * -- * Queue lock must be held here. Recall not to use bfqq after calling -+ * Scheduler lock must be held here. Recall not to use bfqq after calling - * this function on it. - */ - static void bfq_put_queue(struct bfq_queue *bfqq) -@@ -4066,17 +4116,23 @@ static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync) - bfqd = bfqq->bfqd; /* NULL if scheduler already exited */ - - if (bfqq && bfqd) { -- spin_lock_irq(&bfqd->lock); -+ unsigned long flags; -+ -+ spin_lock_irqsave(&bfqd->lock, flags); - /* -- * If the bic is using a shared queue, put the reference -- * taken on the io_context when the bic started using a -- * shared bfq_queue. -+ * If the bic is using a shared queue, put the -+ * reference taken on the io_context when the bic -+ * started using a shared bfq_queue. This put cannot -+ * make ioc->ref_count reach 0, then no ioc->lock -+ * risks to be taken (leading to possible deadlock -+ * scenarios). - */ - if (is_sync && bfq_bfqq_coop(bfqq)) - put_io_context(bic->icq.ioc); -+ - bfq_exit_bfqq(bfqd, bfqq); - bic_set_bfqq(bic, NULL, is_sync); -- spin_unlock_irq(&bfqd->lock); -+ bfq_unlock_put_ioc_restore(bfqd, flags); - } - } - -@@ -4183,8 +4239,6 @@ static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, - INIT_HLIST_NODE(&bfqq->burst_list_node); - BUG_ON(!hlist_unhashed(&bfqq->burst_list_node)); - -- spin_lock_init(&bfqq->lock); -- - bfqq->ref = 0; - bfqq->bfqd = bfqd; - -@@ -4476,6 +4530,14 @@ static void __bfq_insert_request(struct bfq_data *bfqd, struct request *rq) - - new_bfqq->ref++; - bfq_clear_bfqq_just_created(bfqq); -+ /* -+ * If the bic associated with the process -+ * issuing this request still points to bfqq -+ * (and thus has not been already redirected -+ * to new_bfqq or even some other bfq_queue), -+ * then complete the merge and redirect it to -+ * new_bfqq. 
-+ */ - if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq) - bfq_merge_bfqqs(bfqd, RQ_BIC(rq), - bfqq, new_bfqq); -@@ -4498,14 +4560,17 @@ static void __bfq_insert_request(struct bfq_data *bfqd, struct request *rq) - } - - static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, -- bool at_head) -+ bool at_head) - { - struct request_queue *q = hctx->queue; - struct bfq_data *bfqd = q->elevator->elevator_data; - - spin_lock_irq(&bfqd->lock); -- if (blk_mq_sched_try_insert_merge(q, rq)) -- goto done; -+ if (blk_mq_sched_try_insert_merge(q, rq)) { -+ spin_unlock_irq(&bfqd->lock); -+ return; -+ } -+ - spin_unlock_irq(&bfqd->lock); - - blk_mq_sched_request_inserted(rq); -@@ -4530,8 +4595,8 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, - q->last_merge = rq; - } - } --done: -- spin_unlock_irq(&bfqd->lock); -+ -+ bfq_unlock_put_ioc(bfqd); - } - - static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx, -@@ -4724,7 +4789,7 @@ static void bfq_put_rq_private(struct request_queue *q, struct request *rq) - bfq_completed_request(bfqq, bfqd); - bfq_put_rq_priv_body(bfqq); - -- spin_unlock_irqrestore(&bfqd->lock, flags); -+ bfq_unlock_put_ioc_restore(bfqd, flags); - } else { - /* - * Request rq may be still/already in the scheduler, -@@ -4732,10 +4797,10 @@ static void bfq_put_rq_private(struct request_queue *q, struct request *rq) - * defer such a check and removal, to avoid - * inconsistencies in the time interval from the end - * of this function to the start of the deferred work. -- * Fortunately, this situation occurs only in process -- * context, so taking the scheduler lock does not -- * cause any deadlock, even if other locks are already -- * (correctly) held by this process. -+ * This situation seems to occur only in process -+ * context, as a consequence of a merge. In the -+ * current version of the code, this implies that the -+ * lock is held. - */ - BUG_ON(in_interrupt()); - -@@ -4758,8 +4823,6 @@ bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq) - { - bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue"); - -- put_io_context(bic->icq.ioc); -- - if (bfqq_process_refs(bfqq) == 1) { - bfqq->pid = current->pid; - bfq_clear_bfqq_coop(bfqq); -@@ -4775,6 +4838,41 @@ bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq) - return NULL; - } - -+static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd, -+ struct bfq_io_cq *bic, -+ struct bio *bio, -+ bool split, bool is_sync, -+ bool *new_queue) -+{ -+ struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync); -+ -+ if (likely(bfqq && bfqq != &bfqd->oom_bfqq)) -+ return bfqq; -+ -+ if (new_queue) -+ *new_queue = true; -+ -+ if (bfqq) -+ bfq_put_queue(bfqq); -+ bfqq = bfq_get_queue(bfqd, bio, is_sync, bic); -+ -+ bic_set_bfqq(bic, bfqq, is_sync); -+ if (split && is_sync) { -+ if ((bic->was_in_burst_list && bfqd->large_burst) || -+ bic->saved_in_large_burst) -+ bfq_mark_bfqq_in_large_burst(bfqq); -+ else { -+ bfq_clear_bfqq_in_large_burst(bfqq); -+ if (bic->was_in_burst_list) -+ hlist_add_head(&bfqq->burst_list_node, -+ &bfqd->burst_list); -+ } -+ bfqq->split_time = jiffies; -+ } -+ -+ return bfqq; -+} -+ - /* - * Allocate bfq data structures associated with this request. 
- */ -@@ -4786,6 +4884,7 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq, - const int is_sync = rq_is_sync(rq); - struct bfq_queue *bfqq; - bool bfqq_already_existing = false, split = false; -+ bool new_queue = false; - - spin_lock_irq(&bfqd->lock); - -@@ -4796,42 +4895,10 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq, - - bfq_bic_update_cgroup(bic, bio); - --new_queue: -- bfqq = bic_to_bfqq(bic, is_sync); -- if (!bfqq || bfqq == &bfqd->oom_bfqq) { -- if (bfqq) -- bfq_put_queue(bfqq); -- bfqq = bfq_get_queue(bfqd, bio, is_sync, bic); -- BUG_ON(!hlist_unhashed(&bfqq->burst_list_node)); -+ bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, false, is_sync, -+ &new_queue); - -- bic_set_bfqq(bic, bfqq, is_sync); -- if (split && is_sync) { -- bfq_log_bfqq(bfqd, bfqq, -- "get_request: was_in_list %d " -- "was_in_large_burst %d " -- "large burst in progress %d", -- bic->was_in_burst_list, -- bic->saved_in_large_burst, -- bfqd->large_burst); -- -- if ((bic->was_in_burst_list && bfqd->large_burst) || -- bic->saved_in_large_burst) { -- bfq_log_bfqq(bfqd, bfqq, -- "get_request: marking in " -- "large burst"); -- bfq_mark_bfqq_in_large_burst(bfqq); -- } else { -- bfq_log_bfqq(bfqd, bfqq, -- "get_request: clearing in " -- "large burst"); -- bfq_clear_bfqq_in_large_burst(bfqq); -- if (bic->was_in_burst_list) -- hlist_add_head(&bfqq->burst_list_node, -- &bfqd->burst_list); -- } -- bfqq->split_time = jiffies; -- } -- } else { -+ if (unlikely(!new_queue)) { - /* If the queue was seeky for too long, break it apart. */ - if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq)) { - bfq_log_bfqq(bfqd, bfqq, "breaking apart bfqq"); -@@ -4841,9 +4908,19 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq, - bic->saved_in_large_burst = true; - - bfqq = bfq_split_bfqq(bic, bfqq); -- split = true; -+ /* -+ * A reference to bic->icq.ioc needs to be -+ * released after a queue split. Do not do it -+ * immediately, to not risk to possibly take -+ * an ioc->lock while holding the scheduler -+ * lock. -+ */ -+ bfqd->ioc_to_put = bic->icq.ioc; -+ - if (!bfqq) -- goto new_queue; -+ bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, -+ true, is_sync, -+ NULL); - else - bfqq_already_existing = true; - } -@@ -4861,18 +4938,17 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq, - - /* - * If a bfq_queue has only one process reference, it is owned -- * by only one bfq_io_cq: we can set the bic field of the -- * bfq_queue to the address of that structure. Also, if the -- * queue has just been split, mark a flag so that the -- * information is available to the other scheduler hooks. -+ * by only this bic: we can then set bfqq->bic = bic. in -+ * addition, if the queue has also just been split, we have to -+ * resume its state. - */ - if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) { - bfqq->bic = bic; -- if (split) { -+ if (bfqd->ioc_to_put) { /* if true, then there has been a split */ - /* -- * If the queue has just been split from a shared -- * queue, restore the idle window and the possible -- * weight raising period. -+ * The queue has just been split from a shared -+ * queue: restore the idle window and the -+ * possible weight raising period. 
- */ - bfq_bfqq_resume_state(bfqq, bfqd, bic, - bfqq_already_existing); -@@ -4882,7 +4958,7 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq, - if (unlikely(bfq_bfqq_just_created(bfqq))) - bfq_handle_burst(bfqd, bfqq); - -- spin_unlock_irq(&bfqd->lock); -+ bfq_unlock_put_ioc(bfqd); - - return 0; - -@@ -4929,7 +5005,7 @@ static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq) - bfq_bfqq_expire(bfqd, bfqq, true, reason); - - schedule_dispatch: -- spin_unlock_irqrestore(&bfqd->lock, flags); -+ bfq_unlock_put_ioc_restore(bfqd, flags); - bfq_schedule_dispatch(bfqd); - } - -diff --git a/block/bfq-mq.h b/block/bfq-mq.h -index 23744b246db6..bd83f1c02573 100644 ---- a/block/bfq-mq.h -+++ b/block/bfq-mq.h -@@ -338,8 +338,6 @@ struct bfq_queue { - unsigned long wr_start_at_switch_to_srt; - - unsigned long split_time; /* time of last split */ -- -- spinlock_t lock; - }; - - /** -@@ -609,6 +607,29 @@ struct bfq_data { - struct bfq_queue oom_bfqq; - - spinlock_t lock; -+ -+ /* -+ * bic associated with the task issuing current bio for -+ * merging. This and the next field are used as a support to -+ * be able to perform the bic lookup, needed by bio-merge -+ * functions, before the scheduler lock is taken, and thus -+ * avoid taking the request-queue lock while the scheduler -+ * lock is being held. -+ */ -+ struct bfq_io_cq *bio_bic; -+ /* bfqq associated with the task issuing current bio for merging */ -+ struct bfq_queue *bio_bfqq; -+ /* Extra flag used only for TESTING */ -+ bool bio_bfqq_set; -+ -+ /* -+ * io context to put right after bfqd->lock is released. This -+ * filed is used to perform put_io_context, when needed, to -+ * after the scheduler lock has been released, and thus -+ * prevent an ioc->lock from being possibly taken while the -+ * scheduler lock is being held. -+ */ -+ struct io_context *ioc_to_put; - }; - - enum bfqq_state_flags { -diff --git a/block/bfq-sched.c b/block/bfq-sched.c -index b54a638186e3..a5c8b4acd33c 100644 ---- a/block/bfq-sched.c -+++ b/block/bfq-sched.c -@@ -1905,7 +1905,18 @@ static void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd) - struct bfq_entity *entity = in_serv_entity; - - if (bfqd->in_service_bic) { -+#ifdef BFQ_MQ -+ /* -+ * Schedule the release of a reference to -+ * bfqd->in_service_bic->icq.ioc to right after the -+ * scheduler lock is released. This ioc is not -+ * released immediately, to not risk to possibly take -+ * an ioc->lock while holding the scheduler lock. 
-+ */ -+ bfqd->ioc_to_put = bfqd->in_service_bic->icq.ioc; -+#else - put_io_context(bfqd->in_service_bic->icq.ioc); -+#endif - bfqd->in_service_bic = NULL; - } - - -From 84cc7140cb4f0574710625f51abbb076a1dd2920 Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Fri, 3 Mar 2017 09:31:14 +0100 -Subject: [PATCH 17/51] Add checks and extra log messages - Part II - -Signed-off-by: Paolo Valente ---- - block/bfq-mq-iosched.c | 42 ++++++++++++++++++++++++++++++++++++++++-- - block/bfq-sched.c | 1 + - 2 files changed, 41 insertions(+), 2 deletions(-) - -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -index 69ef3761c95d..5707d42b160d 100644 ---- a/block/bfq-mq-iosched.c -+++ b/block/bfq-mq-iosched.c -@@ -1567,6 +1567,7 @@ static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd, - { - struct bfq_queue *bfqq = bfqd->bio_bfqq; - -+ BUG_ON(!bfqd->bio_bfqq_set); - - if (bfqq) - return elv_rb_find(&bfqq->sort_list, bio_end_sector(bio)); -@@ -1719,6 +1720,7 @@ static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio) - if (free) - blk_mq_free_request(free); - bfqd->bio_bfqq_set = false; -+ BUG_ON(bfqd->ioc_to_put); - spin_unlock_irq(&bfqd->lock); - - return ret; -@@ -1781,6 +1783,7 @@ static void bfq_request_merged(struct request_queue *q, struct request *req, - bfq_updated_next_req(bfqd, bfqq); - bfq_pos_tree_add_move(bfqd, bfqq); - } -+ BUG_ON(bfqd->ioc_to_put); - spin_unlock_irq(&bfqd->lock); - } - } -@@ -1824,6 +1827,7 @@ static void bfq_requests_merged(struct request_queue *q, struct request *rq, - - bfq_remove_request(q, next); - -+ BUG_ON(bfqq->bfqd->ioc_to_put); - spin_unlock_irq(&bfqq->bfqd->lock); - end: - bfqg_stats_update_io_merged(bfqq_group(bfqq), next->cmd_flags); -@@ -2195,9 +2199,11 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic, - { - bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu", - (unsigned long) new_bfqq->pid); -+ BUG_ON(bfqq->bic && bfqq->bic == new_bfqq->bic); - /* Save weight raising and idle window of the merged queues */ - bfq_bfqq_save_state(bfqq); - bfq_bfqq_save_state(new_bfqq); -+ - if (bfq_bfqq_IO_bound(bfqq)) - bfq_mark_bfqq_IO_bound(new_bfqq); - bfq_clear_bfqq_IO_bound(bfqq); -@@ -2276,6 +2282,7 @@ static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq, - bool is_sync = op_is_sync(bio->bi_opf); - struct bfq_queue *bfqq = bfqd->bio_bfqq, *new_bfqq; - -+ assert_spin_locked(&bfqd->lock); - /* - * Disallow merge of a sync bio into an async request. - */ -@@ -2286,6 +2293,7 @@ static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq, - * Lookup the bfqq that this bio will be queued with. Allow - * merge only if rq is queued there. - */ -+ BUG_ON(!bfqd->bio_bfqq_set); - if (!bfqq) - return false; - -@@ -2294,6 +2302,7 @@ static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq, - * of the queues of possible cooperating processes. 
- */ - new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false); -+ BUG_ON(new_bfqq == bfqq); - if (new_bfqq) { - /* - * bic still points to bfqq, then it has not yet been -@@ -4040,6 +4049,8 @@ static void bfq_put_queue(struct bfq_queue *bfqq) - struct bfq_group *bfqg = bfqq_group(bfqq); - #endif - -+ assert_spin_locked(&bfqq->bfqd->lock); -+ - BUG_ON(bfqq->ref <= 0); - - if (bfqq->bfqd) -@@ -4119,6 +4130,7 @@ static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync) - unsigned long flags; - - spin_lock_irqsave(&bfqd->lock, flags); -+ BUG_ON(bfqd->ioc_to_put); - /* - * If the bic is using a shared queue, put the - * reference taken on the io_context when the bic -@@ -4567,10 +4579,12 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, - - spin_lock_irq(&bfqd->lock); - if (blk_mq_sched_try_insert_merge(q, rq)) { -+ BUG_ON(bfqd->ioc_to_put); - spin_unlock_irq(&bfqd->lock); - return; - } - -+ BUG_ON(bfqd->ioc_to_put); - spin_unlock_irq(&bfqd->lock); - - blk_mq_sched_request_inserted(rq); -@@ -4785,6 +4799,7 @@ static void bfq_put_rq_private(struct request_queue *q, struct request *rq) - unsigned long flags; - - spin_lock_irqsave(&bfqd->lock, flags); -+ BUG_ON(bfqd->ioc_to_put); - - bfq_completed_request(bfqq, bfqd); - bfq_put_rq_priv_body(bfqq); -@@ -4855,13 +4870,28 @@ static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd, - if (bfqq) - bfq_put_queue(bfqq); - bfqq = bfq_get_queue(bfqd, bio, is_sync, bic); -+ BUG_ON(!hlist_unhashed(&bfqq->burst_list_node)); - - bic_set_bfqq(bic, bfqq, is_sync); - if (split && is_sync) { -+ bfq_log_bfqq(bfqd, bfqq, -+ "get_request: was_in_list %d " -+ "was_in_large_burst %d " -+ "large burst in progress %d", -+ bic->was_in_burst_list, -+ bic->saved_in_large_burst, -+ bfqd->large_burst); -+ - if ((bic->was_in_burst_list && bfqd->large_burst) || -- bic->saved_in_large_burst) -+ bic->saved_in_large_burst) { -+ bfq_log_bfqq(bfqd, bfqq, -+ "get_request: marking in " -+ "large burst"); - bfq_mark_bfqq_in_large_burst(bfqq); -- else { -+ } else { -+ bfq_log_bfqq(bfqd, bfqq, -+ "get_request: clearing in " -+ "large burst"); - bfq_clear_bfqq_in_large_burst(bfqq); - if (bic->was_in_burst_list) - hlist_add_head(&bfqq->burst_list_node, -@@ -4897,10 +4927,12 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq, - - bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, false, is_sync, - &new_queue); -+ BUG_ON(bfqd->ioc_to_put); - - if (unlikely(!new_queue)) { - /* If the queue was seeky for too long, break it apart. 
*/ - if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq)) { -+ BUG_ON(!is_sync); - bfq_log_bfqq(bfqd, bfqq, "breaking apart bfqq"); - - /* Update bic before losing reference to bfqq */ -@@ -4923,6 +4955,9 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq, - NULL); - else - bfqq_already_existing = true; -+ -+ BUG_ON(!bfqq); -+ BUG_ON(bfqq == &bfqd->oom_bfqq); - } - } - -@@ -4976,6 +5011,8 @@ static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq) - - BUG_ON(!bfqd); - spin_lock_irqsave(&bfqd->lock, flags); -+ BUG_ON(bfqd->ioc_to_put); -+ - bfq_log_bfqq(bfqd, bfqq, "handling slice_timer expiration"); - bfq_clear_bfqq_wait_request(bfqq); - -@@ -5083,6 +5120,7 @@ static void bfq_exit_queue(struct elevator_queue *e) - spin_lock_irq(&bfqd->lock); - list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list) - bfq_deactivate_bfqq(bfqd, bfqq, false, false); -+ BUG_ON(bfqd->ioc_to_put); - spin_unlock_irq(&bfqd->lock); - - hrtimer_cancel(&bfqd->idle_slice_timer); -diff --git a/block/bfq-sched.c b/block/bfq-sched.c -index a5c8b4acd33c..85e59eeb3569 100644 ---- a/block/bfq-sched.c -+++ b/block/bfq-sched.c -@@ -1906,6 +1906,7 @@ static void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd) - - if (bfqd->in_service_bic) { - #ifdef BFQ_MQ -+ BUG_ON(bfqd->ioc_to_put); - /* - * Schedule the release of a reference to - * bfqd->in_service_bic->icq.ioc to right after the - -From 3d54cb804f1db2e08ce4a6cc335868538542f587 Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Wed, 22 Feb 2017 11:30:01 +0100 -Subject: [PATCH 18/51] Fix unbalanced increment of rq_in_driver - -Signed-off-by: Paolo Valente ---- - block/bfq-mq-iosched.c | 52 +++++++++++++++++++++++++++++++++++++++++--------- - 1 file changed, 43 insertions(+), 9 deletions(-) - -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -index 5707d42b160d..9cbcb8d43d81 100644 ---- a/block/bfq-mq-iosched.c -+++ b/block/bfq-mq-iosched.c -@@ -3936,9 +3936,45 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) - rq = list_first_entry(&bfqd->dispatch, struct request, - queuelist); - list_del_init(&rq->queuelist); -+ - bfq_log(bfqd, - "dispatch requests: picked %p from dispatch list", rq); -- goto exit; -+ bfqq = RQ_BFQQ(rq); -+ -+ if (bfqq) { -+ /* -+ * Increment counters here, because this -+ * dispatch does not follow the standard -+ * dispatch flow (where counters are -+ * incremented) -+ */ -+ bfqq->dispatched++; -+ -+ goto inc_in_driver_start_rq; -+ } -+ -+ /* -+ * We exploit the put_rq_private hook to decrement -+ * rq_in_driver, but put_rq_private will not be -+ * invoked on this request. So, to avoid unbalance, -+ * just start this request, without incrementing -+ * rq_in_driver. As a negative consequence, -+ * rq_in_driver is deceptively lower than it should be -+ * while this request is in service. This may cause -+ * bfq_schedule_dispatch to be invoked uselessly. -+ * -+ * As for implementing an exact solution, the -+ * put_request hook, if defined, is probably invoked -+ * also on this request. So, by exploiting this hook, -+ * we could 1) increment rq_in_driver here, and 2) -+ * decrement it in put_request. Such a solution would -+ * let the value of the counter be always accurate, -+ * but it would entail using an extra interface -+ * function. This cost seems higher than the benefit, -+ * being the frequency of non-elevator-private -+ * requests very low. 
-+ */ -+ goto start_rq; - } - - bfq_log(bfqd, "dispatch requests: %d busy queues", bfqd->busy_queues); -@@ -3973,10 +4009,12 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) - - BUG_ON(bfqq->next_rq == NULL && - bfqq->entity.budget < bfqq->entity.service); --exit: -+ - if (rq) { -- rq->rq_flags |= RQF_STARTED; -+ inc_in_driver_start_rq: - bfqd->rq_in_driver++; -+ start_rq: -+ rq->rq_flags |= RQF_STARTED; - if (bfqq) - bfq_log_bfqq(bfqd, bfqq, - "dispatched %s request %p, rq_in_driver %d", -@@ -3992,6 +4030,7 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) - "returned NULL request, rq_in_driver %d", - bfqd->rq_in_driver); - -+exit: - return rq; - } - -@@ -4591,15 +4630,10 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, - - spin_lock_irq(&bfqd->lock); - if (at_head || blk_rq_is_passthrough(rq)) { -- struct bfq_queue *bfqq = RQ_BFQQ(rq); -- - if (at_head) - list_add(&rq->queuelist, &bfqd->dispatch); - else - list_add_tail(&rq->queuelist, &bfqd->dispatch); -- -- if (bfqq) -- bfqq->dispatched++; - } else { - __bfq_insert_request(bfqd, rq); - -@@ -4966,7 +5000,7 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq, - "get_request: new allocated %d", bfqq->allocated); - - bfqq->ref++; -- bfq_log_bfqq(bfqd, bfqq, "get_request: bfqq %p, %d", bfqq, bfqq->ref); -+ bfq_log_bfqq(bfqd, bfqq, "get_request %p: bfqq %p, %d", rq, bfqq, bfqq->ref); - - rq->elv.priv[0] = bic; - rq->elv.priv[1] = bfqq; - -From 7ba977d696b239569b4cd233aebc99e136ecf487 Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Fri, 3 Mar 2017 09:39:35 +0100 -Subject: [PATCH 19/51] Add checks and extra log messages - Part III - -Signed-off-by: Paolo Valente ---- - block/bfq-mq-iosched.c | 11 +++++++++++ - 1 file changed, 11 insertions(+) - -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -index 9cbcb8d43d81..24b529a2edc7 100644 ---- a/block/bfq-mq-iosched.c -+++ b/block/bfq-mq-iosched.c -@@ -4630,10 +4630,21 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, - - spin_lock_irq(&bfqd->lock); - if (at_head || blk_rq_is_passthrough(rq)) { -+ struct bfq_queue *bfqq = RQ_BFQQ(rq); -+ - if (at_head) - list_add(&rq->queuelist, &bfqd->dispatch); - else - list_add_tail(&rq->queuelist, &bfqd->dispatch); -+ -+ if (bfqq) -+ bfq_log_bfqq(bfqd, bfqq, -+ "insert_request %p in disp: at_head %d", -+ rq, at_head); -+ else -+ bfq_log(bfqd, -+ "insert_request %p in disp: at_head %d", -+ rq, at_head); - } else { - __bfq_insert_request(bfqd, rq); - - -From c94e47b2908600b8ba89f84b0ac7febddd313141 Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Fri, 17 Feb 2017 14:28:02 +0100 -Subject: [PATCH 20/51] TESTING: Check wrong invocation of merge and - put_rq_priv functions - -Check that merge functions are not invoked on requests queued in the -dispatch queue, and that neither put_rq_private is invoked on these -requests if, in addition, they have not passed through get_rq_private. 
- -Signed-off-by: Paolo Valente ---- - block/bfq-mq-iosched.c | 22 ++++++++++++++++++++++ - include/linux/blkdev.h | 2 ++ - 2 files changed, 24 insertions(+) - -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -index 24b529a2edc7..b4d40bb712d2 100644 ---- a/block/bfq-mq-iosched.c -+++ b/block/bfq-mq-iosched.c -@@ -1746,6 +1746,8 @@ static int bfq_request_merge(struct request_queue *q, struct request **req, - static void bfq_request_merged(struct request_queue *q, struct request *req, - enum elv_merge type) - { -+ BUG_ON(req->rq_flags & RQF_DISP_LIST); -+ - if (type == ELEVATOR_FRONT_MERGE && - rb_prev(&req->rb_node) && - blk_rq_pos(req) < -@@ -1795,6 +1797,8 @@ static void bfq_requests_merged(struct request_queue *q, struct request *rq, - - BUG_ON(!RQ_BFQQ(rq)); - BUG_ON(!RQ_BFQQ(next)); -+ BUG_ON(rq->rq_flags & RQF_DISP_LIST); -+ BUG_ON(next->rq_flags & RQF_DISP_LIST); - - if (!RB_EMPTY_NODE(&rq->rb_node)) - goto end; -@@ -3936,6 +3940,7 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) - rq = list_first_entry(&bfqd->dispatch, struct request, - queuelist); - list_del_init(&rq->queuelist); -+ rq->rq_flags &= ~RQF_DISP_LIST; - - bfq_log(bfqd, - "dispatch requests: picked %p from dispatch list", rq); -@@ -3950,6 +3955,17 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) - */ - bfqq->dispatched++; - -+ /* -+ * TESTING: reset DISP_LIST flag, because: 1) -+ * this rq this request has passed through -+ * get_rq_private, 2) then it will have -+ * put_rq_private invoked on it, and 3) in -+ * put_rq_private we use this flag to check -+ * that put_rq_private is not invoked on -+ * requests for which get_rq_private has been -+ * invoked. -+ */ -+ rq->rq_flags &= ~RQF_DISP_LIST; - goto inc_in_driver_start_rq; - } - -@@ -4637,6 +4653,7 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, - else - list_add_tail(&rq->queuelist, &bfqd->dispatch); - -+ rq->rq_flags |= RQF_DISP_LIST; - if (bfqq) - bfq_log_bfqq(bfqd, bfqq, - "insert_request %p in disp: at_head %d", -@@ -4824,6 +4841,10 @@ static void bfq_put_rq_private(struct request_queue *q, struct request *rq) - bfqd = bfqq->bfqd; - BUG_ON(!bfqd); - -+ if (rq->rq_flags & RQF_DISP_LIST) { -+ pr_crit("putting disp rq %p for %d", rq, bfqq->pid); -+ BUG(); -+ } - BUG_ON(rq->rq_flags & RQF_QUEUED); - BUG_ON(!(rq->rq_flags & RQF_ELVPRIV)); - -@@ -5015,6 +5036,7 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq, - - rq->elv.priv[0] = bic; - rq->elv.priv[1] = bfqq; -+ rq->rq_flags &= ~RQF_DISP_LIST; - - /* - * If a bfq_queue has only one process reference, it is owned -diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h -index 10f892ca585d..0048e59e6d07 100644 ---- a/include/linux/blkdev.h -+++ b/include/linux/blkdev.h -@@ -121,6 +121,8 @@ typedef __u32 __bitwise req_flags_t; - /* Look at ->special_vec for the actual data payload instead of the - bio chain. */ - #define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18)) -+/* DEBUG: rq in bfq-mq dispatch list */ -+#define RQF_DISP_LIST ((__force req_flags_t)(1 << 19)) - - /* flags that prevent us from merging requests: */ - #define RQF_NOMERGE_FLAGS \ - -From 49206f9052d13c96d49dbc36c612bed41b2d6552 Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Sat, 25 Feb 2017 17:38:05 +0100 -Subject: [PATCH 21/51] Complete support for cgroups - -This commit completes cgroups support for bfq-mq. 
In particular, it deals with -a sort of circular dependency introduced in blk-mq: the function -blkcg_activate_policy, invoked during scheduler initialization, triggers the -invocation of the has_work scheduler hook (before the init function is -finished). To adress this issue, this commit moves the invocation of -blkcg_activate_policy after the initialization of all the fields that could be -initialized before invoking blkcg_activate_policy itself. This enables has_work -to correctly return false, and thus to prevent the blk-mq stack from invoking -further scheduler hooks before the init function is finished. - -Signed-off-by: Paolo Valente ---- - block/Kconfig.iosched | 9 +++++ - block/bfq-mq-iosched.c | 108 ++++++++++++++++++++++++++++--------------------- - block/bfq-mq.h | 2 +- - 3 files changed, 72 insertions(+), 47 deletions(-) - -diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched -index 2d94af3d8b0a..299a6861fb90 100644 ---- a/block/Kconfig.iosched -+++ b/block/Kconfig.iosched -@@ -106,6 +106,15 @@ config MQ_IOSCHED_BFQ - guarantees a low latency to interactive and soft real-time - applications. Details in Documentation/block/bfq-iosched.txt - -+config MQ_BFQ_GROUP_IOSCHED -+ bool "BFQ-MQ hierarchical scheduling support" -+ depends on MQ_IOSCHED_BFQ && BLK_CGROUP -+ default n -+ ---help--- -+ -+ Enable hierarchical scheduling in BFQ-MQ, using the blkio -+ (cgroups-v1) or io (cgroups-v2) controller. -+ - config MQ_IOSCHED_DEADLINE - tristate "MQ deadline I/O scheduler" - default y -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -index b4d40bb712d2..02a1e7fd0ea4 100644 ---- a/block/bfq-mq-iosched.c -+++ b/block/bfq-mq-iosched.c -@@ -88,7 +88,6 @@ - #include "blk-mq.h" - #include "blk-mq-tag.h" - #include "blk-mq-sched.h" --#undef CONFIG_BFQ_GROUP_IOSCHED /* cgroups support not yet functional */ - #include "bfq-mq.h" - - /* Expiration time of sync (0) and async (1) requests, in ns. */ -@@ -233,15 +232,6 @@ static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd, - return NULL; - } - --#define BFQ_MQ --#include "bfq-sched.c" --#include "bfq-cgroup-included.c" -- --#define bfq_class_idle(bfqq) ((bfqq)->ioprio_class == IOPRIO_CLASS_IDLE) --#define bfq_class_rt(bfqq) ((bfqq)->ioprio_class == IOPRIO_CLASS_RT) -- --#define bfq_sample_valid(samples) ((samples) > 80) -- - /* - * Scheduler run of queue, if there are requests pending and no one in the - * driver that will restart queueing. -@@ -255,6 +245,43 @@ static void bfq_schedule_dispatch(struct bfq_data *bfqd) - } - - /* -+ * Next two functions release bfqd->lock and put the io context -+ * pointed by bfqd->ioc_to_put. This delayed put is used to not risk -+ * to take an ioc->lock while the scheduler lock is being held. 
-+ */ -+static void bfq_unlock_put_ioc(struct bfq_data *bfqd) -+{ -+ struct io_context *ioc_to_put = bfqd->ioc_to_put; -+ -+ bfqd->ioc_to_put = NULL; -+ spin_unlock_irq(&bfqd->lock); -+ -+ if (ioc_to_put) -+ put_io_context(ioc_to_put); -+} -+ -+static void bfq_unlock_put_ioc_restore(struct bfq_data *bfqd, -+ unsigned long flags) -+{ -+ struct io_context *ioc_to_put = bfqd->ioc_to_put; -+ -+ bfqd->ioc_to_put = NULL; -+ spin_unlock_irqrestore(&bfqd->lock, flags); -+ -+ if (ioc_to_put) -+ put_io_context(ioc_to_put); -+} -+ -+#define BFQ_MQ -+#include "bfq-sched.c" -+#include "bfq-cgroup-included.c" -+ -+#define bfq_class_idle(bfqq) ((bfqq)->ioprio_class == IOPRIO_CLASS_IDLE) -+#define bfq_class_rt(bfqq) ((bfqq)->ioprio_class == IOPRIO_CLASS_RT) -+ -+#define bfq_sample_valid(samples) ((samples) > 80) -+ -+/* - * Lifted from AS - choose which of rq1 and rq2 that is best served now. - * We choose the request that is closesr to the head right now. Distance - * behind the head is penalized and only allowed to a certain extent. -@@ -4050,34 +4077,6 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) - return rq; - } - --/* -- * Next two functions release bfqd->lock and put the io context -- * pointed by bfqd->ioc_to_put. This delayed put is used to not risk -- * to take an ioc->lock while the scheduler lock is being held. -- */ --static void bfq_unlock_put_ioc(struct bfq_data *bfqd) --{ -- struct io_context *ioc_to_put = bfqd->ioc_to_put; -- -- bfqd->ioc_to_put = NULL; -- spin_unlock_irq(&bfqd->lock); -- -- if (ioc_to_put) -- put_io_context(ioc_to_put); --} -- --static void bfq_unlock_put_ioc_restore(struct bfq_data *bfqd, -- unsigned long flags) --{ -- struct io_context *ioc_to_put = bfqd->ioc_to_put; -- -- bfqd->ioc_to_put = NULL; -- spin_unlock_irqrestore(&bfqd->lock, flags); -- -- if (ioc_to_put) -- put_io_context(ioc_to_put); --} -- - static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) - { - struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; -@@ -5239,6 +5238,10 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e) - } - eq->elevator_data = bfqd; - -+ spin_lock_irq(q->queue_lock); -+ q->elevator = eq; -+ spin_unlock_irq(q->queue_lock); -+ - /* - * Our fallback bfqq if bfq_find_alloc_queue() runs into OOM issues. - * Grab a permanent reference to it, so that the normal code flow -@@ -5261,12 +5264,7 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e) - bfqd->oom_bfqq.entity.prio_changed = 1; - - bfqd->queue = q; -- -- bfqd->root_group = bfq_create_group_hierarchy(bfqd, q->node); -- if (!bfqd->root_group) -- goto out_free; -- bfq_init_root_group(bfqd->root_group, bfqd); -- bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group); -+ INIT_LIST_HEAD(&bfqd->dispatch); - - hrtimer_init(&bfqd->idle_slice_timer, CLOCK_MONOTONIC, - HRTIMER_MODE_REL); -@@ -5324,9 +5322,27 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e) - bfqd->device_speed = BFQ_BFQD_FAST; - - spin_lock_init(&bfqd->lock); -- INIT_LIST_HEAD(&bfqd->dispatch); - -- q->elevator = eq; -+ /* -+ * The invocation of the next bfq_create_group_hierarchy -+ * function is the head of a chain of function calls -+ * (bfq_create_group_hierarchy->blkcg_activate_policy-> -+ * blk_mq_freeze_queue) that may lead to the invocation of the -+ * has_work hook function. 
For this reason, -+ * bfq_create_group_hierarchy is invoked only after all -+ * scheduler data has been initialized, apart from the fields -+ * that can be initialized only after invoking -+ * bfq_create_group_hierarchy. This, in particular, enables -+ * has_work to correctly return false. Of course, to avoid -+ * other inconsistencies, the blk-mq stack must then refrain -+ * from invoking further scheduler hooks before this init -+ * function is finished. -+ */ -+ bfqd->root_group = bfq_create_group_hierarchy(bfqd, q->node); -+ if (!bfqd->root_group) -+ goto out_free; -+ bfq_init_root_group(bfqd->root_group, bfqd); -+ bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group); - - return 0; - -diff --git a/block/bfq-mq.h b/block/bfq-mq.h -index bd83f1c02573..2c81c02bccc4 100644 ---- a/block/bfq-mq.h -+++ b/block/bfq-mq.h -@@ -20,7 +20,7 @@ - #include - - /* see comments on CONFIG_BFQ_GROUP_IOSCHED in bfq.h */ --#ifdef CONFIG_BFQ_MQ_GROUP_IOSCHED -+#ifdef CONFIG_MQ_BFQ_GROUP_IOSCHED - #define BFQ_GROUP_IOSCHED_ENABLED - #endif - - -From 62d12db23ce14d2716b5cff7d2635fbc817b96d0 Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Fri, 17 Mar 2017 06:15:18 +0100 -Subject: [PATCH 22/51] Remove all get and put of I/O contexts - -When a bfq queue is set in service and when it is merged, a reference -to the I/O context associated with the queue is taken. This reference -is then released when the queue is deselected from service or -split. More precisely, the release of the reference is postponed to -when the scheduler lock is released, to avoid nesting between the -scheduler and the I/O-context lock. In fact, such nesting would lead -to deadlocks, because of other code paths that take the same locks in -the opposite order. This postponing of I/O-context releases does -complicate code. - -This commit addresses this issue by modifying involved operations in -such a way to not need to get the above I/O-context references any -more. Then it also removes any get and release of these references. - -Signed-off-by: Paolo Valente ---- - block/bfq-cgroup-included.c | 2 +- - block/bfq-mq-iosched.c | 127 ++++++++------------------------------------ - block/bfq-mq.h | 11 ---- - block/bfq-sched.c | 17 ------ - 4 files changed, 22 insertions(+), 135 deletions(-) - -diff --git a/block/bfq-cgroup-included.c b/block/bfq-cgroup-included.c -index cf59eeb7f08e..dfacca799b5e 100644 ---- a/block/bfq-cgroup-included.c -+++ b/block/bfq-cgroup-included.c -@@ -773,7 +773,7 @@ static void bfq_pd_offline(struct blkg_policy_data *pd) - bfq_put_async_queues(bfqd, bfqg); - - #ifdef BFQ_MQ -- bfq_unlock_put_ioc_restore(bfqd, flags); -+ spin_unlock_irqrestore(&bfqd->lock, flags); - #endif - /* - * @blkg is going offline and will be ignored by -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -index 02a1e7fd0ea4..8e7589d3280f 100644 ---- a/block/bfq-mq-iosched.c -+++ b/block/bfq-mq-iosched.c -@@ -244,34 +244,6 @@ static void bfq_schedule_dispatch(struct bfq_data *bfqd) - } - } - --/* -- * Next two functions release bfqd->lock and put the io context -- * pointed by bfqd->ioc_to_put. This delayed put is used to not risk -- * to take an ioc->lock while the scheduler lock is being held. 
-- */ --static void bfq_unlock_put_ioc(struct bfq_data *bfqd) --{ -- struct io_context *ioc_to_put = bfqd->ioc_to_put; -- -- bfqd->ioc_to_put = NULL; -- spin_unlock_irq(&bfqd->lock); -- -- if (ioc_to_put) -- put_io_context(ioc_to_put); --} -- --static void bfq_unlock_put_ioc_restore(struct bfq_data *bfqd, -- unsigned long flags) --{ -- struct io_context *ioc_to_put = bfqd->ioc_to_put; -- -- bfqd->ioc_to_put = NULL; -- spin_unlock_irqrestore(&bfqd->lock, flags); -- -- if (ioc_to_put) -- put_io_context(ioc_to_put); --} -- - #define BFQ_MQ - #include "bfq-sched.c" - #include "bfq-cgroup-included.c" -@@ -1747,7 +1719,6 @@ static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio) - if (free) - blk_mq_free_request(free); - bfqd->bio_bfqq_set = false; -- BUG_ON(bfqd->ioc_to_put); - spin_unlock_irq(&bfqd->lock); - - return ret; -@@ -1812,7 +1783,6 @@ static void bfq_request_merged(struct request_queue *q, struct request *req, - bfq_updated_next_req(bfqd, bfqq); - bfq_pos_tree_add_move(bfqd, bfqq); - } -- BUG_ON(bfqd->ioc_to_put); - spin_unlock_irq(&bfqd->lock); - } - } -@@ -1858,7 +1828,6 @@ static void bfq_requests_merged(struct request_queue *q, struct request *rq, - - bfq_remove_request(q, next); - -- BUG_ON(bfqq->bfqd->ioc_to_put); - spin_unlock_irq(&bfqq->bfqd->lock); - end: - bfqg_stats_update_io_merged(bfqq_group(bfqq), next->cmd_flags); -@@ -2035,20 +2004,18 @@ bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq) - * first time that the requests of some process are redirected to - * it. - * -- * We redirect bfqq to new_bfqq and not the opposite, because we -- * are in the context of the process owning bfqq, hence we have -- * the io_cq of this process. So we can immediately configure this -- * io_cq to redirect the requests of the process to new_bfqq. -+ * We redirect bfqq to new_bfqq and not the opposite, because -+ * we are in the context of the process owning bfqq, thus we -+ * have the io_cq of this process. So we can immediately -+ * configure this io_cq to redirect the requests of the -+ * process to new_bfqq. In contrast, the io_cq of new_bfqq is -+ * not available any more (new_bfqq->bic == NULL). - * -- * NOTE, even if new_bfqq coincides with the in-service queue, the -- * io_cq of new_bfqq is not available, because, if the in-service -- * queue is shared, bfqd->in_service_bic may not point to the -- * io_cq of the in-service queue. -- * Redirecting the requests of the process owning bfqq to the -- * currently in-service queue is in any case the best option, as -- * we feed the in-service queue with new requests close to the -- * last request served and, by doing so, hopefully increase the -- * throughput. -+ * Anyway, even in case new_bfqq coincides with the in-service -+ * queue, redirecting requests the in-service queue is the -+ * best option, as we feed the in-service queue with new -+ * requests close to the last request served and, by doing so, -+ * are likely to increase the throughput. 
- */ - bfqq->new_bfqq = new_bfqq; - new_bfqq->ref += process_refs; -@@ -2147,13 +2114,13 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq, - in_service_bfqq = bfqd->in_service_queue; - - if (in_service_bfqq && in_service_bfqq != bfqq && -- bfqd->in_service_bic && wr_from_too_long(in_service_bfqq) -+ wr_from_too_long(in_service_bfqq) - && likely(in_service_bfqq == &bfqd->oom_bfqq)) - bfq_log_bfqq(bfqd, bfqq, - "would have tried merge with in-service-queue, but wr"); - -- if (!in_service_bfqq || in_service_bfqq == bfqq || -- !bfqd->in_service_bic || wr_from_too_long(in_service_bfqq) || -+ if (!in_service_bfqq || in_service_bfqq == bfqq -+ || wr_from_too_long(in_service_bfqq) || - unlikely(in_service_bfqq == &bfqd->oom_bfqq)) - goto check_scheduled; - -@@ -2214,16 +2181,6 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq) - BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish)); - } - --static void bfq_get_bic_reference(struct bfq_queue *bfqq) --{ -- /* -- * If bfqq->bic has a non-NULL value, the bic to which it belongs -- * is about to begin using a shared bfq_queue. -- */ -- if (bfqq->bic) -- atomic_long_inc(&bfqq->bic->icq.ioc->refcount); --} -- - static void - bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic, - struct bfq_queue *bfqq, struct bfq_queue *new_bfqq) -@@ -2280,12 +2237,6 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic, - bfqd->wr_busy_queues); - - /* -- * Grab a reference to the bic, to prevent it from being destroyed -- * before being possibly touched by a bfq_split_bfqq(). -- */ -- bfq_get_bic_reference(bfqq); -- bfq_get_bic_reference(new_bfqq); -- /* - * Merge queues (that is, let bic redirect its requests to new_bfqq) - */ - bic_set_bfqq(bic, new_bfqq, 1); -@@ -2472,16 +2423,10 @@ static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd) - static void bfq_arm_slice_timer(struct bfq_data *bfqd) - { - struct bfq_queue *bfqq = bfqd->in_service_queue; -- struct bfq_io_cq *bic; - u32 sl; - - BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list)); - -- /* Processes have exited, don't wait. */ -- bic = bfqd->in_service_bic; -- if (!bic || atomic_read(&bic->icq.ioc->active_ref) == 0) -- return; -- - bfq_mark_bfqq_wait_request(bfqq); - - /* -@@ -3922,11 +3867,6 @@ static struct request *bfq_dispatch_rq_from_bfqq(struct bfq_data *bfqd, - bfq_bfqq_budget_left(bfqq), - bfqq->dispatched); - -- if (!bfqd->in_service_bic) { -- atomic_long_inc(&RQ_BIC(rq)->icq.ioc->refcount); -- bfqd->in_service_bic = RQ_BIC(rq); -- } -- - /* - * Expire bfqq, pretending that its budget expired, if bfqq - * belongs to CLASS_IDLE and other queues are waiting for -@@ -4085,7 +4025,7 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) - spin_lock_irq(&bfqd->lock); - - rq = __bfq_dispatch_request(hctx); -- bfq_unlock_put_ioc(bfqd); -+ spin_unlock_irq(&bfqd->lock); - - return rq; - } -@@ -4184,21 +4124,10 @@ static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync) - unsigned long flags; - - spin_lock_irqsave(&bfqd->lock, flags); -- BUG_ON(bfqd->ioc_to_put); -- /* -- * If the bic is using a shared queue, put the -- * reference taken on the io_context when the bic -- * started using a shared bfq_queue. This put cannot -- * make ioc->ref_count reach 0, then no ioc->lock -- * risks to be taken (leading to possible deadlock -- * scenarios). 
-- */ -- if (is_sync && bfq_bfqq_coop(bfqq)) -- put_io_context(bic->icq.ioc); - - bfq_exit_bfqq(bfqd, bfqq); - bic_set_bfqq(bic, NULL, is_sync); -- bfq_unlock_put_ioc_restore(bfqd, flags); -+ spin_unlock_irqrestore(&bfqd->lock, flags); - } - } - -@@ -4633,12 +4562,10 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, - - spin_lock_irq(&bfqd->lock); - if (blk_mq_sched_try_insert_merge(q, rq)) { -- BUG_ON(bfqd->ioc_to_put); - spin_unlock_irq(&bfqd->lock); - return; - } - -- BUG_ON(bfqd->ioc_to_put); - spin_unlock_irq(&bfqd->lock); - - blk_mq_sched_request_inserted(rq); -@@ -4671,7 +4598,7 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, - } - } - -- bfq_unlock_put_ioc(bfqd); -+ spin_unlock_irq(&bfqd->lock); - } - - static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx, -@@ -4864,12 +4791,11 @@ static void bfq_put_rq_private(struct request_queue *q, struct request *rq) - unsigned long flags; - - spin_lock_irqsave(&bfqd->lock, flags); -- BUG_ON(bfqd->ioc_to_put); - - bfq_completed_request(bfqq, bfqd); - bfq_put_rq_priv_body(bfqq); - -- bfq_unlock_put_ioc_restore(bfqd, flags); -+ spin_unlock_irqrestore(&bfqd->lock, flags); - } else { - /* - * Request rq may be still/already in the scheduler, -@@ -4992,7 +4918,6 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq, - - bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, false, is_sync, - &new_queue); -- BUG_ON(bfqd->ioc_to_put); - - if (unlikely(!new_queue)) { - /* If the queue was seeky for too long, break it apart. */ -@@ -5005,14 +4930,6 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq, - bic->saved_in_large_burst = true; - - bfqq = bfq_split_bfqq(bic, bfqq); -- /* -- * A reference to bic->icq.ioc needs to be -- * released after a queue split. Do not do it -- * immediately, to not risk to possibly take -- * an ioc->lock while holding the scheduler -- * lock. 
-- */ -- bfqd->ioc_to_put = bic->icq.ioc; - - if (!bfqq) - bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, -@@ -5045,7 +4962,7 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq, - */ - if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) { - bfqq->bic = bic; -- if (bfqd->ioc_to_put) { /* if true, then there has been a split */ -+ if (split) { - /* - * The queue has just been split from a shared - * queue: restore the idle window and the -@@ -5059,7 +4976,7 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq, - if (unlikely(bfq_bfqq_just_created(bfqq))) - bfq_handle_burst(bfqd, bfqq); - -- bfq_unlock_put_ioc(bfqd); -+ spin_unlock_irq(&bfqd->lock); - - return 0; - -@@ -5077,7 +4994,6 @@ static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq) - - BUG_ON(!bfqd); - spin_lock_irqsave(&bfqd->lock, flags); -- BUG_ON(bfqd->ioc_to_put); - - bfq_log_bfqq(bfqd, bfqq, "handling slice_timer expiration"); - bfq_clear_bfqq_wait_request(bfqq); -@@ -5108,7 +5024,7 @@ static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq) - bfq_bfqq_expire(bfqd, bfqq, true, reason); - - schedule_dispatch: -- bfq_unlock_put_ioc_restore(bfqd, flags); -+ spin_unlock_irqrestore(&bfqd->lock, flags); - bfq_schedule_dispatch(bfqd); - } - -@@ -5186,7 +5102,6 @@ static void bfq_exit_queue(struct elevator_queue *e) - spin_lock_irq(&bfqd->lock); - list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list) - bfq_deactivate_bfqq(bfqd, bfqq, false, false); -- BUG_ON(bfqd->ioc_to_put); - spin_unlock_irq(&bfqd->lock); - - hrtimer_cancel(&bfqd->idle_slice_timer); -diff --git a/block/bfq-mq.h b/block/bfq-mq.h -index 2c81c02bccc4..36ee24a87dda 100644 ---- a/block/bfq-mq.h -+++ b/block/bfq-mq.h -@@ -458,8 +458,6 @@ struct bfq_data { - - /* bfq_queue in service */ - struct bfq_queue *in_service_queue; -- /* bfq_io_cq (bic) associated with the @in_service_queue */ -- struct bfq_io_cq *in_service_bic; - - /* on-disk position of the last served request */ - sector_t last_position; -@@ -621,15 +619,6 @@ struct bfq_data { - struct bfq_queue *bio_bfqq; - /* Extra flag used only for TESTING */ - bool bio_bfqq_set; -- -- /* -- * io context to put right after bfqd->lock is released. This -- * filed is used to perform put_io_context, when needed, to -- * after the scheduler lock has been released, and thus -- * prevent an ioc->lock from being possibly taken while the -- * scheduler lock is being held. -- */ -- struct io_context *ioc_to_put; - }; - - enum bfqq_state_flags { -diff --git a/block/bfq-sched.c b/block/bfq-sched.c -index 85e59eeb3569..9c4e6797d8c9 100644 ---- a/block/bfq-sched.c -+++ b/block/bfq-sched.c -@@ -1904,23 +1904,6 @@ static void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd) - struct bfq_entity *in_serv_entity = &in_serv_bfqq->entity; - struct bfq_entity *entity = in_serv_entity; - -- if (bfqd->in_service_bic) { --#ifdef BFQ_MQ -- BUG_ON(bfqd->ioc_to_put); -- /* -- * Schedule the release of a reference to -- * bfqd->in_service_bic->icq.ioc to right after the -- * scheduler lock is released. This ioc is not -- * released immediately, to not risk to possibly take -- * an ioc->lock while holding the scheduler lock. 
-- */ -- bfqd->ioc_to_put = bfqd->in_service_bic->icq.ioc; --#else -- put_io_context(bfqd->in_service_bic->icq.ioc); --#endif -- bfqd->in_service_bic = NULL; -- } -- - bfq_clear_bfqq_wait_request(in_serv_bfqq); - hrtimer_try_to_cancel(&bfqd->idle_slice_timer); - bfqd->in_service_queue = NULL; - -From 1521ad11f8684cf0a1b7249249cd406fee50da6d Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Wed, 29 Mar 2017 18:41:46 +0200 -Subject: [PATCH 23/51] BUGFIX: Remove unneeded and deadlock-causing lock in - request_merged - -Signed-off-by: Paolo Valente ---- - block/bfq-mq-iosched.c | 2 -- - 1 file changed, 2 deletions(-) - -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -index 8e7589d3280f..bb046335ff4f 100644 ---- a/block/bfq-mq-iosched.c -+++ b/block/bfq-mq-iosched.c -@@ -1761,7 +1761,6 @@ static void bfq_request_merged(struct request_queue *q, struct request *req, - BUG_ON(RQ_BFQQ(req) != bfqq); - elv_rb_add(&bfqq->sort_list, req); - -- spin_lock_irq(&bfqd->lock); - /* Choose next request to be served for bfqq */ - prev = bfqq->next_rq; - next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req, -@@ -1783,7 +1782,6 @@ static void bfq_request_merged(struct request_queue *q, struct request *req, - bfq_updated_next_req(bfqd, bfqq); - bfq_pos_tree_add_move(bfqd, bfqq); - } -- spin_unlock_irq(&bfqd->lock); - } - } - - -From 9136b4c953918ea937254c57cfb787b55b5bc2c6 Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Wed, 29 Mar 2017 18:55:30 +0200 -Subject: [PATCH 24/51] Fix wrong unlikely - -Signed-off-by: Paolo Valente ---- - block/bfq-mq-iosched.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -index bb046335ff4f..3ae9bd424b3f 100644 ---- a/block/bfq-mq-iosched.c -+++ b/block/bfq-mq-iosched.c -@@ -4917,7 +4917,7 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq, - bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, false, is_sync, - &new_queue); - -- if (unlikely(!new_queue)) { -+ if (likely(!new_queue)) { - /* If the queue was seeky for too long, break it apart. 
*/ - if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq)) { - BUG_ON(!is_sync); - -From 8e05f722f19645f2278f6962368ca3b7c2a81c9c Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Fri, 12 May 2017 09:51:18 +0200 -Subject: [PATCH 25/51] Change cgroup params prefix to bfq-mq for bfq-mq - -Signed-off-by: Paolo Valente ---- - block/bfq-cgroup-included.c | 54 ++++++++++++++++++++++++++------------------- - 1 file changed, 31 insertions(+), 23 deletions(-) - -diff --git a/block/bfq-cgroup-included.c b/block/bfq-cgroup-included.c -index dfacca799b5e..9e9b0a09e26f 100644 ---- a/block/bfq-cgroup-included.c -+++ b/block/bfq-cgroup-included.c -@@ -995,9 +995,15 @@ bfq_create_group_hierarchy(struct bfq_data *bfqd, int node) - return blkg_to_bfqg(bfqd->queue->root_blkg); - } - -+#ifdef BFQ_MQ -+#define BFQ_CGROUP_FNAME(param) "bfq-mq."#param -+#else -+#define BFQ_CGROUP_FNAME(param) "bfq."#param -+#endif -+ - static struct cftype bfq_blkcg_legacy_files[] = { - { -- .name = "bfq.weight", -+ .name = BFQ_CGROUP_FNAME(weight), - .flags = CFTYPE_NOT_ON_ROOT, - .seq_show = bfq_io_show_weight, - .write_u64 = bfq_io_set_weight_legacy, -@@ -1005,106 +1011,106 @@ static struct cftype bfq_blkcg_legacy_files[] = { - - /* statistics, covers only the tasks in the bfqg */ - { -- .name = "bfq.time", -+ .name = BFQ_CGROUP_FNAME(time), - .private = offsetof(struct bfq_group, stats.time), - .seq_show = bfqg_print_stat, - }, - { -- .name = "bfq.sectors", -+ .name = BFQ_CGROUP_FNAME(sectors), - .seq_show = bfqg_print_stat_sectors, - }, - { -- .name = "bfq.io_service_bytes", -+ .name = BFQ_CGROUP_FNAME(io_service_bytes), - .private = (unsigned long)&blkcg_policy_bfq, - .seq_show = blkg_print_stat_bytes, - }, - { -- .name = "bfq.io_serviced", -+ .name = BFQ_CGROUP_FNAME(io_serviced), - .private = (unsigned long)&blkcg_policy_bfq, - .seq_show = blkg_print_stat_ios, - }, - { -- .name = "bfq.io_service_time", -+ .name = BFQ_CGROUP_FNAME(io_service_time), - .private = offsetof(struct bfq_group, stats.service_time), - .seq_show = bfqg_print_rwstat, - }, - { -- .name = "bfq.io_wait_time", -+ .name = BFQ_CGROUP_FNAME(io_wait_time), - .private = offsetof(struct bfq_group, stats.wait_time), - .seq_show = bfqg_print_rwstat, - }, - { -- .name = "bfq.io_merged", -+ .name = BFQ_CGROUP_FNAME(io_merged), - .private = offsetof(struct bfq_group, stats.merged), - .seq_show = bfqg_print_rwstat, - }, - { -- .name = "bfq.io_queued", -+ .name = BFQ_CGROUP_FNAME(io_queued), - .private = offsetof(struct bfq_group, stats.queued), - .seq_show = bfqg_print_rwstat, - }, - - /* the same statictics which cover the bfqg and its descendants */ - { -- .name = "bfq.time_recursive", -+ .name = BFQ_CGROUP_FNAME(time_recursive), - .private = offsetof(struct bfq_group, stats.time), - .seq_show = bfqg_print_stat_recursive, - }, - { -- .name = "bfq.sectors_recursive", -+ .name = BFQ_CGROUP_FNAME(sectors_recursive), - .seq_show = bfqg_print_stat_sectors_recursive, - }, - { -- .name = "bfq.io_service_bytes_recursive", -+ .name = BFQ_CGROUP_FNAME(io_service_bytes_recursive), - .private = (unsigned long)&blkcg_policy_bfq, - .seq_show = blkg_print_stat_bytes_recursive, - }, - { -- .name = "bfq.io_serviced_recursive", -+ .name = BFQ_CGROUP_FNAME(io_serviced_recursive), - .private = (unsigned long)&blkcg_policy_bfq, - .seq_show = blkg_print_stat_ios_recursive, - }, - { -- .name = "bfq.io_service_time_recursive", -+ .name = BFQ_CGROUP_FNAME(io_service_time_recursive), - .private = offsetof(struct bfq_group, stats.service_time), - .seq_show = 
bfqg_print_rwstat_recursive, - }, - { -- .name = "bfq.io_wait_time_recursive", -+ .name = BFQ_CGROUP_FNAME(io_wait_time_recursive), - .private = offsetof(struct bfq_group, stats.wait_time), - .seq_show = bfqg_print_rwstat_recursive, - }, - { -- .name = "bfq.io_merged_recursive", -+ .name = BFQ_CGROUP_FNAME(io_merged_recursive), - .private = offsetof(struct bfq_group, stats.merged), - .seq_show = bfqg_print_rwstat_recursive, - }, - { -- .name = "bfq.io_queued_recursive", -+ .name = BFQ_CGROUP_FNAME(io_queued_recursive), - .private = offsetof(struct bfq_group, stats.queued), - .seq_show = bfqg_print_rwstat_recursive, - }, - { -- .name = "bfq.avg_queue_size", -+ .name = BFQ_CGROUP_FNAME(avg_queue_size), - .seq_show = bfqg_print_avg_queue_size, - }, - { -- .name = "bfq.group_wait_time", -+ .name = BFQ_CGROUP_FNAME(group_wait_time), - .private = offsetof(struct bfq_group, stats.group_wait_time), - .seq_show = bfqg_print_stat, - }, - { -- .name = "bfq.idle_time", -+ .name = BFQ_CGROUP_FNAME(idle_time), - .private = offsetof(struct bfq_group, stats.idle_time), - .seq_show = bfqg_print_stat, - }, - { -- .name = "bfq.empty_time", -+ .name = BFQ_CGROUP_FNAME(empty_time), - .private = offsetof(struct bfq_group, stats.empty_time), - .seq_show = bfqg_print_stat, - }, - { -- .name = "bfq.dequeue", -+ .name = BFQ_CGROUP_FNAME(dequeue), - .private = offsetof(struct bfq_group, stats.dequeue), - .seq_show = bfqg_print_stat, - }, -@@ -1113,7 +1119,7 @@ static struct cftype bfq_blkcg_legacy_files[] = { - - static struct cftype bfq_blkg_files[] = { - { -- .name = "bfq.weight", -+ .name = BFQ_CGROUP_FNAME(weight), - .flags = CFTYPE_NOT_ON_ROOT, - .seq_show = bfq_io_show_weight, - .write = bfq_io_set_weight, -@@ -1121,6 +1127,8 @@ static struct cftype bfq_blkg_files[] = { - {} /* terminate */ - }; - -+#undef BFQ_CGROUP_FNAME -+ - #else /* BFQ_GROUP_IOSCHED_ENABLED */ - - static inline void bfqg_stats_update_io_add(struct bfq_group *bfqg, - -From abdf7565dadbb00e78be5f4fb2cc9b157649840e Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Fri, 12 May 2017 11:56:13 +0200 -Subject: [PATCH 26/51] Add tentative extra tests on groups, reqs and queues - -Signed-off-by: Paolo Valente ---- - block/bfq-cgroup-included.c | 1 + - block/bfq-mq-iosched.c | 5 +++++ - include/linux/blkdev.h | 2 ++ - 3 files changed, 8 insertions(+) - -diff --git a/block/bfq-cgroup-included.c b/block/bfq-cgroup-included.c -index 9e9b0a09e26f..72107ad12220 100644 ---- a/block/bfq-cgroup-included.c -+++ b/block/bfq-cgroup-included.c -@@ -412,6 +412,7 @@ static void bfq_pd_init(struct blkg_policy_data *pd) - BUG_ON(!blkg); - bfqg = blkg_to_bfqg(blkg); - bfqd = blkg->q->elevator->elevator_data; -+ BUG_ON(bfqg == bfqd->root_group); - entity = &bfqg->entity; - d = blkcg_to_bfqgd(blkg->blkcg); - -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -index 3ae9bd424b3f..a9e3406fef06 100644 ---- a/block/bfq-mq-iosched.c -+++ b/block/bfq-mq-iosched.c -@@ -4494,6 +4494,7 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq, - static void __bfq_insert_request(struct bfq_data *bfqd, struct request *rq) - { - struct bfq_queue *bfqq = RQ_BFQQ(rq), *new_bfqq; -+ BUG_ON(!bfqq); - - assert_spin_locked(&bfqd->lock); - -@@ -4587,6 +4588,9 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, - "insert_request %p in disp: at_head %d", - rq, at_head); - } else { -+ BUG_ON(!(rq->rq_flags & RQF_GOT)); -+ rq->rq_flags &= ~RQF_GOT; -+ - __bfq_insert_request(bfqd, rq); - - if (rq_mergeable(rq)) { -@@ -4974,6 
+4978,7 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq, - if (unlikely(bfq_bfqq_just_created(bfqq))) - bfq_handle_burst(bfqd, bfqq); - -+ rq->rq_flags |= RQF_GOT; - spin_unlock_irq(&bfqd->lock); - - return 0; -diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h -index 0048e59e6d07..9ae814743095 100644 ---- a/include/linux/blkdev.h -+++ b/include/linux/blkdev.h -@@ -123,6 +123,8 @@ typedef __u32 __bitwise req_flags_t; - #define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18)) - /* DEBUG: rq in bfq-mq dispatch list */ - #define RQF_DISP_LIST ((__force req_flags_t)(1 << 19)) -+/* DEBUG: rq had get_rq_private executed on it */ -+#define RQF_GOT ((__force req_flags_t)(1 << 20)) - - /* flags that prevent us from merging requests: */ - #define RQF_NOMERGE_FLAGS \ - -From 9e1c1514bc947c4e04502331372b1cc58459d8d1 Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Mon, 15 May 2017 22:25:03 +0200 -Subject: [PATCH 27/51] block, bfq-mq: access and cache blkg data only when - safe - -In blk-cgroup, operations on blkg objects are protected with the -request_queue lock. This is no more the lock that protects -I/O-scheduler operations in blk-mq. In fact, the latter are now -protected with a finer-grained per-scheduler-instance lock. As a -consequence, although blkg lookups are also rcu-protected, blk-mq I/O -schedulers may see inconsistent data when they access blkg and -blkg-related objects. BFQ does access these objects, and does incur -this problem, in the following case. - -The blkg_lookup performed in bfq_get_queue, being protected (only) -through rcu, may happen to return the address of a copy of the -original blkg. If this is the case, then the blkg_get performed in -bfq_get_queue, to pin down the blkg, is useless: it does not prevent -blk-cgroup code from destroying both the original blkg and all objects -directly or indirectly referred by the copy of the blkg. BFQ accesses -these objects, which typically causes a crash for NULL-pointer -dereference of memory-protection violation. - -Some additional protection mechanism should be added to blk-cgroup to -address this issue. In the meantime, this commit provides a quick -temporary fix for BFQ: cache (when safe) blkg data that might -disappear right after a blkg_lookup. - -In particular, this commit exploits the following facts to achieve its -goal without introducing further locks. Destroy operations on a blkg -invoke, as a first step, hooks of the scheduler associated with the -blkg. And these hooks are executed with bfqd->lock held for BFQ. As a -consequence, for any blkg associated with the request queue an -instance of BFQ is attached to, we are guaranteed that such a blkg is -not destroyed, and that all the pointers it contains are consistent, -while that instance is holding its bfqd->lock. A blkg_lookup performed -with bfqd->lock held then returns a fully consistent blkg, which -remains consistent until this lock is held. In more detail, this holds -even if the returned blkg is a copy of the original one. - -Finally, also the object describing a group inside BFQ needs to be -protected from destruction on the blkg_free of the original blkg -(which invokes bfq_pd_free). This commit adds private refcounting for -this object, to let it disappear only after no bfq_queue refers to it -any longer. - -This commit also removes or updates some stale comments on locking -issues related to blk-cgroup operations. 
- -Reported-by: Tomas Konir -Reported-by: Lee Tibbert -Reported-by: Marco Piazza -Signed-off-by: Paolo Valente -Tested-by: Tomas Konir -Tested-by: Lee Tibbert -Tested-by: Marco Piazza ---- - block/bfq-cgroup-included.c | 149 ++++++++++++++++++++++++++++++++++++++++---- - block/bfq-mq-iosched.c | 2 +- - block/bfq-mq.h | 26 +++----- - 3 files changed, 148 insertions(+), 29 deletions(-) - -diff --git a/block/bfq-cgroup-included.c b/block/bfq-cgroup-included.c -index 72107ad12220..d903393ee78a 100644 ---- a/block/bfq-cgroup-included.c -+++ b/block/bfq-cgroup-included.c -@@ -43,7 +43,11 @@ BFQG_FLAG_FNS(idling) - BFQG_FLAG_FNS(empty) - #undef BFQG_FLAG_FNS - -+#ifdef BFQ_MQ -+/* This should be called with the scheduler lock held. */ -+#else - /* This should be called with the queue_lock held. */ -+#endif - static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats) - { - unsigned long long now; -@@ -58,7 +62,11 @@ static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats) - bfqg_stats_clear_waiting(stats); - } - -+#ifdef BFQ_MQ -+/* This should be called with the scheduler lock held. */ -+#else - /* This should be called with the queue_lock held. */ -+#endif - static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg, - struct bfq_group *curr_bfqg) - { -@@ -72,7 +80,11 @@ static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg, - bfqg_stats_mark_waiting(stats); - } - -+#ifdef BFQ_MQ -+/* This should be called with the scheduler lock held. */ -+#else - /* This should be called with the queue_lock held. */ -+#endif - static void bfqg_stats_end_empty_time(struct bfqg_stats *stats) - { - unsigned long long now; -@@ -198,14 +210,43 @@ static struct bfq_group *bfqq_group(struct bfq_queue *bfqq) - - static void bfqg_get(struct bfq_group *bfqg) - { -- return blkg_get(bfqg_to_blkg(bfqg)); -+#ifdef BFQ_MQ -+ bfqg->ref++; -+#else -+ blkg_get(bfqg_to_blkg(bfqg)); -+#endif - } - - static void bfqg_put(struct bfq_group *bfqg) - { -- return blkg_put(bfqg_to_blkg(bfqg)); -+#ifdef BFQ_MQ -+ bfqg->ref--; -+ -+ BUG_ON(bfqg->ref < 0); -+ if (bfqg->ref == 0) -+ kfree(bfqg); -+#else -+ blkg_put(bfqg_to_blkg(bfqg)); -+#endif -+} -+ -+#ifdef BFQ_MQ -+static void bfqg_and_blkg_get(struct bfq_group *bfqg) -+{ -+ /* see comments in bfq_bic_update_cgroup for why refcounting bfqg */ -+ bfqg_get(bfqg); -+ -+ blkg_get(bfqg_to_blkg(bfqg)); - } - -+static void bfqg_and_blkg_put(struct bfq_group *bfqg) -+{ -+ bfqg_put(bfqg); -+ -+ blkg_put(bfqg_to_blkg(bfqg)); -+} -+#endif -+ - static void bfqg_stats_update_io_add(struct bfq_group *bfqg, - struct bfq_queue *bfqq, - unsigned int op) -@@ -310,7 +351,15 @@ static void bfq_init_entity(struct bfq_entity *entity, - if (bfqq) { - bfqq->ioprio = bfqq->new_ioprio; - bfqq->ioprio_class = bfqq->new_ioprio_class; -+#ifdef BFQ_MQ -+ /* -+ * Make sure that bfqg and its associated blkg do not -+ * disappear before entity. 
-+ */ -+ bfqg_and_blkg_get(bfqg); -+#else - bfqg_get(bfqg); -+#endif - } - entity->parent = bfqg->my_entity; /* NULL for root group */ - entity->sched_data = &bfqg->sched_data; -@@ -397,6 +446,10 @@ static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node) - return NULL; - } - -+#ifdef BFQ_MQ -+ /* see comments in bfq_bic_update_cgroup for why refcounting */ -+ bfqg_get(bfqg); -+#endif - return &bfqg->pd; - } - -@@ -432,7 +485,11 @@ static void bfq_pd_free(struct blkg_policy_data *pd) - struct bfq_group *bfqg = pd_to_bfqg(pd); - - bfqg_stats_exit(&bfqg->stats); -- return kfree(bfqg); -+#ifdef BFQ_MQ -+ bfqg_put(bfqg); -+#else -+ kfree(bfqg); -+#endif - } - - static void bfq_pd_reset_stats(struct blkg_policy_data *pd) -@@ -516,9 +573,16 @@ static void bfq_bfqq_expire(struct bfq_data *bfqd, - * Move @bfqq to @bfqg, deactivating it from its old group and reactivating - * it on the new one. Avoid putting the entity on the old group idle tree. - * -+#ifdef BFQ_MQ -+ * Must be called under the scheduler lock, to make sure that the blkg -+ * owning @bfqg does not disappear (see comments in -+ * bfq_bic_update_cgroup on guaranteeing the consistency of blkg -+ * objects). -+#else - * Must be called under the queue lock; the cgroup owning @bfqg must - * not disappear (by now this just means that we are called under - * rcu_read_lock()). -+#endif - */ - static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, - struct bfq_group *bfqg) -@@ -555,16 +619,20 @@ static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, - entity->tree); - bfq_put_idle_entity(bfq_entity_service_tree(entity), entity); - } -+#ifdef BFQ_MQ -+ bfqg_and_blkg_put(bfqq_group(bfqq)); -+#else - bfqg_put(bfqq_group(bfqq)); -+#endif - -- /* -- * Here we use a reference to bfqg. We don't need a refcounter -- * as the cgroup reference will not be dropped, so that its -- * destroy() callback will not be invoked. -- */ - entity->parent = bfqg->my_entity; - entity->sched_data = &bfqg->sched_data; -+#ifdef BFQ_MQ -+ /* pin down bfqg and its associated blkg */ -+ bfqg_and_blkg_get(bfqg); -+#else - bfqg_get(bfqg); -+#endif - - BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_bfqq_busy(bfqq)); - if (bfq_bfqq_busy(bfqq)) { -@@ -585,8 +653,14 @@ static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, - * @bic: the bic to move. - * @blkcg: the blk-cgroup to move to. - * -+#ifdef BFQ_MQ -+ * Move bic to blkcg, assuming that bfqd->lock is held; which makes -+ * sure that the reference to cgroup is valid across the call (see -+ * comments in bfq_bic_update_cgroup on this issue) -+#else - * Move bic to blkcg, assuming that bfqd->queue is locked; the caller - * has to make sure that the reference to cgroup is valid across the call. -+#endif - * - * NOTE: an alternative approach might have been to store the current - * cgroup in bfqq and getting a reference to it, reducing the lookup -@@ -645,6 +719,59 @@ static void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) - goto out; - - bfqg = __bfq_bic_change_cgroup(bfqd, bic, bio_blkcg(bio)); -+#ifdef BFQ_MQ -+ /* -+ * Update blkg_path for bfq_log_* functions. We cache this -+ * path, and update it here, for the following -+ * reasons. Operations on blkg objects in blk-cgroup are -+ * protected with the request_queue lock, and not with the -+ * lock that protects the instances of this scheduler -+ * (bfqd->lock). This exposes BFQ to the following sort of -+ * race. 
-+ * -+ * The blkg_lookup performed in bfq_get_queue, protected -+ * through rcu, may happen to return the address of a copy of -+ * the original blkg. If this is the case, then the -+ * bfqg_and_blkg_get performed in bfq_get_queue, to pin down -+ * the blkg, is useless: it does not prevent blk-cgroup code -+ * from destroying both the original blkg and all objects -+ * directly or indirectly referred by the copy of the -+ * blkg. -+ * -+ * On the bright side, destroy operations on a blkg invoke, as -+ * a first step, hooks of the scheduler associated with the -+ * blkg. And these hooks are executed with bfqd->lock held for -+ * BFQ. As a consequence, for any blkg associated with the -+ * request queue this instance of the scheduler is attached -+ * to, we are guaranteed that such a blkg is not destroyed, and -+ * that all the pointers it contains are consistent, while we -+ * are holding bfqd->lock. A blkg_lookup performed with -+ * bfqd->lock held then returns a fully consistent blkg, which -+ * remains consistent until this lock is held. -+ * -+ * Thanks to the last fact, and to the fact that: (1) bfqg has -+ * been obtained through a blkg_lookup in the above -+ * assignment, and (2) bfqd->lock is being held, here we can -+ * safely use the policy data for the involved blkg (i.e., the -+ * field bfqg->pd) to get to the blkg associated with bfqg, -+ * and then we can safely use any field of blkg. After we -+ * release bfqd->lock, even just getting blkg through this -+ * bfqg may cause dangling references to be traversed, as -+ * bfqg->pd may not exist any more. -+ * -+ * In view of the above facts, here we cache, in the bfqg, any -+ * blkg data we may need for this bic, and for its associated -+ * bfq_queue. As of now, we need to cache only the path of the -+ * blkg, which is used in the bfq_log_* functions. -+ * -+ * Finally, note that bfqg itself needs to be protected from -+ * destruction on the blkg_free of the original blkg (which -+ * invokes bfq_pd_free). We use an additional private -+ * refcounter for bfqg, to let it disappear only after no -+ * bfq_queue refers to it any longer. -+ */ -+ blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path)); -+#endif - bic->blkcg_serial_nr = serial_nr; - out: - rcu_read_unlock(); -@@ -682,8 +809,6 @@ static void bfq_reparent_leaf_entity(struct bfq_data *bfqd, - * @bfqd: the device data structure with the root group. - * @bfqg: the group to move from. - * @st: the service tree with the entities. -- * -- * Needs queue_lock to be taken and reference to be valid over the call. - */ - static void bfq_reparent_active_entities(struct bfq_data *bfqd, - struct bfq_group *bfqg, -@@ -736,6 +861,7 @@ static void bfq_pd_offline(struct blkg_policy_data *pd) - #ifdef BFQ_MQ - spin_lock_irqsave(&bfqd->lock, flags); - #endif -+ - /* - * Empty all service_trees belonging to this group before - * deactivating the group itself. -@@ -746,8 +872,7 @@ static void bfq_pd_offline(struct blkg_policy_data *pd) - /* - * The idle tree may still contain bfq_queues belonging - * to exited task because they never migrated to a different -- * cgroup from the one being destroyed now. No one else -- * can access them so it's safe to act without any lock. -+ * cgroup from the one being destroyed now. 
- */ - bfq_flush_idle_tree(st); - -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -index a9e3406fef06..4eb668eeacdc 100644 ---- a/block/bfq-mq-iosched.c -+++ b/block/bfq-mq-iosched.c -@@ -4073,7 +4073,7 @@ static void bfq_put_queue(struct bfq_queue *bfqq) - - kmem_cache_free(bfq_pool, bfqq); - #ifdef BFQ_GROUP_IOSCHED_ENABLED -- bfqg_put(bfqg); -+ bfqg_and_blkg_put(bfqg); - #endif - } - -diff --git a/block/bfq-mq.h b/block/bfq-mq.h -index 36ee24a87dda..77ab0f22ed22 100644 ---- a/block/bfq-mq.h -+++ b/block/bfq-mq.h -@@ -695,23 +695,17 @@ static struct bfq_group *bfqq_group(struct bfq_queue *bfqq); - static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg); - - #define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \ -- char __pbuf[128]; \ -- \ -- blkg_path(bfqg_to_blkg(bfqq_group(bfqq)), __pbuf, sizeof(__pbuf)); \ - pr_crit("%s bfq%d%c %s " fmt "\n", \ - checked_dev_name((bfqd)->queue->backing_dev_info->dev), \ - (bfqq)->pid, \ - bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \ -- __pbuf, ##args); \ -+ bfqq_group(bfqq)->blkg_path, ##args); \ - } while (0) - - #define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do { \ -- char __pbuf[128]; \ -- \ -- blkg_path(bfqg_to_blkg(bfqg), __pbuf, sizeof(__pbuf)); \ - pr_crit("%s %s " fmt "\n", \ - checked_dev_name((bfqd)->queue->backing_dev_info->dev), \ -- __pbuf, ##args); \ -+ bfqg->blkg_path, ##args); \ - } while (0) - - #else /* BFQ_GROUP_IOSCHED_ENABLED */ -@@ -736,20 +730,14 @@ static struct bfq_group *bfqq_group(struct bfq_queue *bfqq); - static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg); - - #define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \ -- char __pbuf[128]; \ -- \ -- blkg_path(bfqg_to_blkg(bfqq_group(bfqq)), __pbuf, sizeof(__pbuf)); \ - blk_add_trace_msg((bfqd)->queue, "bfq%d%c %s " fmt, \ - (bfqq)->pid, \ - bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \ -- __pbuf, ##args); \ -+ bfqq_group(bfqq)->blkg_path, ##args); \ - } while (0) - - #define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do { \ -- char __pbuf[128]; \ -- \ -- blkg_path(bfqg_to_blkg(bfqg), __pbuf, sizeof(__pbuf)); \ -- blk_add_trace_msg((bfqd)->queue, "%s " fmt, __pbuf, ##args); \ -+ blk_add_trace_msg((bfqd)->queue, "%s " fmt, bfqg->blkg_path, ##args);\ - } while (0) - - #else /* BFQ_GROUP_IOSCHED_ENABLED */ -@@ -860,6 +848,12 @@ struct bfq_group { - /* must be the first member */ - struct blkg_policy_data pd; - -+ /* cached path for this blkg (see comments in bfq_bic_update_cgroup) */ -+ char blkg_path[128]; -+ -+ /* reference counter (see comments in bfq_bic_update_cgroup) */ -+ int ref; -+ - struct bfq_entity entity; - struct bfq_sched_data sched_data; - - -From c9137b749aceef6c2dde88e99b2fc978d5952e76 Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Sat, 17 Jun 2017 11:18:11 +0200 -Subject: [PATCH 28/51] bfq-mq: fix macro name in conditional invocation of - policy_unregister - -This commit fixes the name of the macro in the conditional group that -invokes blkcg_policy_unregister in bfq_exit for bfq-mq. Because of -this error, blkcg_policy_unregister was never invoked. 
- -Signed-off-by: Paolo Valente ---- - block/bfq-mq-iosched.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -index 4eb668eeacdc..bc1de3f70ea8 100644 ---- a/block/bfq-mq-iosched.c -+++ b/block/bfq-mq-iosched.c -@@ -5669,7 +5669,7 @@ static int __init bfq_init(void) - static void __exit bfq_exit(void) - { - elv_unregister(&iosched_bfq_mq); --#ifdef CONFIG_BFQ_GROUP_ENABLED -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - blkcg_policy_unregister(&blkcg_policy_bfq); - #endif - bfq_slab_kill(); - -From c7ceb37496f63b2dba4d06946ab85ec97b87bfb5 Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Wed, 5 Jul 2017 11:48:17 +0200 -Subject: [PATCH 29/51] Port of "blk-mq-sched: unify request finished methods" - -No need to have two different callouts of bfq vs kyber. - -Signed-off-by: Christoph Hellwig -Signed-off-by: Jens Axboe ---- - block/bfq-mq-iosched.c | 6 +++--- - 1 file changed, 3 insertions(+), 3 deletions(-) - -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -index bc1de3f70ea8..2598602a0b10 100644 ---- a/block/bfq-mq-iosched.c -+++ b/block/bfq-mq-iosched.c -@@ -4753,7 +4753,7 @@ static void bfq_put_rq_priv_body(struct bfq_queue *bfqq) - bfq_put_queue(bfqq); - } - --static void bfq_put_rq_private(struct request_queue *q, struct request *rq) -+static void bfq_finish_request(struct request *rq) - { - struct bfq_queue *bfqq; - struct bfq_data *bfqd; -@@ -4814,7 +4814,7 @@ static void bfq_put_rq_private(struct request_queue *q, struct request *rq) - - assert_spin_locked(&bfqd->lock); - if (!RB_EMPTY_NODE(&rq->rb_node)) -- bfq_remove_request(q, rq); -+ bfq_remove_request(rq->q, rq); - bfq_put_rq_priv_body(bfqq); - } - -@@ -5558,7 +5558,7 @@ static struct elv_fs_entry bfq_attrs[] = { - static struct elevator_type iosched_bfq_mq = { - .ops.mq = { - .get_rq_priv = bfq_get_rq_private, -- .put_rq_priv = bfq_put_rq_private, -+ .finish_request = bfq_finish_request, - .exit_icq = bfq_exit_icq, - .insert_requests = bfq_insert_requests, - .dispatch_request = bfq_dispatch_request, - -From 12bef026fe114ab5e2e284772ddc52a8be83fdbc Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Wed, 5 Jul 2017 11:54:57 +0200 -Subject: [PATCH 30/51] Port of "bfq-iosched: fix NULL ioc check in - bfq_get_rq_private" - -icq_to_bic is a container_of operation, so we need to check for NULL -before it. Also move the check outside the spinlock while we're at -it. 
- -Signed-off-by: Christoph Hellwig -Signed-off-by: Jens Axboe ---- - block/bfq-mq-iosched.c | 15 +++++---------- - 1 file changed, 5 insertions(+), 10 deletions(-) - -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -index 2598602a0b10..c57774a60911 100644 ---- a/block/bfq-mq-iosched.c -+++ b/block/bfq-mq-iosched.c -@@ -4903,16 +4903,17 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq, - struct bio *bio) - { - struct bfq_data *bfqd = q->elevator->elevator_data; -- struct bfq_io_cq *bic = icq_to_bic(rq->elv.icq); -+ struct bfq_io_cq *bic; - const int is_sync = rq_is_sync(rq); - struct bfq_queue *bfqq; - bool bfqq_already_existing = false, split = false; - bool new_queue = false; - -- spin_lock_irq(&bfqd->lock); -+ if (!rq->elv.icq) -+ return 1; -+ bic = icq_to_bic(rq->elv.icq); - -- if (!bic) -- goto queue_fail; -+ spin_lock_irq(&bfqd->lock); - - bfq_check_ioprio_change(bic, bio); - -@@ -4980,13 +4981,7 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq, - - rq->rq_flags |= RQF_GOT; - spin_unlock_irq(&bfqd->lock); -- - return 0; -- --queue_fail: -- spin_unlock_irq(&bfqd->lock); -- -- return 1; - } - - static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq) - -From 633e5711347df1bf4ca935fd0aa9118a0054f75d Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Wed, 5 Jul 2017 12:02:16 +0200 -Subject: [PATCH 31/51] Port of "blk-mq-sched: unify request prepare methods" - -This patch makes sure we always allocate requests in the core blk-mq -code and use a common prepare_request method to initialize them for -both mq I/O schedulers. For Kyber and additional limit_depth method -is added that is called before allocating the request. - -Also because none of the intializations can really fail the new method -does not return an error - instead the bfq finish method is hardened -to deal with the no-IOC case. - -Last but not least this removes the abuse of RQF_QUEUE by the blk-mq -scheduling code as RQF_ELFPRIV is all that is needed now. - -Signed-off-by: Christoph Hellwig -Signed-off-by: Jens Axboe ---- - block/bfq-mq-iosched.c | 13 ++++++++----- - 1 file changed, 8 insertions(+), 5 deletions(-) - -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -index c57774a60911..49ffca1ad6e7 100644 ---- a/block/bfq-mq-iosched.c -+++ b/block/bfq-mq-iosched.c -@@ -4760,6 +4760,10 @@ static void bfq_finish_request(struct request *rq) - struct bfq_io_cq *bic; - - BUG_ON(!rq); -+ -+ if (!rq->elv.icq) -+ return; -+ - bfqq = RQ_BFQQ(rq); - BUG_ON(!bfqq); - -@@ -4899,9 +4903,9 @@ static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd, - /* - * Allocate bfq data structures associated with this request. 
- */ --static int bfq_get_rq_private(struct request_queue *q, struct request *rq, -- struct bio *bio) -+static void bfq_prepare_request(struct request *rq, struct bio *bio) - { -+ struct request_queue *q = rq->q; - struct bfq_data *bfqd = q->elevator->elevator_data; - struct bfq_io_cq *bic; - const int is_sync = rq_is_sync(rq); -@@ -4910,7 +4914,7 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq, - bool new_queue = false; - - if (!rq->elv.icq) -- return 1; -+ return; - bic = icq_to_bic(rq->elv.icq); - - spin_lock_irq(&bfqd->lock); -@@ -4981,7 +4985,6 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq, - - rq->rq_flags |= RQF_GOT; - spin_unlock_irq(&bfqd->lock); -- return 0; - } - - static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq) -@@ -5552,7 +5555,7 @@ static struct elv_fs_entry bfq_attrs[] = { - - static struct elevator_type iosched_bfq_mq = { - .ops.mq = { -- .get_rq_priv = bfq_get_rq_private, -+ .prepare_request = bfq_prepare_request, - .finish_request = bfq_finish_request, - .exit_icq = bfq_exit_icq, - .insert_requests = bfq_insert_requests, - -From 5a321acfce282c3e58ac63582faf6f928ad17f27 Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Wed, 5 Jul 2017 12:43:22 +0200 -Subject: [PATCH 32/51] Add list of bfq instances to documentation - -Signed-off-by: Paolo Valente ---- - Documentation/block/bfq-iosched.txt | 11 ++++++++++- - 1 file changed, 10 insertions(+), 1 deletion(-) - -diff --git a/Documentation/block/bfq-iosched.txt b/Documentation/block/bfq-iosched.txt -index 3d6951d63489..8ce6b9a9bacd 100644 ---- a/Documentation/block/bfq-iosched.txt -+++ b/Documentation/block/bfq-iosched.txt -@@ -11,6 +11,15 @@ controllers), BFQ's main features are: - groups (switching back to time distribution when needed to keep - throughput high). - -+If bfq-mq patches have been applied, then the following three -+instances of BFQ are available (otherwise only the first instance): -+- bfq: mainline version of BFQ, for blk-mq -+- bfq-mq: development version of BFQ for blk-mq; this version contains -+ also all latest features not yet landed in mainline, plus many -+ safety checks -+- bfq: BFQ for legacy blk; also this version contains both latest -+ features and safety checks -+ - In its default configuration, BFQ privileges latency over - throughput. So, when needed for achieving a lower latency, BFQ builds - schedules that may lead to a lower throughput. If your main or only -@@ -27,7 +36,7 @@ sequential I/O (e.g., 8-12 GB/s if I/O requests are 256 KB large), and - to 120-200 MB/s with 4KB random I/O. BFQ is currently being tested on - multi-queue devices too. - --The table of contents follow. Impatients can just jump to Section 3. -+The table of contents follows. Impatients can just jump to Section 3. 
- - CONTENTS - - -From 9f2e5b27227fd9254cc258572dc2d4531838c30b Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Wed, 5 Jul 2017 16:28:00 +0200 -Subject: [PATCH 33/51] bfq-sq: fix prefix of names of cgroups parameters - -Signed-off-by: Paolo Valente ---- - Documentation/block/bfq-iosched.txt | 12 +++++++----- - block/bfq-cgroup-included.c | 2 +- - 2 files changed, 8 insertions(+), 6 deletions(-) - -diff --git a/Documentation/block/bfq-iosched.txt b/Documentation/block/bfq-iosched.txt -index 8ce6b9a9bacd..965d82f94db9 100644 ---- a/Documentation/block/bfq-iosched.txt -+++ b/Documentation/block/bfq-iosched.txt -@@ -503,10 +503,12 @@ To get proportional sharing of bandwidth with BFQ for a given device, - BFQ must of course be the active scheduler for that device. - - Within each group directory, the names of the files associated with --BFQ-specific cgroup parameters and stats begin with the "bfq." --prefix. So, with cgroups-v1 or cgroups-v2, the full prefix for --BFQ-specific files is "blkio.bfq." or "io.bfq." For example, the group --parameter to set the weight of a group with BFQ is blkio.bfq.weight -+BFQ-specific cgroup parameters and stats begin with the "bfq.", -+"bfq-sq." or "bfq-mq." prefix, depending on which instance of bfq you -+want to use. So, with cgroups-v1 or cgroups-v2, the full prefix for -+BFQ-specific files is "blkio.bfqX." or "io.bfqX.", where X can be "" -+(i.e., null string), "-sq" or "-mq". For example, the group parameter -+to set the weight of a group with the mainline BFQ is blkio.bfq.weight - or io.bfq.weight. - - Parameters to set -@@ -514,7 +516,7 @@ Parameters to set - - For each group, there is only the following parameter to set. - --weight (namely blkio.bfq.weight or io.bfq-weight): the weight of the -+weight (namely blkio.bfqX.weight or io.bfqX.weight): the weight of the - group inside its parent. Available values: 1..10000 (default 100). 
The - linear mapping between ioprio and weights, described at the beginning - of the tunable section, is still valid, but all weights higher than -diff --git a/block/bfq-cgroup-included.c b/block/bfq-cgroup-included.c -index d903393ee78a..631e53d9150d 100644 ---- a/block/bfq-cgroup-included.c -+++ b/block/bfq-cgroup-included.c -@@ -1124,7 +1124,7 @@ bfq_create_group_hierarchy(struct bfq_data *bfqd, int node) - #ifdef BFQ_MQ - #define BFQ_CGROUP_FNAME(param) "bfq-mq."#param - #else --#define BFQ_CGROUP_FNAME(param) "bfq."#param -+#define BFQ_CGROUP_FNAME(param) "bfq-sq."#param - #endif - - static struct cftype bfq_blkcg_legacy_files[] = { - -From 92b42df8166939ccf26aa450125b5b575cf6d505 Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Wed, 5 Jul 2017 21:08:32 +0200 -Subject: [PATCH 34/51] Add to documentation that bfq-mq and bfq-sq contain - last fixes too - -Signed-off-by: Paolo Valente ---- - Documentation/block/bfq-iosched.txt | 6 +++--- - 1 file changed, 3 insertions(+), 3 deletions(-) - -diff --git a/Documentation/block/bfq-iosched.txt b/Documentation/block/bfq-iosched.txt -index 965d82f94db9..0e59f1c9d30e 100644 ---- a/Documentation/block/bfq-iosched.txt -+++ b/Documentation/block/bfq-iosched.txt -@@ -15,10 +15,10 @@ If bfq-mq patches have been applied, then the following three - instances of BFQ are available (otherwise only the first instance): - - bfq: mainline version of BFQ, for blk-mq - - bfq-mq: development version of BFQ for blk-mq; this version contains -- also all latest features not yet landed in mainline, plus many -+ also all latest features and fixes not yet landed in mainline, plus many - safety checks --- bfq: BFQ for legacy blk; also this version contains both latest -- features and safety checks -+- bfq: BFQ for legacy blk; also this version contains latest features -+ and fixes, as well as safety checks - - In its default configuration, BFQ privileges latency over - throughput. So, when needed for achieving a lower latency, BFQ builds - -From 7f9bdd433b848d4f53c167258bf4d0b3f1ae1923 Mon Sep 17 00:00:00 2001 -From: Lee Tibbert -Date: Wed, 19 Jul 2017 10:28:32 -0400 -Subject: [PATCH 35/51] Improve most frequently used no-logging path - -This patch originated as a fix for compiler unused-variable warnings -issued when compiling bfq-mq with logging disabled (both -CONFIG_BLK_DEV_IO_TRACE and CONFIG_BFQ_REDIRECT_TO_CONSOLE -undefined). - -It turns out to also have benefits for the bfq-sq path as well. - -In most performance sensitive production builds blktrace_api logging -will probably be turned off, so it is worth making the no-logging path -compile without warnings. Any performance benefit is a bonus. - -Thank you to T. B. on the bfq-iosched@googlegroups.com list -for ((void) (bfqq)) simplification/suggestion/improvement. All bugs -and unclear descriptions are my own doing. - -The discussion below is based on the gcc compiler with optimization -level of at least 02. Lower optimization levels are unlikely to -remove no-op instruction equivalents. - -Provide three improvements in this likely case. - - 1) Fix multiple occurrences of an unused-variable warning - issued when compiling bfq-mq with no logging. The warning - occurred each time the bfq_log_bfqg macro was expanded inside - a code block such as the following snippet from - block/bfq-sched.c, line 139 and few following, lightly edited for - indentation in order to pass checkpatch.pl maximum line lengths. 
- -else { - struct bfq_group *bfqg = - container_of(next_in_service, - struct bfq_group, entity); - - bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg, - "update_next_in_service: chosen this entity"); - } - - Previously bfq-mq.h expanded bfq_log_bfqg to blk_add_trace_msg. - When both bfq console logging and blktrace_api logging are - disabled, include/linux/blktrace_api expands to - do { } while (0), leaving the code block local variable unused. - - bfq_log_bfqq() had similar behavior but is never called with - a potentially unused variable. This patch fixes that macro for - consistency. - - bfq-sq.h (single queue) with blktrace_api enabled, and the bfq - console logging macros have code paths which not trigger this - warning. - - kernel.org (4.12 & 4.13) bfq (bfq-iosched.h) could trigger - the warning but no code does so now. This patch fixes - bfq-iosched.h for consistency. - - The style above enables a software engineering approach where - complex expressions are moved to a local variable before the - bfq_log* call. This makes it easier to read the expression and - use breakpoints to verify it. bfq-mq uses this approach in - several places. - - New bfq_log* macros are provided for the no-logging case. - I touch only the second argument, because current code never - uses the local variable approach with the first or other - arguments. I tried to balance consistency with simplicity. - - 2) For bfq-sq, reduce to zero, the number of instructions executed - when no logging is configured. No sense marshaling arguments - which are never going to be used. - - On a trial V8R11 builds, this reduced the size of bfq-iosched.o - by 14.3 KiB. The size went from 70304 to 55664 bytes. - - bfq-mq and kernel.org bfq code size does not change because - existing macros already optimize to zero bytes when not logging. - The current changes maintains consistency with the bfq-sq path - and makes the bfq-mq & bfq no-logging paths resistant to future - logging path macro changes which might cause generated code. - - 3) Slightly reduce compile time of all bfq variants by including - blktrace_api.h only when it will be used. - -Signed-off-by: Lee Tibbert ---- - block/bfq-mq.h | 18 +++++++++++++++++- - block/bfq.h | 18 +++++++++++++++++- - 2 files changed, 34 insertions(+), 2 deletions(-) - -diff --git a/block/bfq-mq.h b/block/bfq-mq.h -index 77ab0f22ed22..7ed2cc29be57 100644 ---- a/block/bfq-mq.h -+++ b/block/bfq-mq.h -@@ -15,7 +15,6 @@ - #ifndef _BFQ_H - #define _BFQ_H - --#include - #include - #include - -@@ -725,6 +724,21 @@ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg); - ##args) - - #else /* CONFIG_BFQ_REDIRECT_TO_CONSOLE */ -+ -+#if !defined(CONFIG_BLK_DEV_IO_TRACE) -+ -+/* Avoid possible "unused-variable" warning. See commit message. */ -+ -+#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) ((void) (bfqq)) -+ -+#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) ((void) (bfqg)) -+ -+#define bfq_log(bfqd, fmt, args...) do {} while (0) -+ -+#else /* CONFIG_BLK_DEV_IO_TRACE */ -+ -+#include -+ - #ifdef BFQ_GROUP_IOSCHED_ENABLED - static struct bfq_group *bfqq_group(struct bfq_queue *bfqq); - static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg); -@@ -752,6 +766,8 @@ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg); - - #define bfq_log(bfqd, fmt, args...) \ - blk_add_trace_msg((bfqd)->queue, "bfq " fmt, ##args) -+ -+#endif /* CONFIG_BLK_DEV_IO_TRACE */ - #endif /* CONFIG_BFQ_REDIRECT_TO_CONSOLE */ - - /* Expiration reasons. 
*/ -diff --git a/block/bfq.h b/block/bfq.h -index 53954d1b87f8..15d326f466b7 100644 ---- a/block/bfq.h -+++ b/block/bfq.h -@@ -15,7 +15,6 @@ - #ifndef _BFQ_H - #define _BFQ_H - --#include - #include - #include - -@@ -725,6 +724,21 @@ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg); - ##args) - - #else /* CONFIG_BFQ_REDIRECT_TO_CONSOLE */ -+ -+#if !defined(CONFIG_BLK_DEV_IO_TRACE) -+ -+/* Avoid possible "unused-variable" warning. See commit message. */ -+ -+#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) ((void) (bfqq)) -+ -+#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) ((void) (bfqg)) -+ -+#define bfq_log(bfqd, fmt, args...) do {} while (0) -+ -+#else /* CONFIG_BLK_DEV_IO_TRACE */ -+ -+#include -+ - #ifdef BFQ_GROUP_IOSCHED_ENABLED - static struct bfq_group *bfqq_group(struct bfq_queue *bfqq); - static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg); -@@ -759,6 +773,8 @@ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg); - - #define bfq_log(bfqd, fmt, args...) \ - blk_add_trace_msg((bfqd)->queue, "bfq " fmt, ##args) -+ -+#endif /* CONFIG_BLK_DEV_IO_TRACE */ - #endif /* CONFIG_BFQ_REDIRECT_TO_CONSOLE */ - - /* Expiration reasons. */ - -From f11a0e751e741bf94c6a48234824d50b3c0100ad Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Wed, 9 Aug 2017 16:40:39 +0200 -Subject: [PATCH 36/51] bfq-sq: fix commit "Remove all get and put of I/O - contexts" in branch bfq-mq - -The commit "Remove all get and put of I/O contexts" erroneously removed -the reset of the field in_service_bic for bfq-sq. This commit re-adds -that missing reset. - -Signed-off-by: Paolo Valente ---- - block/bfq-sched.c | 7 +++++++ - block/bfq-sq-iosched.c | 1 + - 2 files changed, 8 insertions(+) - -diff --git a/block/bfq-sched.c b/block/bfq-sched.c -index 9c4e6797d8c9..7425824c26b8 100644 ---- a/block/bfq-sched.c -+++ b/block/bfq-sched.c -@@ -1904,6 +1904,13 @@ static void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd) - struct bfq_entity *in_serv_entity = &in_serv_bfqq->entity; - struct bfq_entity *entity = in_serv_entity; - -+#ifndef BFQ_MQ -+ if (bfqd->in_service_bic) { -+ put_io_context(bfqd->in_service_bic->icq.ioc); -+ bfqd->in_service_bic = NULL; -+ } -+#endif -+ - bfq_clear_bfqq_wait_request(in_serv_bfqq); - hrtimer_try_to_cancel(&bfqd->idle_slice_timer); - bfqd->in_service_queue = NULL; -diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c -index 25da0d1c0622..e1960bf149d8 100644 ---- a/block/bfq-sq-iosched.c -+++ b/block/bfq-sq-iosched.c -@@ -3765,6 +3765,7 @@ static int bfq_dispatch_request(struct bfq_data *bfqd, - if (!bfqd->in_service_bic) { - atomic_long_inc(&RQ_BIC(rq)->icq.ioc->refcount); - bfqd->in_service_bic = RQ_BIC(rq); -+ BUG_ON(!bfqd->in_service_bic); - } - - if (bfqd->busy_queues > 1 && bfq_class_idle(bfqq)) - -From eceae5457530df8598557767d7be258ca9384de4 Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Wed, 9 Aug 2017 22:29:01 +0200 -Subject: [PATCH 37/51] bfq-sq-mq: make lookup_next_entity push up vtime on - expirations - -To provide a very smooth service, bfq starts to serve a bfq_queue -only if the queue is 'eligible', i.e., if the same queue would -have started to be served in the ideal, perfectly fair system that -bfq simulates internally. This is obtained by associating each -queue with a virtual start time, and by computing a special system -virtual time quantity: a queue is eligible only if the system -virtual time has reached the virtual start time of the -queue. 
Finally, bfq guarantees that, when a new queue must be set -in service, there is always at least one eligible entity for each -active parent entity in the scheduler. To provide this guarantee, -the function __bfq_lookup_next_entity pushes up, for each parent -entity on which it is invoked, the system virtual time to the -minimum among the virtual start times of the entities in the -active tree for the parent entity (more precisely, the push up -occurs if the system virtual time happens to be lower than all -such virtual start times). - -There is however a circumstance in which __bfq_lookup_next_entity -cannot push up the system virtual time for a parent entity, even -if the system virtual time is lower than the virtual start times -of all the child entities in the active tree. It happens if one of -the child entities is in service. In fact, in such a case, there -is already an eligible entity, the in-service one, even if it may -not be not present in the active tree (because in-service entities -may be removed from the active tree). - -Unfortunately, in the last re-design of the -hierarchical-scheduling engine, the reset of the pointer to the -in-service entity for a given parent entity--reset to be done as a -consequence of the expiration of the in-service entity--always -happens after the function __bfq_lookup_next_entity has been -invoked. This causes the function to think that there is still an -entity in service for the parent entity, and then that the system -virtual time cannot be pushed up, even if actually such a -no-more-in-service entity has already been properly reinserted -into the active tree (or in some other tree if no more -active). Yet, the system virtual time *had* to be pushed up, to be -ready to correctly choose the next queue to serve. Because of the -lack of this push up, bfq may wrongly set in service a queue that -had been speculatively pre-computed as the possible -next-in-service queue, but that would no more be the one to serve -after the expiration and the reinsertion into the active trees of -the previously in-service entities. - -This commit addresses this issue by making -__bfq_lookup_next_entity properly push up the system virtual time -if an expiration is occurring. - -Signed-off-by: Paolo Valente ---- - block/bfq-mq-iosched.c | 4 +-- - block/bfq-sched.c | 77 ++++++++++++++++++++++++++++++++------------------ - block/bfq-sq-iosched.c | 4 +-- - 3 files changed, 53 insertions(+), 32 deletions(-) - -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -index 49ffca1ad6e7..b5c848650375 100644 ---- a/block/bfq-mq-iosched.c -+++ b/block/bfq-mq-iosched.c -@@ -682,7 +682,7 @@ static void bfq_updated_next_req(struct bfq_data *bfqd, - entity->budget = new_budget; - bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu", - new_budget); -- bfq_requeue_bfqq(bfqd, bfqq); -+ bfq_requeue_bfqq(bfqd, bfqq, false); - } - } - -@@ -2822,7 +2822,7 @@ static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq) - - bfq_del_bfqq_busy(bfqd, bfqq, true); - } else { -- bfq_requeue_bfqq(bfqd, bfqq); -+ bfq_requeue_bfqq(bfqd, bfqq, true); - /* - * Resort priority tree of potential close cooperators. 
- */ -diff --git a/block/bfq-sched.c b/block/bfq-sched.c -index 7425824c26b8..f3001af37256 100644 ---- a/block/bfq-sched.c -+++ b/block/bfq-sched.c -@@ -33,7 +33,8 @@ static struct bfq_entity *bfq_root_active_entity(struct rb_root *tree) - return rb_entry(node, struct bfq_entity, rb_node); - } - --static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd); -+static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd, -+ bool expiration); - - static bool bfq_update_parent_budget(struct bfq_entity *next_in_service); - -@@ -43,6 +44,8 @@ static bool bfq_update_parent_budget(struct bfq_entity *next_in_service); - * @new_entity: if not NULL, pointer to the entity whose activation, - * requeueing or repositionig triggered the invocation of - * this function. -+ * @expiration: id true, this function is being invoked after the -+ * expiration of the in-service entity - * - * This function is called to update sd->next_in_service, which, in - * its turn, may change as a consequence of the insertion or -@@ -61,7 +64,8 @@ static bool bfq_update_parent_budget(struct bfq_entity *next_in_service); - * entity. - */ - static bool bfq_update_next_in_service(struct bfq_sched_data *sd, -- struct bfq_entity *new_entity) -+ struct bfq_entity *new_entity, -+ bool expiration) - { - struct bfq_entity *next_in_service = sd->next_in_service; - struct bfq_queue *bfqq; -@@ -120,7 +124,7 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd, - if (replace_next) - next_in_service = new_entity; - } else /* invoked because of a deactivation: lookup needed */ -- next_in_service = bfq_lookup_next_entity(sd); -+ next_in_service = bfq_lookup_next_entity(sd, expiration); - - if (next_in_service) { - parent_sched_may_change = !sd->next_in_service || -@@ -1291,10 +1295,12 @@ static void __bfq_activate_requeue_entity(struct bfq_entity *entity, - * @requeue: true if this is a requeue, which implies that bfqq is - * being expired; thus ALL its ancestors stop being served and must - * therefore be requeued -+ * @expiration: true if this function is being invoked in the expiration path -+ * of the in-service queue - */ - static void bfq_activate_requeue_entity(struct bfq_entity *entity, - bool non_blocking_wait_rq, -- bool requeue) -+ bool requeue, bool expiration) - { - struct bfq_sched_data *sd; - -@@ -1307,7 +1313,8 @@ static void bfq_activate_requeue_entity(struct bfq_entity *entity, - RB_EMPTY_ROOT(&(sd->service_tree+1)->active) && - RB_EMPTY_ROOT(&(sd->service_tree+2)->active)); - -- if (!bfq_update_next_in_service(sd, entity) && !requeue) { -+ if (!bfq_update_next_in_service(sd, entity, expiration) && -+ !requeue) { - BUG_ON(!sd->next_in_service); - break; - } -@@ -1373,6 +1380,8 @@ static bool __bfq_deactivate_entity(struct bfq_entity *entity, - * bfq_deactivate_entity - deactivate an entity representing a bfq_queue. - * @entity: the entity to deactivate. - * @ins_into_idle_tree: true if the entity can be put into the idle tree -+ * @expiration: true if this function is being invoked in the expiration path -+ * of the in-service queue - */ - static void bfq_deactivate_entity(struct bfq_entity *entity, - bool ins_into_idle_tree, -@@ -1417,7 +1426,7 @@ static void bfq_deactivate_entity(struct bfq_entity *entity, - * then, since entity has just been - * deactivated, a new one must be found. 
- */ -- bfq_update_next_in_service(sd, NULL); -+ bfq_update_next_in_service(sd, NULL, expiration); - - if (sd->next_in_service || sd->in_service_entity) { - /* -@@ -1495,7 +1504,7 @@ static void bfq_deactivate_entity(struct bfq_entity *entity, - "invoking udpdate_next for this entity"); - } - #endif -- if (!bfq_update_next_in_service(sd, entity) && -+ if (!bfq_update_next_in_service(sd, entity, expiration) && - !expiration) - /* - * next_in_service unchanged or not causing -@@ -1524,7 +1533,7 @@ static u64 bfq_calc_vtime_jump(struct bfq_service_tree *st) - if (bfqq) - bfq_log_bfqq(bfqq->bfqd, bfqq, - "calc_vtime_jump: new value %llu", -- root_entity->min_start); -+ ((root_entity->min_start>>10)*1000)>>12); - #ifdef BFQ_GROUP_IOSCHED_ENABLED - else { - struct bfq_group *bfqg = -@@ -1533,7 +1542,7 @@ static u64 bfq_calc_vtime_jump(struct bfq_service_tree *st) - - bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg, - "calc_vtime_jump: new value %llu", -- root_entity->min_start); -+ ((root_entity->min_start>>10)*1000)>>12); - } - #endif - return root_entity->min_start; -@@ -1615,17 +1624,9 @@ static struct bfq_entity *bfq_first_active_entity(struct bfq_service_tree *st, - * 3) is idle. - */ - static struct bfq_entity * --__bfq_lookup_next_entity(struct bfq_service_tree *st, bool in_service --#if 0 -- , bool force --#endif -- ) -+__bfq_lookup_next_entity(struct bfq_service_tree *st, bool in_service) - { -- struct bfq_entity *entity --#if 0 -- , *new_next_in_service = NULL --#endif -- ; -+ struct bfq_entity *entity; - u64 new_vtime; - struct bfq_queue *bfqq; - -@@ -1667,8 +1668,9 @@ __bfq_lookup_next_entity(struct bfq_service_tree *st, bool in_service - container_of(entity, struct bfq_group, entity); - - bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg, -- "__lookup_next: start %llu vtime %llu st %p", -+ "__lookup_next: start %llu vtime %llu (%llu) st %p", - ((entity->start>>10)*1000)>>12, -+ ((st->vtime>>10)*1000)>>12, - ((new_vtime>>10)*1000)>>12, st); - } - #endif -@@ -1681,12 +1683,14 @@ __bfq_lookup_next_entity(struct bfq_service_tree *st, bool in_service - /** - * bfq_lookup_next_entity - return the first eligible entity in @sd. - * @sd: the sched_data. -+ * @expiration: true if we are on the expiration path of the in-service queue - * - * This function is invoked when there has been a change in the trees -- * for sd, and we need know what is the new next entity after this -- * change. -+ * for sd, and we need to know what is the new next entity to serve -+ * after this change. - */ --static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd) -+static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd, -+ bool expiration) - { - struct bfq_service_tree *st = sd->service_tree; - struct bfq_service_tree *idle_class_st = st + (BFQ_IOPRIO_CLASSES - 1); -@@ -1716,8 +1720,24 @@ static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd) - * class, unless the idle class needs to be served. - */ - for (; class_idx < BFQ_IOPRIO_CLASSES; class_idx++) { -+ /* -+ * If expiration is true, then bfq_lookup_next_entity -+ * is being invoked as a part of the expiration path -+ * of the in-service queue. In this case, even if -+ * sd->in_service_entity is not NULL, -+ * sd->in_service_entiy at this point is actually not -+ * in service any more, and, if needed, has already -+ * been properly queued or requeued into the right -+ * tree. 
The reason why sd->in_service_entity is still -+ * not NULL here, even if expiration is true, is that -+ * sd->in_service_entiy is reset as a last step in the -+ * expiration path. So, if expiration is true, tell -+ * __bfq_lookup_next_entity that there is no -+ * sd->in_service_entity. -+ */ - entity = __bfq_lookup_next_entity(st + class_idx, -- sd->in_service_entity); -+ sd->in_service_entity && -+ !expiration); - - if (entity) - break; -@@ -1891,7 +1911,7 @@ static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd) - for_each_entity(entity) { - struct bfq_sched_data *sd = entity->sched_data; - -- if(!bfq_update_next_in_service(sd, NULL)) -+ if (!bfq_update_next_in_service(sd, NULL, false)) - break; - } - -@@ -1951,16 +1971,17 @@ static void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) - entity->on_st); - - bfq_activate_requeue_entity(entity, bfq_bfqq_non_blocking_wait_rq(bfqq), -- false); -+ false, false); - bfq_clear_bfqq_non_blocking_wait_rq(bfqq); - } - --static void bfq_requeue_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) -+static void bfq_requeue_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, -+ bool expiration) - { - struct bfq_entity *entity = &bfqq->entity; - - bfq_activate_requeue_entity(entity, false, -- bfqq == bfqd->in_service_queue); -+ bfqq == bfqd->in_service_queue, expiration); - } - - static void bfqg_stats_update_dequeue(struct bfq_group *bfqg); -diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c -index e1960bf149d8..42393ab889a9 100644 ---- a/block/bfq-sq-iosched.c -+++ b/block/bfq-sq-iosched.c -@@ -644,7 +644,7 @@ static void bfq_updated_next_req(struct bfq_data *bfqd, - entity->budget = new_budget; - bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu", - new_budget); -- bfq_requeue_bfqq(bfqd, bfqq); -+ bfq_requeue_bfqq(bfqd, bfqq, false); - } - } - -@@ -2715,7 +2715,7 @@ static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq) - - bfq_del_bfqq_busy(bfqd, bfqq, true); - } else { -- bfq_requeue_bfqq(bfqd, bfqq); -+ bfq_requeue_bfqq(bfqd, bfqq, true); - /* - * Resort priority tree of potential close cooperators. - */ - -From ee9f95b24e1d88ffba4845981c2a4684aefd0245 Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Wed, 9 Aug 2017 22:53:00 +0200 -Subject: [PATCH 38/51] bfq-sq-mq: remove direct switch to an entity in higher - class - -If the function bfq_update_next_in_service is invoked as a consequence -of the activation or requeueing of an entity, say E, and finds out -that E belongs to a higher-priority class than that of the current -next-in-service entity, then it sets next_in_service directly to -E. But this may lead to anomalous schedules, because E may happen not -be eligible for service, because its virtual start time is higher than -the system virtual time for its service tree. - -This commit addresses this issue by simply removing this direct -switch. - -Signed-off-by: Paolo Valente ---- - block/bfq-sched.c | 19 +++++-------------- - 1 file changed, 5 insertions(+), 14 deletions(-) - -diff --git a/block/bfq-sched.c b/block/bfq-sched.c -index f3001af37256..b1a59088db88 100644 ---- a/block/bfq-sched.c -+++ b/block/bfq-sched.c -@@ -76,9 +76,8 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd, - * or repositiong of an entity that does not coincide with - * sd->next_in_service, then a full lookup in the active tree - * can be avoided. 
In fact, it is enough to check whether the -- * just-modified entity has a higher priority than -- * sd->next_in_service, or, even if it has the same priority -- * as sd->next_in_service, is eligible and has a lower virtual -+ * just-modified entity has the same priority as -+ * sd->next_in_service, is eligible and has a lower virtual - * finish time than sd->next_in_service. If this compound - * condition holds, then the new entity becomes the new - * next_in_service. Otherwise no change is needed. -@@ -94,9 +93,8 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd, - - /* - * If there is already a next_in_service candidate -- * entity, then compare class priorities or timestamps -- * to decide whether to replace sd->service_tree with -- * new_entity. -+ * entity, then compare timestamps to decide whether -+ * to replace sd->service_tree with new_entity. - */ - if (next_in_service) { - unsigned int new_entity_class_idx = -@@ -104,10 +102,6 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd, - struct bfq_service_tree *st = - sd->service_tree + new_entity_class_idx; - -- /* -- * For efficiency, evaluate the most likely -- * sub-condition first. -- */ - replace_next = - (new_entity_class_idx == - bfq_class_idx(next_in_service) -@@ -115,10 +109,7 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd, - !bfq_gt(new_entity->start, st->vtime) - && - bfq_gt(next_in_service->finish, -- new_entity->finish)) -- || -- new_entity_class_idx < -- bfq_class_idx(next_in_service); -+ new_entity->finish)); - } - - if (replace_next) - -From a3fdc5af40537355b68c1f0d3997c5a5fb54b9ce Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Thu, 10 Aug 2017 08:15:50 +0200 -Subject: [PATCH 39/51] bfq-sq-mq: guarantee update_next_in_service always - returns an eligible entity - -If the function bfq_update_next_in_service is invoked as a consequence -of the activation or requeueing of an entity, say E, then it doesn't -invoke bfq_lookup_next_entity to get the next-in-service entity. In -contrast, it follows a shorter path: if E happens to be eligible (see -commit "bfq-sq-mq: make lookup_next_entity push up vtime on -expirations" for details on eligibility) and to have a lower virtual -finish time than the current candidate as next-in-service entity, then -E directly becomes the next-in-service entity. Unfortunately, there is -a corner case for which this shorter path makes -bfq_update_next_in_service choose a non eligible entity: it occurs if -both E and the current next-in-service entity happen to be non -eligible when bfq_update_next_in_service is invoked. In this case, E -is not set as next-in-service, and, since bfq_lookup_next_entity is -not invoked, the state of the parent entity is not updated so as to -end up with an eligible entity as the proper next-in-service entity. - -In this respect, next-in-service is actually allowed to be non -eligible while some queue is in service: since no system-virtual-time -push-up can be performed in that case (see again commit "bfq-sq-mq: -make lookup_next_entity push up vtime on expirations" for details), -next-in-service is chosen, speculatively, as a function of the -possible value that the system virtual time may get after a push -up. But the correctness of the schedule breaks if next-in-service is -still a non eligible entity when it is time to set in service the next -entity. Unfortunately, this may happen in the above corner case. 
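For reference, the eligibility test mentioned above boils down to a single comparison between an entity's virtual start time and the virtual time of its service tree. A minimal sketch, using the bfq_gt() timestamp helper and the fields that appear in the hunks of this series (the wrapper name itself is only illustrative, not part of the patch):

/* Illustrative wrapper: an entity is eligible when its virtual start
 * time does not exceed the virtual time of its service tree. */
static bool bfq_entity_is_eligible(struct bfq_entity *entity,
				   struct bfq_service_tree *st)
{
	return !bfq_gt(entity->start, st->vtime);
}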
- -This commit fixes this problem by making bfq_update_next_in_service -invoke bfq_lookup_next_entity not only if the above shorter path -cannot be taken, but also if the shorter path is taken but fails to -yield an eligible next-in-service entity. - -Signed-off-by: Paolo Valente ---- - block/bfq-sched.c | 38 ++++++++++++++++++++++++++++---------- - 1 file changed, 28 insertions(+), 10 deletions(-) - -diff --git a/block/bfq-sched.c b/block/bfq-sched.c -index b1a59088db88..e4a2553a2d2c 100644 ---- a/block/bfq-sched.c -+++ b/block/bfq-sched.c -@@ -70,6 +70,7 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd, - struct bfq_entity *next_in_service = sd->next_in_service; - struct bfq_queue *bfqq; - bool parent_sched_may_change = false; -+ bool change_without_lookup = false; - - /* - * If this update is triggered by the activation, requeueing -@@ -89,7 +90,7 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd, - * set to true, and left as true if - * sd->next_in_service is NULL. - */ -- bool replace_next = true; -+ change_without_lookup = true; - - /* - * If there is already a next_in_service candidate -@@ -102,7 +103,7 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd, - struct bfq_service_tree *st = - sd->service_tree + new_entity_class_idx; - -- replace_next = -+ change_without_lookup = - (new_entity_class_idx == - bfq_class_idx(next_in_service) - && -@@ -112,15 +113,32 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd, - new_entity->finish)); - } - -- if (replace_next) -+ if (change_without_lookup) { - next_in_service = new_entity; -- } else /* invoked because of a deactivation: lookup needed */ -+ bfqq = bfq_entity_to_bfqq(next_in_service); -+ -+ if (bfqq) -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "update_next_in_service: chose without lookup"); -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ else { -+ struct bfq_group *bfqg = -+ container_of(next_in_service, -+ struct bfq_group, entity); -+ -+ bfq_log_bfqg((struct bfq_data*)bfqg->bfqd, bfqg, -+ "update_next_in_service: chose without lookup"); -+ } -+#endif -+ } -+ } -+ -+ if (!change_without_lookup) /* lookup needed */ - next_in_service = bfq_lookup_next_entity(sd, expiration); - -- if (next_in_service) { -+ if (next_in_service) - parent_sched_may_change = !sd->next_in_service || - bfq_update_parent_budget(next_in_service); -- } - - sd->next_in_service = next_in_service; - -@@ -1053,7 +1071,7 @@ static void bfq_update_fin_time_enqueue(struct bfq_entity *entity, - - if (bfqq) { - bfq_log_bfqq(bfqq->bfqd, bfqq, -- "__activate_entity: new queue finish %llu", -+ "update_fin_time_enqueue: new queue finish %llu", - ((entity->finish>>10)*1000)>>12); - #ifdef BFQ_GROUP_IOSCHED_ENABLED - } else { -@@ -1061,7 +1079,7 @@ static void bfq_update_fin_time_enqueue(struct bfq_entity *entity, - container_of(entity, struct bfq_group, entity); - - bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg, -- "__activate_entity: new group finish %llu", -+ "update_fin_time_enqueue: new group finish %llu", - ((entity->finish>>10)*1000)>>12); - #endif - } -@@ -1071,7 +1089,7 @@ static void bfq_update_fin_time_enqueue(struct bfq_entity *entity, - - if (bfqq) { - bfq_log_bfqq(bfqq->bfqd, bfqq, -- "__activate_entity: queue %seligible in st %p", -+ "update_fin_time_enqueue: queue %seligible in st %p", - entity->start <= st->vtime ? 
"" : "non ", st); - #ifdef BFQ_GROUP_IOSCHED_ENABLED - } else { -@@ -1079,7 +1097,7 @@ static void bfq_update_fin_time_enqueue(struct bfq_entity *entity, - container_of(entity, struct bfq_group, entity); - - bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg, -- "__activate_entity: group %seligible in st %p", -+ "update_fin_time_enqueue: group %seligible in st %p", - entity->start <= st->vtime ? "" : "non ", st); - #endif - } - -From 6565e4d1aac029b6f0a5d86a4c6ef38608838eac Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Thu, 31 Aug 2017 19:24:26 +0200 -Subject: [PATCH 40/51] doc, block, bfq: fix some typos and stale sentences - -Signed-off-by: Paolo Valente -Reviewed-by: Jeremy Hickman -Reviewed-by: Laurentiu Nicola ---- - Documentation/block/bfq-iosched.txt | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/Documentation/block/bfq-iosched.txt b/Documentation/block/bfq-iosched.txt -index 0e59f1c9d30e..dcfe15523da3 100644 ---- a/Documentation/block/bfq-iosched.txt -+++ b/Documentation/block/bfq-iosched.txt -@@ -17,7 +17,7 @@ instances of BFQ are available (otherwise only the first instance): - - bfq-mq: development version of BFQ for blk-mq; this version contains - also all latest features and fixes not yet landed in mainline, plus many - safety checks --- bfq: BFQ for legacy blk; also this version contains latest features -+- bfq-sq: BFQ for legacy blk; also this version contains latest features - and fixes, as well as safety checks - - In its default configuration, BFQ privileges latency over - -From 261ee8cc9f43e03d790a07184f0bcaa504ee6737 Mon Sep 17 00:00:00 2001 -From: Luca Miccio -Date: Wed, 13 Sep 2017 12:03:56 +0200 -Subject: [PATCH 41/51] bfq-mq, bfq-sq: Disable writeback throttling - -Similarly to CFQ, BFQ has its write-throttling heuristics, and it -is better not to combine them with further write-throttling -heuristics of a different nature. -So this commit disables write-back throttling for a device if BFQ -is used as I/O scheduler for that device. - -Signed-off-by: Luca Miccio -Signed-off-by: Paolo Valente -Tested-by: Oleksandr Natalenko ---- - block/bfq-mq-iosched.c | 2 ++ - block/bfq-sq-iosched.c | 7 +++++++ - 2 files changed, 9 insertions(+) - -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -index b5c848650375..7d27d5b3befb 100644 ---- a/block/bfq-mq-iosched.c -+++ b/block/bfq-mq-iosched.c -@@ -89,6 +89,7 @@ - #include "blk-mq-tag.h" - #include "blk-mq-sched.h" - #include "bfq-mq.h" -+#include "blk-wbt.h" - - /* Expiration time of sync (0) and async (1) requests, in ns. */ - static const u64 bfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 }; -@@ -5260,6 +5261,7 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e) - bfq_init_root_group(bfqd->root_group, bfqd); - bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group); - -+ wbt_disable_default(q); - return 0; - - out_free: -diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c -index 42393ab889a9..6fdc3b1d5bb8 100644 ---- a/block/bfq-sq-iosched.c -+++ b/block/bfq-sq-iosched.c -@@ -83,6 +83,7 @@ - #include - #include "blk.h" - #include "bfq.h" -+#include "blk-wbt.h" - - /* Expiration time of sync (0) and async (1) requests, in ns. 
*/ - static const u64 bfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 }; -@@ -4976,6 +4977,11 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e) - return -ENOMEM; - } - -+static void bfq_registered_queue(struct request_queue *q) -+{ -+ wbt_disable_default(q); -+} -+ - static void bfq_slab_kill(void) - { - kmem_cache_destroy(bfq_pool); -@@ -5285,6 +5291,7 @@ static struct elevator_type iosched_bfq = { - .elevator_may_queue_fn = bfq_may_queue, - .elevator_init_fn = bfq_init_queue, - .elevator_exit_fn = bfq_exit_queue, -+ .elevator_registered_fn = bfq_registered_queue, - }, - .icq_size = sizeof(struct bfq_io_cq), - .icq_align = __alignof__(struct bfq_io_cq), - -From 40ea0aed088791da27fcfa51f3b64d1f96b0d06e Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Tue, 12 Sep 2017 16:45:53 +0200 -Subject: [PATCH 42/51] bfq-mq, bfq-sq: fix wrong init of saved start time for - weight raising - -This commit fixes a bug that causes bfq to fail to guarantee a high -responsiveness on some drives, if there is heavy random read+write I/O -in the background. More precisely, such a failure allowed this bug to -be found [1], but the bug may well cause other yet unreported -anomalies. - -BFQ raises the weight of the bfq_queues associated with soft real-time -applications, to privilege the I/O, and thus reduce latency, for these -applications. This mechanism is named soft-real-time weight raising in -BFQ. A soft real-time period may happen to be nested into an -interactive weight raising period, i.e., it may happen that, when a -bfq_queue switches to a soft real-time weight-raised state, the -bfq_queue is already being weight-raised because deemed interactive -too. In this case, BFQ saves in a special variable -wr_start_at_switch_to_srt, the time instant when the interactive -weight-raising period started for the bfq_queue, i.e., the time -instant when BFQ started to deem the bfq_queue interactive. This value -is then used to check whether the interactive weight-raising period -would still be in progress when the soft real-time weight-raising -period ends. If so, interactive weight raising is restored for the -bfq_queue. This restore is useful, in particular, because it prevents -bfq_queues from losing their interactive weight raising prematurely, -as a consequence of spurious, short-lived soft real-time -weight-raising periods caused by wrong detections as soft real-time. - -If, instead, a bfq_queue switches to soft-real-time weight raising -while it *is not* already in an interactive weight-raising period, -then the variable wr_start_at_switch_to_srt has no meaning during the -following soft real-time weight-raising period. Unfortunately the -handling of this case is wrong in BFQ: not only the variable is not -flagged somehow as meaningless, but it is also set to the time when -the switch to soft real-time weight-raising occurs. This may cause an -interactive weight-raising period to be considered mistakenly as still -in progress, and thus a spurious interactive weight-raising period to -start for the bfq_queue, at the end of the soft-real-time -weight-raising period. In particular the spurious interactive -weight-raising period will be considered as still in progress, if the -soft-real-time weight-raising period does not last very long. The -bfq_queue will then be wrongly privileged and, if I/O bound, will -unjustly steal bandwidth to truly interactive or soft real-time -bfq_queues, harming responsiveness and low latency. 
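The check that gets fooled is, in essence, the one sketched below: a simplified form of the test performed in bfq_update_wr_data() at the end of the soft real-time period, built from the time_is_after_eq_jiffies() macro and the bfq_wr_duration() helper used elsewhere in this series (the wrapper name is only illustrative):

/* Illustrative wrapper: would the interactive weight-raising period
 * that started at wr_start_at_switch_to_srt still be in progress now?
 * A bogus, recent value of wr_start_at_switch_to_srt makes this test
 * succeed by mistake. */
static bool bfq_interactive_wr_still_running(struct bfq_data *bfqd,
					     struct bfq_queue *bfqq)
{
	return time_is_after_eq_jiffies(bfqq->wr_start_at_switch_to_srt +
					bfq_wr_duration(bfqd));
}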
- -This commit fixes this issue by just setting wr_start_at_switch_to_srt -to minus infinity (farthest past time instant according to jiffies -macros): when the soft-real-time weight-raising period ends, certainly -no interactive weight-raising period will be considered as still in -progress. - -[1] Background I/O Type: Random - Background I/O mix: Reads and writes -- Application to start: LibreOffice Writer in -http://www.phoronix.com/scan.php?page=news_item&px=Linux-4.13-IO-Laptop - -Signed-off-by: Paolo Valente -Signed-off-by: Angelo Ruocco -Tested-by: Oleksandr Natalenko -Tested-by: Lee Tibbert -Tested-by: Mirko Montanari ---- - block/bfq-mq-iosched.c | 50 +++++++++++++++++++++++++++++++------------------- - block/bfq-sq-iosched.c | 50 +++++++++++++++++++++++++++++++------------------- - 2 files changed, 62 insertions(+), 38 deletions(-) - -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -index 7d27d5b3befb..f378519b6d33 100644 ---- a/block/bfq-mq-iosched.c -+++ b/block/bfq-mq-iosched.c -@@ -1204,6 +1204,24 @@ static bool bfq_bfqq_update_budg_for_activation(struct bfq_data *bfqd, - return wr_or_deserves_wr; - } - -+/* -+ * Return the farthest future time instant according to jiffies -+ * macros. -+ */ -+static unsigned long bfq_greatest_from_now(void) -+{ -+ return jiffies + MAX_JIFFY_OFFSET; -+} -+ -+/* -+ * Return the farthest past time instant according to jiffies -+ * macros. -+ */ -+static unsigned long bfq_smallest_from_now(void) -+{ -+ return jiffies - MAX_JIFFY_OFFSET; -+} -+ - static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd, - struct bfq_queue *bfqq, - unsigned int old_wr_coeff, -@@ -1218,7 +1236,19 @@ static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd, - bfqq->wr_coeff = bfqd->bfq_wr_coeff; - bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); - } else { -- bfqq->wr_start_at_switch_to_srt = jiffies; -+ /* -+ * No interactive weight raising in progress -+ * here: assign minus infinity to -+ * wr_start_at_switch_to_srt, to make sure -+ * that, at the end of the soft-real-time -+ * weight raising periods that is starting -+ * now, no interactive weight-raising period -+ * may be wrongly considered as still in -+ * progress (and thus actually started by -+ * mistake). -+ */ -+ bfqq->wr_start_at_switch_to_srt = -+ bfq_smallest_from_now(); - bfqq->wr_coeff = bfqd->bfq_wr_coeff * - BFQ_SOFTRT_WEIGHT_FACTOR; - bfqq->wr_cur_max_time = -@@ -3174,24 +3204,6 @@ static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd, - jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4); - } - --/* -- * Return the farthest future time instant according to jiffies -- * macros. -- */ --static unsigned long bfq_greatest_from_now(void) --{ -- return jiffies + MAX_JIFFY_OFFSET; --} -- --/* -- * Return the farthest past time instant according to jiffies -- * macros. -- */ --static unsigned long bfq_smallest_from_now(void) --{ -- return jiffies - MAX_JIFFY_OFFSET; --} -- - /** - * bfq_bfqq_expire - expire a queue. - * @bfqd: device owning the queue. -diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c -index 6fdc3b1d5bb8..f4654436cd55 100644 ---- a/block/bfq-sq-iosched.c -+++ b/block/bfq-sq-iosched.c -@@ -1165,6 +1165,24 @@ static bool bfq_bfqq_update_budg_for_activation(struct bfq_data *bfqd, - return wr_or_deserves_wr; - } - -+/* -+ * Return the farthest future time instant according to jiffies -+ * macros. 
-+ */ -+static unsigned long bfq_greatest_from_now(void) -+{ -+ return jiffies + MAX_JIFFY_OFFSET; -+} -+ -+/* -+ * Return the farthest past time instant according to jiffies -+ * macros. -+ */ -+static unsigned long bfq_smallest_from_now(void) -+{ -+ return jiffies - MAX_JIFFY_OFFSET; -+} -+ - static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd, - struct bfq_queue *bfqq, - unsigned int old_wr_coeff, -@@ -1179,7 +1197,19 @@ static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd, - bfqq->wr_coeff = bfqd->bfq_wr_coeff; - bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); - } else { -- bfqq->wr_start_at_switch_to_srt = jiffies; -+ /* -+ * No interactive weight raising in progress -+ * here: assign minus infinity to -+ * wr_start_at_switch_to_srt, to make sure -+ * that, at the end of the soft-real-time -+ * weight raising periods that is starting -+ * now, no interactive weight-raising period -+ * may be wrongly considered as still in -+ * progress (and thus actually started by -+ * mistake). -+ */ -+ bfqq->wr_start_at_switch_to_srt = -+ bfq_smallest_from_now(); - bfqq->wr_coeff = bfqd->bfq_wr_coeff * - BFQ_SOFTRT_WEIGHT_FACTOR; - bfqq->wr_cur_max_time = -@@ -3067,24 +3097,6 @@ static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd, - jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4); - } - --/* -- * Return the farthest future time instant according to jiffies -- * macros. -- */ --static unsigned long bfq_greatest_from_now(void) --{ -- return jiffies + MAX_JIFFY_OFFSET; --} -- --/* -- * Return the farthest past time instant according to jiffies -- * macros. -- */ --static unsigned long bfq_smallest_from_now(void) --{ -- return jiffies - MAX_JIFFY_OFFSET; --} -- - /** - * bfq_bfqq_expire - expire a queue. - * @bfqd: device owning the queue. - -From 9dbea44b6f721baeff35b9fdf628ec55fe00e09d Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Thu, 14 Sep 2017 05:12:58 -0400 -Subject: [PATCH 43/51] Fix commit "Unnest request-queue and ioc locks from - scheduler locks" - -The commit "Unnest request-queue and ioc locks from scheduler locks" -mistakenly removed the setting of the split flag in function -bfq_prepare_request. This commit puts this missing instruction back in -its place. 
- -Signed-off-by: Paolo Valente ---- - block/bfq-mq-iosched.c | 12 ++++++++++++ - 1 file changed, 12 insertions(+) - -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -index f378519b6d33..288078e68a2a 100644 ---- a/block/bfq-mq-iosched.c -+++ b/block/bfq-mq-iosched.c -@@ -744,6 +744,12 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd, - bfqq->wr_cur_max_time = bic->saved_wr_cur_max_time; - BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish)); - -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "[%s] bic %p wr_coeff %d start_finish %lu max_time %lu", -+ __func__, -+ bic, bfqq->wr_coeff, bfqq->last_wr_start_finish, -+ bfqq->wr_cur_max_time); -+ - if (bfqq->wr_coeff > 1 && (bfq_bfqq_in_large_burst(bfqq) || - time_is_before_jiffies(bfqq->last_wr_start_finish + - bfqq->wr_cur_max_time))) { -@@ -2208,6 +2214,11 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq) - bic->saved_last_wr_start_finish = bfqq->last_wr_start_finish; - bic->saved_wr_cur_max_time = bfqq->wr_cur_max_time; - BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish)); -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "[%s] bic %p wr_coeff %d start_finish %lu max_time %lu", -+ __func__, -+ bic, bfqq->wr_coeff, bfqq->last_wr_start_finish, -+ bfqq->wr_cur_max_time); - } - - static void -@@ -4950,6 +4961,7 @@ static void bfq_prepare_request(struct request *rq, struct bio *bio) - bic->saved_in_large_burst = true; - - bfqq = bfq_split_bfqq(bic, bfqq); -+ split = true; - - if (!bfqq) - bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, - -From d4ebb2a66a23dc183792088c521f2be2193b56db Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Fri, 15 Sep 2017 01:53:51 -0400 -Subject: [PATCH 44/51] bfq-sq, bfq-mq: check and switch back to interactive wr - also on queue split - -As already explained in the message of commit "bfq-mq, bfq-sq: fix -wrong init of saved start time for weight raising", if a soft -real-time weight-raising period happens to be nested in a larger -interactive weight-raising period, then BFQ restores the interactive -weight raising at the end of the soft real-time weight raising. In -particular, BFQ checks whether the latter has ended only on request -dispatches. - -Unfortunately, the above scheme fails to restore interactive weight -raising in the following corner case: if a bfq_queue, say Q, -1) Is merged with another bfq_queue while it is in a nested soft -real-time weight-raising period. The weight-raising state of Q is -then saved, and not considered any longer until a split occurs. -2) Is split from the other bfq_queue(s) at a time instant when its -soft real-time weight raising is already finished. -On the split, while resuming the previous, soft real-time -weight-raised state of the bfq_queue Q, BFQ checks whether the -current soft real-time weight-raising period is actually over. If so, -BFQ switches weight raising off for Q, *without* checking whether the -soft real-time period was actually nested in a non-yet-finished -interactive weight-raising period. - -This commit addresses this issue by adding the above missing check in -bfq_queue splits, and restoring interactive weight raising if needed. 
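In condensed form, the logic added to bfq_bfqq_resume_state() by the hunks below (logging and the enclosing weight-raising guard omitted) reads roughly as follows:

/* Soft real-time period over: fall back to interactive weight raising
 * only if the enclosing interactive period would still be running. */
if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
    !bfq_bfqq_in_large_burst(bfqq) &&
    time_is_after_eq_jiffies(bfqq->wr_start_at_switch_to_srt +
			     bfq_wr_duration(bfqd)))
	switch_back_to_interactive_wr(bfqq, bfqd);
else
	bfqq->wr_coeff = 1; /* switch weight raising off */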
- -Signed-off-by: Paolo Valente -Tested-by: Angelo Ruocco -Tested-by: Mirko Montanari ---- - block/bfq-mq-iosched.c | 29 +++++++++++++++++++++-------- - block/bfq-sq-iosched.c | 35 +++++++++++++++++++++++++++-------- - 2 files changed, 48 insertions(+), 16 deletions(-) - -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -index 288078e68a2a..6130a95c6497 100644 ---- a/block/bfq-mq-iosched.c -+++ b/block/bfq-mq-iosched.c -@@ -716,6 +716,15 @@ static unsigned int bfq_wr_duration(struct bfq_data *bfqd) - return dur; - } - -+/* switch back from soft real-time to interactive weight raising */ -+static void switch_back_to_interactive_wr(struct bfq_queue *bfqq, -+ struct bfq_data *bfqd) -+{ -+ bfqq->wr_coeff = bfqd->bfq_wr_coeff; -+ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); -+ bfqq->last_wr_start_finish = bfqq->wr_start_at_switch_to_srt; -+} -+ - static void - bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd, - struct bfq_io_cq *bic, bool bfq_already_existing) -@@ -753,12 +762,20 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd, - if (bfqq->wr_coeff > 1 && (bfq_bfqq_in_large_burst(bfqq) || - time_is_before_jiffies(bfqq->last_wr_start_finish + - bfqq->wr_cur_max_time))) { -- bfq_log_bfqq(bfqq->bfqd, bfqq, -+ if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time && -+ !bfq_bfqq_in_large_burst(bfqq) && -+ time_is_after_eq_jiffies(bfqq->wr_start_at_switch_to_srt + -+ bfq_wr_duration(bfqd))) { -+ switch_back_to_interactive_wr(bfqq, bfqd); -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "resume state: switching back to interactive"); -+ } else { -+ bfqq->wr_coeff = 1; -+ bfq_log_bfqq(bfqq->bfqd, bfqq, - "resume state: switching off wr (%lu + %lu < %lu)", - bfqq->last_wr_start_finish, bfqq->wr_cur_max_time, - jiffies); -- -- bfqq->wr_coeff = 1; -+ } - } - - /* make sure weight will be updated, however we got here */ -@@ -3820,11 +3837,7 @@ static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq) - bfq_wr_duration(bfqd))) - bfq_bfqq_end_wr(bfqq); - else { -- /* switch back to interactive wr */ -- bfqq->wr_coeff = bfqd->bfq_wr_coeff; -- bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); -- bfqq->last_wr_start_finish = -- bfqq->wr_start_at_switch_to_srt; -+ switch_back_to_interactive_wr(bfqq, bfqd); - BUG_ON(time_is_after_jiffies( - bfqq->last_wr_start_finish)); - bfqq->entity.prio_changed = 1; -diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c -index f4654436cd55..e07d5d1c0d40 100644 ---- a/block/bfq-sq-iosched.c -+++ b/block/bfq-sq-iosched.c -@@ -678,6 +678,15 @@ static unsigned int bfq_wr_duration(struct bfq_data *bfqd) - return dur; - } - -+/* switch back from soft real-time to interactive weight raising */ -+static void switch_back_to_interactive_wr(struct bfq_queue *bfqq, -+ struct bfq_data *bfqd) -+{ -+ bfqq->wr_coeff = bfqd->bfq_wr_coeff; -+ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); -+ bfqq->last_wr_start_finish = bfqq->wr_start_at_switch_to_srt; -+} -+ - static void - bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd, - struct bfq_io_cq *bic, bool bfq_already_existing) -@@ -705,15 +714,29 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd, - bfqq->wr_cur_max_time = bic->saved_wr_cur_max_time; - BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish)); - -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "[%s] bic %p wr_coeff %d start_finish %lu max_time %lu", -+ __func__, -+ bic, bfqq->wr_coeff, bfqq->last_wr_start_finish, -+ bfqq->wr_cur_max_time); -+ - if (bfqq->wr_coeff > 1 && 
(bfq_bfqq_in_large_burst(bfqq) || - time_is_before_jiffies(bfqq->last_wr_start_finish + - bfqq->wr_cur_max_time))) { -- bfq_log_bfqq(bfqq->bfqd, bfqq, -+ if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time && -+ !bfq_bfqq_in_large_burst(bfqq) && -+ time_is_after_eq_jiffies(bfqq->wr_start_at_switch_to_srt + -+ bfq_wr_duration(bfqd))) { -+ switch_back_to_interactive_wr(bfqq, bfqd); -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "resume state: switching back to interactive"); -+ } else { -+ bfqq->wr_coeff = 1; -+ bfq_log_bfqq(bfqq->bfqd, bfqq, - "resume state: switching off wr (%lu + %lu < %lu)", - bfqq->last_wr_start_finish, bfqq->wr_cur_max_time, - jiffies); -- -- bfqq->wr_coeff = 1; -+ } - } - - /* make sure weight will be updated, however we got here */ -@@ -3703,11 +3726,7 @@ static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq) - bfq_wr_duration(bfqd))) - bfq_bfqq_end_wr(bfqq); - else { -- /* switch back to interactive wr */ -- bfqq->wr_coeff = bfqd->bfq_wr_coeff; -- bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); -- bfqq->last_wr_start_finish = -- bfqq->wr_start_at_switch_to_srt; -+ switch_back_to_interactive_wr(bfqq, bfqd); - BUG_ON(time_is_after_jiffies( - bfqq->last_wr_start_finish)); - bfqq->entity.prio_changed = 1; - -From 9eaec0c3a2d675763b09da81c9117a9c43bce942 Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Fri, 15 Sep 2017 04:58:33 -0400 -Subject: [PATCH 45/51] bfq-sq, bfq-mq: let early-merged queues be - weight-raised on split too - -A just-created bfq_queue, say Q, may happen to be merged with another -bfq_queue on the very first invocation of the function -__bfq_insert_request. In such a case, even if Q would clearly deserve -interactive weight raising (as it has just been created), the function -bfq_add_request does not make it to be invoked for Q, and thus to -activate weight raising for Q. As a consequence, when the state of Q -is saved for a possible future restore, after a split of Q from the -other bfq_queue(s), such a state happens to be (unjustly) -non-weight-raised. Then the bfq_queue will not enjoy any weight -raising on the split, even if should still be in an interactive -weight-raising period when the split occurs. - -This commit solves this problem as follows, for a just-created -bfq_queue that is being early-merged: it stores directly, in the saved -state of the bfq_queue, the weight-raising state that would have been -assigned to the bfq_queue if not early-merged. 
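The saved state itself, in essence (a condensation of the bfq_bfqq_save_state() hunks below, with the non-merged branch and comments omitted):

/* Early-merged, just-created queue: save the interactive
 * weight-raising parameters it would have been given, so that a
 * later split can restore them. */
if (unlikely(bfq_bfqq_just_created(bfqq) &&
	     !bfq_bfqq_in_large_burst(bfqq))) {
	bic->saved_wr_coeff = bfqq->bfqd->bfq_wr_coeff;
	bic->saved_wr_cur_max_time = bfq_wr_duration(bfqq->bfqd);
	bic->saved_last_wr_start_finish = jiffies;
}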
- -Signed-off-by: Paolo Valente -Tested-by: Angelo Ruocco -Tested-by: Mirko Montanari ---- - block/bfq-mq-iosched.c | 28 +++++++++++++++++++++++----- - block/bfq-sq-iosched.c | 28 +++++++++++++++++++++++----- - 2 files changed, 46 insertions(+), 10 deletions(-) - -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -index 6130a95c6497..af84e506e897 100644 ---- a/block/bfq-mq-iosched.c -+++ b/block/bfq-mq-iosched.c -@@ -2226,10 +2226,27 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq) - bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq); - bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq); - bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node); -- bic->saved_wr_coeff = bfqq->wr_coeff; -- bic->saved_wr_start_at_switch_to_srt = bfqq->wr_start_at_switch_to_srt; -- bic->saved_last_wr_start_finish = bfqq->last_wr_start_finish; -- bic->saved_wr_cur_max_time = bfqq->wr_cur_max_time; -+ if (unlikely(bfq_bfqq_just_created(bfqq) && -+ !bfq_bfqq_in_large_burst(bfqq))) { -+ /* -+ * bfqq being merged ritgh after being created: bfqq -+ * would have deserved interactive weight raising, but -+ * did not make it to be set in a weight-raised state, -+ * because of this early merge. Store directly the -+ * weight-raising state that would have been assigned -+ * to bfqq, so that to avoid that bfqq unjustly fails -+ * to enjoy weight raising if split soon. -+ */ -+ bic->saved_wr_coeff = bfqq->bfqd->bfq_wr_coeff; -+ bic->saved_wr_cur_max_time = bfq_wr_duration(bfqq->bfqd); -+ bic->saved_last_wr_start_finish = jiffies; -+ } else { -+ bic->saved_wr_coeff = bfqq->wr_coeff; -+ bic->saved_wr_start_at_switch_to_srt = -+ bfqq->wr_start_at_switch_to_srt; -+ bic->saved_last_wr_start_finish = bfqq->last_wr_start_finish; -+ bic->saved_wr_cur_max_time = bfqq->wr_cur_max_time; -+ } - BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish)); - bfq_log_bfqq(bfqq->bfqd, bfqq, - "[%s] bic %p wr_coeff %d start_finish %lu max_time %lu", -@@ -4560,7 +4577,6 @@ static void __bfq_insert_request(struct bfq_data *bfqd, struct request *rq) - bfqq->allocated); - - new_bfqq->ref++; -- bfq_clear_bfqq_just_created(bfqq); - /* - * If the bic associated with the process - * issuing this request still points to bfqq -@@ -4572,6 +4588,8 @@ static void __bfq_insert_request(struct bfq_data *bfqd, struct request *rq) - if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq) - bfq_merge_bfqqs(bfqd, RQ_BIC(rq), - bfqq, new_bfqq); -+ -+ bfq_clear_bfqq_just_created(bfqq); - /* - * rq is about to be enqueued into new_bfqq, - * release rq reference on bfqq -diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c -index e07d5d1c0d40..0c48f527fe3f 100644 ---- a/block/bfq-sq-iosched.c -+++ b/block/bfq-sq-iosched.c -@@ -2105,10 +2105,27 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq) - bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq); - bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq); - bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node); -- bic->saved_wr_coeff = bfqq->wr_coeff; -- bic->saved_wr_start_at_switch_to_srt = bfqq->wr_start_at_switch_to_srt; -- bic->saved_last_wr_start_finish = bfqq->last_wr_start_finish; -- bic->saved_wr_cur_max_time = bfqq->wr_cur_max_time; -+ if (unlikely(bfq_bfqq_just_created(bfqq) && -+ !bfq_bfqq_in_large_burst(bfqq))) { -+ /* -+ * bfqq being merged ritgh after being created: bfqq -+ * would have deserved interactive weight raising, but -+ * did not make it to be set in a weight-raised state, -+ * because of this early merge. 
Store directly the -+ * weight-raising state that would have been assigned -+ * to bfqq, so that to avoid that bfqq unjustly fails -+ * to enjoy weight raising if split soon. -+ */ -+ bic->saved_wr_coeff = bfqq->bfqd->bfq_wr_coeff; -+ bic->saved_wr_cur_max_time = bfq_wr_duration(bfqq->bfqd); -+ bic->saved_last_wr_start_finish = jiffies; -+ } else { -+ bic->saved_wr_coeff = bfqq->wr_coeff; -+ bic->saved_wr_start_at_switch_to_srt = -+ bfqq->wr_start_at_switch_to_srt; -+ bic->saved_last_wr_start_finish = bfqq->last_wr_start_finish; -+ bic->saved_wr_cur_max_time = bfqq->wr_cur_max_time; -+ } - BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish)); - } - -@@ -4383,10 +4400,11 @@ static void bfq_insert_request(struct request_queue *q, struct request *rq) - new_bfqq->allocated[rq_data_dir(rq)]++; - bfqq->allocated[rq_data_dir(rq)]--; - new_bfqq->ref++; -- bfq_clear_bfqq_just_created(bfqq); - if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq) - bfq_merge_bfqqs(bfqd, RQ_BIC(rq), - bfqq, new_bfqq); -+ -+ bfq_clear_bfqq_just_created(bfqq); - /* - * rq is about to be enqueued into new_bfqq, - * release rq reference on bfqq - -From cb05150675095cb97ab22e4955eb82e4fe2e9dbe Mon Sep 17 00:00:00 2001 -From: omcira -Date: Mon, 18 Sep 2017 10:49:48 +0200 -Subject: [PATCH 46/51] bfq-sq, bfq-mq: decrease burst size when queues in - burst exit - -If many queues belonging to the same group happen to be created -shortly after each other, then the concurrent processes associated -with these queues have typically a common goal, and they get it done -as soon as possible if not hampered by device idling. Examples are -processes spawned by git grep, or by systemd during boot. As for -device idling, this mechanism is currently necessary for weight -raising to succeed in its goal: privileging I/O. In view of these -facts, BFQ does not provide the above queues with either weight -raising or device idling. - -On the other hand, a burst of queue creations may be caused also by -the start-up of a complex application. In this case, these queues need -usually to be served one after the other, and as quickly as possible, -to maximise responsiveness. Therefore, in this case the best strategy -is to weight-raise all the queues created during the burst, i.e., the -exact opposite of the strategy for the above case. - -To distinguish between the two cases, BFQ uses an empirical burst-size -threshold, found through extensive tests and monitoring of daily -usage. Only large bursts, i.e., burst with a size above this -threshold, are considered as generated by a high number of parallel -processes. In this respect, upstart-based boot proved to be rather -hard to detect as generating a large burst of queue creations, because -with upstart most of the queues created in a burst exit *before* the -next queues in the same burst are created. To address this issue, I -changed the burst-detection mechanism so as to not decrease the size -of the current burst even if one of the queues in the burst is -eliminated. - -Unfortunately, this missing decrease causes false positives on very -fast systems: on the start-up of a complex application, such as -libreoffice writer, so many queues are created, served and exited -shortly after each other, that a large burst of queue creations is -wrongly detected as occurring. These false positives just disappear if -the size of a burst is decreased when one of the queues in the burst -exits. 
This commit restores the missing burst-size decrease, relying -of the fact that upstart is apparently unlikely to be used on systems -running this and future versions of the kernel. - -Signed-off-by: Paolo Valente -Signed-off-by: Mauro Andreolini -Signed-off-by: Angelo Ruocco -Tested-by: Mirko Montanari ---- - block/bfq-mq-iosched.c | 12 +++--------- - block/bfq-sq-iosched.c | 12 +++--------- - 2 files changed, 6 insertions(+), 18 deletions(-) - -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -index af84e506e897..6e413d7236ce 100644 ---- a/block/bfq-mq-iosched.c -+++ b/block/bfq-mq-iosched.c -@@ -4111,16 +4111,10 @@ static void bfq_put_queue(struct bfq_queue *bfqq) - BUG_ON(bfqq->entity.tree); - BUG_ON(bfq_bfqq_busy(bfqq)); - -- if (bfq_bfqq_sync(bfqq)) -- /* -- * The fact that this queue is being destroyed does not -- * invalidate the fact that this queue may have been -- * activated during the current burst. As a consequence, -- * although the queue does not exist anymore, and hence -- * needs to be removed from the burst list if there, -- * the burst size has not to be decremented. -- */ -+ if (bfq_bfqq_sync(bfqq) && !hlist_unhashed(&bfqq->burst_list_node)) { - hlist_del_init(&bfqq->burst_list_node); -+ bfqq->bfqd->burst_size--; -+ } - - if (bfqq->bfqd) - bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p freed", bfqq); -diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c -index 0c48f527fe3f..93034dd7b801 100644 ---- a/block/bfq-sq-iosched.c -+++ b/block/bfq-sq-iosched.c -@@ -3945,16 +3945,10 @@ static void bfq_put_queue(struct bfq_queue *bfqq) - BUG_ON(bfqq->entity.tree); - BUG_ON(bfq_bfqq_busy(bfqq)); - -- if (bfq_bfqq_sync(bfqq)) -- /* -- * The fact that this queue is being destroyed does not -- * invalidate the fact that this queue may have been -- * activated during the current burst. As a consequence, -- * although the queue does not exist anymore, and hence -- * needs to be removed from the burst list if there, -- * the burst size has not to be decremented. -- */ -+ if (bfq_bfqq_sync(bfqq) && !hlist_unhashed(&bfqq->burst_list_node)) { - hlist_del_init(&bfqq->burst_list_node); -+ bfqq->bfqd->burst_size--; -+ } - - bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p freed", bfqq); - - -From 60de7307d5e3ed7f272f12c900f631bdfe114db2 Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Fri, 6 Oct 2017 19:35:38 +0200 -Subject: [PATCH 47/51] bfq-sq, bfq-mq: fix unbalanced decrements of burst size -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -The commit "bfq-sq, bfq-mq: decrease burst size when queues in burst -exit" introduced the decrement of burst_size on the removal of a -bfq_queue from the burst list. Unfortunately, this decrement can -happen to be performed even when burst size is already equal to 0, -because of unbalanced decrements. A description follows of the cause -of these unbalanced decrements, namely a wrong assumption, and of the -way how this wrong assumption leads to unbalanced decrements. - -The wrong assumption is that a bfq_queue can exit only if the process -associated with the bfq_queue has exited. This is false, because a -bfq_queue, say Q, may exit also as a consequence of a merge with -another bfq_queue. In this case, Q exits because the I/O of its -associated process has been redirected to another bfq_queue. - -The decrement unbalance occurs because Q may then be re-created after -a split, and added back to the current burst list, *without* -incrementing burst_size. 
burst_size is not incremented because Q is -not a new bfq_queue added to the burst list, but a bfq_queue only -temporarily removed from the list, and, before the commit "bfq-sq, -bfq-mq: decrease burst size when queues in burst exit", burst_size was -not decremented when Q was removed. - -This commit addresses this issue by just checking whether the exiting -bfq_queue is a merged bfq_queue, and, in that case, not decrementing -burst_size. Unfortunately, this still leaves room for unbalanced -decrements, in the following rarer case: on a split, the bfq_queue -happens to be inserted into a different burst list than that it was -removed from when merged. If this happens, the number of elements in -the new burst list becomes higher than burst_size (by one). When the -bfq_queue then exits, it is of course not in a merged state any -longer, thus burst_size is decremented, which results in an unbalanced -decrement. To handle this sporadic, unlucky case in a simple way, -this commit also checks that burst_size is larger than 0 before -decrementing it. - -Finally, this commit removes an useless, extra check: the check that -the bfq_queue is sync, performed before checking whether the bfq_queue -is in the burst list. This extra check is redundant, because only sync -bfq_queues can be inserted into the burst list. - -Reported-by: Philip Müller -Signed-off-by: Paolo Valente -Signed-off-by: Angelo Ruocco -Tested-by: Philip Müller -Tested-by: Oleksandr Natalenko -Tested-by: Lee Tibbert ---- - block/bfq-mq-iosched.c | 59 ++++++++++++++++++++++++++++++++++++++++++++++++-- - block/bfq-sq-iosched.c | 59 ++++++++++++++++++++++++++++++++++++++++++++++++-- - 2 files changed, 114 insertions(+), 4 deletions(-) - -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -index 6e413d7236ce..816bac6cdd3d 100644 ---- a/block/bfq-mq-iosched.c -+++ b/block/bfq-mq-iosched.c -@@ -4111,9 +4111,36 @@ static void bfq_put_queue(struct bfq_queue *bfqq) - BUG_ON(bfqq->entity.tree); - BUG_ON(bfq_bfqq_busy(bfqq)); - -- if (bfq_bfqq_sync(bfqq) && !hlist_unhashed(&bfqq->burst_list_node)) { -+ if (!hlist_unhashed(&bfqq->burst_list_node)) { - hlist_del_init(&bfqq->burst_list_node); -- bfqq->bfqd->burst_size--; -+ /* -+ * Decrement also burst size after the removal, if the -+ * process associated with bfqq is exiting, and thus -+ * does not contribute to the burst any longer. This -+ * decrement helps filter out false positives of large -+ * bursts, when some short-lived process (often due to -+ * the execution of commands by some service) happens -+ * to start and exit while a complex application is -+ * starting, and thus spawning several processes that -+ * do I/O (and that *must not* be treated as a large -+ * burst, see comments on bfq_handle_burst). -+ * -+ * In particular, the decrement is performed only if: -+ * 1) bfqq is not a merged queue, because, if it is, -+ * then this free of bfqq is not triggered by the exit -+ * of the process bfqq is associated with, but exactly -+ * by the fact that bfqq has just been merged. -+ * 2) burst_size is greater than 0, to handle -+ * unbalanced decrements. Unbalanced decrements may -+ * happen in te following case: bfqq is inserted into -+ * the current burst list--without incrementing -+ * bust_size--because of a split, but the current -+ * burst list is not the burst list bfqq belonged to -+ * (see comments on the case of a split in -+ * bfq_set_request). 
-+ */ -+ if (bfqq->bic && bfqq->bfqd->burst_size > 0) -+ bfqq->bfqd->burst_size--; - } - - if (bfqq->bfqd) -@@ -4940,6 +4967,34 @@ static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd, - "large burst"); - bfq_clear_bfqq_in_large_burst(bfqq); - if (bic->was_in_burst_list) -+ /* -+ * If bfqq was in the current -+ * burst list before being -+ * merged, then we have to add -+ * it back. And we do not need -+ * to increase burst_size, as -+ * we did not decrement -+ * burst_size when we removed -+ * bfqq from the burst list as -+ * a consequence of a merge -+ * (see comments in -+ * bfq_put_queue). In this -+ * respect, it would be rather -+ * costly to know whether the -+ * current burst list is still -+ * the same burst list from -+ * which bfqq was removed on -+ * the merge. To avoid this -+ * cost, if bfqq was in a -+ * burst list, then we add -+ * bfqq to the current burst -+ * list without any further -+ * check. This can cause -+ * inappropriate insertions, -+ * but rarely enough to not -+ * harm the detection of large -+ * bursts significantly. -+ */ - hlist_add_head(&bfqq->burst_list_node, - &bfqd->burst_list); - } -diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c -index 93034dd7b801..4bbd7f4c0154 100644 ---- a/block/bfq-sq-iosched.c -+++ b/block/bfq-sq-iosched.c -@@ -3945,9 +3945,36 @@ static void bfq_put_queue(struct bfq_queue *bfqq) - BUG_ON(bfqq->entity.tree); - BUG_ON(bfq_bfqq_busy(bfqq)); - -- if (bfq_bfqq_sync(bfqq) && !hlist_unhashed(&bfqq->burst_list_node)) { -+ if (!hlist_unhashed(&bfqq->burst_list_node)) { - hlist_del_init(&bfqq->burst_list_node); -- bfqq->bfqd->burst_size--; -+ /* -+ * Decrement also burst size after the removal, if the -+ * process associated with bfqq is exiting, and thus -+ * does not contribute to the burst any longer. This -+ * decrement helps filter out false positives of large -+ * bursts, when some short-lived process (often due to -+ * the execution of commands by some service) happens -+ * to start and exit while a complex application is -+ * starting, and thus spawning several processes that -+ * do I/O (and that *must not* be treated as a large -+ * burst, see comments on bfq_handle_burst). -+ * -+ * In particular, the decrement is performed only if: -+ * 1) bfqq is not a merged queue, because, if it is, -+ * then this free of bfqq is not triggered by the exit -+ * of the process bfqq is associated with, but exactly -+ * by the fact that bfqq has just been merged. -+ * 2) burst_size is greater than 0, to handle -+ * unbalanced decrements. Unbalanced decrements may -+ * happen in te following case: bfqq is inserted into -+ * the current burst list--without incrementing -+ * bust_size--because of a split, but the current -+ * burst list is not the burst list bfqq belonged to -+ * (see comments on the case of a split in -+ * bfq_set_request). -+ */ -+ if (bfqq->bic && bfqq->bfqd->burst_size > 0) -+ bfqq->bfqd->burst_size--; - } - - bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p freed", bfqq); -@@ -4691,6 +4718,34 @@ static int bfq_set_request(struct request_queue *q, struct request *rq, - "large burst"); - bfq_clear_bfqq_in_large_burst(bfqq); - if (bic->was_in_burst_list) -+ /* -+ * If bfqq was in the current -+ * burst list before being -+ * merged, then we have to add -+ * it back. And we do not need -+ * to increase burst_size, as -+ * we did not decrement -+ * burst_size when we removed -+ * bfqq from the burst list as -+ * a consequence of a merge -+ * (see comments in -+ * bfq_put_queue). 
In this -+ * respect, it would be rather -+ * costly to know whether the -+ * current burst list is still -+ * the same burst list from -+ * which bfqq was removed on -+ * the merge. To avoid this -+ * cost, if bfqq was in a -+ * burst list, then we add -+ * bfqq to the current burst -+ * list without any further -+ * check. This can cause -+ * inappropriate insertions, -+ * but rarely enough to not -+ * harm the detection of large -+ * bursts significantly. -+ */ - hlist_add_head(&bfqq->burst_list_node, - &bfqd->burst_list); - } - -From 09adbd0f46f4ba395964b35bf611b7cc3dd84b4d Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Mon, 30 Oct 2017 16:50:50 +0100 -Subject: [PATCH 48/51] doc, block, bfq-mq: update max IOPS sustainable with - BFQ - -We have investigated more deeply the performance of BFQ, in terms of -number of IOPS that can be processed by the CPU when BFQ is used as -I/O scheduler. In more detail, using the script [1], we have measured -the number of IOPS reached on top of a null block device configured -with zero latency, as a function of the workload (sequential read, -sequential write, random read, random write) and of the system (we -considered desktops, laptops and embedded systems). - -Basing on the resulting figures, with this commit we update the -current, conservative IOPS range reported in BFQ documentation. In -particular, the documentation now reports, for each of three different -systems, the lowest number of IOPS obtained for that system with the -above test (namely, the value obtained with the workload leading to -the lowest IOPS). - -[1] https://github.com/Algodev-github/IOSpeed - -Signed-off-by: Paolo Valente -Signed-off-by: Luca Miccio ---- - Documentation/block/bfq-iosched.txt | 19 +++++++++++++------ - 1 file changed, 13 insertions(+), 6 deletions(-) - -diff --git a/Documentation/block/bfq-iosched.txt b/Documentation/block/bfq-iosched.txt -index dcfe15523da3..595ff7a5ff34 100644 ---- a/Documentation/block/bfq-iosched.txt -+++ b/Documentation/block/bfq-iosched.txt -@@ -29,12 +29,19 @@ for that device, by setting low_latency to 0. See Section 3 for - details on how to configure BFQ for the desired tradeoff between - latency and throughput, or on how to maximize throughput. - --On average CPUs, the current version of BFQ can handle devices --performing at most ~30K IOPS; at most ~50 KIOPS on faster CPUs. As a --reference, 30-50 KIOPS correspond to very high bandwidths with --sequential I/O (e.g., 8-12 GB/s if I/O requests are 256 KB large), and --to 120-200 MB/s with 4KB random I/O. BFQ is currently being tested on --multi-queue devices too. -+BFQ has a non-null overhead, which limits the maximum IOPS that the -+CPU can process for a device scheduled with BFQ. To give an idea of -+the limits on slow or average CPUs, here are BFQ limits for three -+different CPUs, on, respectively, an average laptop, an old desktop, -+and a cheap embedded system, in case full hierarchical support is -+enabled (i.e., CONFIG_BFQ_SQ_GROUP_IOSCHED is set for bfq-sq, or -+CONFIG_MQ_BFQ_GROUP_IOSCHED is set for bfq-mq, or, finally, -+CONFIG_BFQ_GROUP_IOSCHED is set for bfq): -+- Intel i7-4850HQ: 250 KIOPS -+- AMD A8-3850: 170 KIOPS -+- ARM CortexTM-A53 Octa-core: 45 KIOPS -+ -+BFQ works for multi-queue devices too (bfq and bfq-mq instances). - - The table of contents follows. Impatients can just jump to Section 3. 
- - -From be94f97b577dc587593185224a7718aa59ac43f7 Mon Sep 17 00:00:00 2001 -From: Luca Miccio -Date: Tue, 31 Oct 2017 09:50:11 +0100 -Subject: [PATCH 49/51] block, bfq-mq: add missing invocations of - bfqg_stats_update_io_add/remove - -bfqg_stats_update_io_add and bfqg_stats_update_io_remove are to be -invoked, respectively, when an I/O request enters and when an I/O -request exits the scheduler. Unfortunately, bfq-mq does not fully comply -with this scheme, because it does not invoke these functions for -requests that are inserted into or extracted from its priority -dispatch list. This commit fixes this mistake. - -Signed-off-by: Paolo Valente -Signed-off-by: Luca Miccio ---- - block/bfq-mq-iosched.c | 24 +++++++++++++++++++----- - 1 file changed, 19 insertions(+), 5 deletions(-) - -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -index 816bac6cdd3d..fbf28804c220 100644 ---- a/block/bfq-mq-iosched.c -+++ b/block/bfq-mq-iosched.c -@@ -1394,7 +1394,6 @@ static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd, - BUG_ON(bfqq->entity.budget < bfqq->entity.service); - - BUG_ON(bfqq == bfqd->in_service_queue); -- bfqg_stats_update_io_add(bfqq_group(RQ_BFQQ(rq)), bfqq, rq->cmd_flags); - - /* - * bfqq deserves to be weight-raised if: -@@ -1734,7 +1733,6 @@ static void bfq_remove_request(struct request_queue *q, - BUG_ON(bfqq->meta_pending == 0); - bfqq->meta_pending--; - } -- bfqg_stats_update_io_remove(bfqq_group(bfqq), rq->cmd_flags); - } - - static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio) -@@ -1879,6 +1877,7 @@ static void bfq_requests_merged(struct request_queue *q, struct request *rq, - bfqq->next_rq = rq; - - bfq_remove_request(q, next); -+ bfqg_stats_update_io_remove(bfqq_group(bfqq), next->cmd_flags); - - spin_unlock_irq(&bfqq->bfqd->lock); - end: -@@ -4077,6 +4076,10 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) - spin_lock_irq(&bfqd->lock); - - rq = __bfq_dispatch_request(hctx); -+ if (rq && RQ_BFQQ(rq)) -+ bfqg_stats_update_io_remove(bfqq_group(RQ_BFQQ(rq)), -+ rq->cmd_flags); -+ - spin_unlock_irq(&bfqd->lock); - - return rq; -@@ -4634,6 +4637,7 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, - { - struct request_queue *q = hctx->queue; - struct bfq_data *bfqd = q->elevator->elevator_data; -+ struct bfq_queue *bfqq = RQ_BFQQ(rq); - - spin_lock_irq(&bfqd->lock); - if (blk_mq_sched_try_insert_merge(q, rq)) { -@@ -4647,8 +4651,6 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, - - spin_lock_irq(&bfqd->lock); - if (at_head || blk_rq_is_passthrough(rq)) { -- struct bfq_queue *bfqq = RQ_BFQQ(rq); -- - if (at_head) - list_add(&rq->queuelist, &bfqd->dispatch); - else -@@ -4668,6 +4670,12 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, - rq->rq_flags &= ~RQF_GOT; - - __bfq_insert_request(bfqd, rq); -+ /* -+ * Update bfqq, because, if a queue merge has occurred -+ * in __bfq_insert_request, then rq has been -+ * redirected into a new queue. 
-+ */ -+ bfqq = RQ_BFQQ(rq); - - if (rq_mergeable(rq)) { - elv_rqhash_add(q, rq); -@@ -4676,6 +4684,9 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, - } - } - -+ if (bfqq) -+ bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, rq->cmd_flags); -+ - spin_unlock_irq(&bfqd->lock); - } - -@@ -4893,8 +4904,11 @@ static void bfq_finish_request(struct request *rq) - BUG_ON(in_interrupt()); - - assert_spin_locked(&bfqd->lock); -- if (!RB_EMPTY_NODE(&rq->rb_node)) -+ if (!RB_EMPTY_NODE(&rq->rb_node)) { - bfq_remove_request(rq->q, rq); -+ bfqg_stats_update_io_remove(bfqq_group(bfqq), -+ rq->cmd_flags); -+ } - bfq_put_rq_priv_body(bfqq); - } - - -From 8659a1549d2bf241129a0f7c90429bddd9c2bc53 Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Wed, 8 Nov 2017 19:07:40 +0100 -Subject: [PATCH 50/51] block, bfq-mq: update blkio stats outside the scheduler - lock - -bfq-mq invokes various blkg_*stats_* functions to update the statistics -contained in the special files blkio.bfq-mq.* in the blkio controller -groups, i.e., the I/O accounting related to the proportional-share -policy provided by bfq-mq. The execution of these functions takes a -considerable percentage, about 40%, of the total per-request execution -time of bfq-mq (i.e., of the sum of the execution time of all the bfq-mq -functions that have to be executed to process an I/O request from its -creation to its destruction). This reduces the request-processing -rate sustainable by bfq-mq noticeably, even on a multicore CPU. In fact, -the bfq-mq functions that invoke blkg_*stats_* functions cannot be -executed in parallel with the rest of the code of bfq-mq, because -both are executed under the same same per-device scheduler lock. - -To reduce this slowdown, this commit moves, wherever possible, the -invocation of these functions (more precisely, of the bfq-mq functions -that invoke blkg_*stats_* functions) outside the critical sections -protected by the scheduler lock. - -With this change, and with all blkio.bfq-mq.* statistics enabled, the -throughput grows, e.g., from 250 to 310 KIOPS (+25%) on an Intel -i7-4850HQ, in case of 8 threads doing random I/O in parallel on -null_blk, with the latter configured with 0 latency. We obtained the -same or higher throughput boosts, up to +30%, with other processors -(some figures are reported in the documentation). For our tests, we -used the script [1], with which our results can be easily reproduced. - -NOTE. This commit still protects the invocation of blkg_*stats_* -functions with the request_queue lock, because the group these -functions are invoked on may otherwise disappear before or while these -functions are executed. Fortunately, tests without even this lock -show, by difference, that the serialization caused by this lock has a -little impact (at most ~5% of throughput reduction). - -[1] https://github.com/Algodev-github/IOSpeed - -Signed-off-by: Paolo Valente -Signed-off-by: Luca Miccio ---- - Documentation/block/bfq-iosched.txt | 18 ++++-- - block/bfq-mq-iosched.c | 112 +++++++++++++++++++++++++++++++----- - block/bfq-sched.c | 2 + - 3 files changed, 112 insertions(+), 20 deletions(-) - -diff --git a/Documentation/block/bfq-iosched.txt b/Documentation/block/bfq-iosched.txt -index 595ff7a5ff34..c816c595082d 100644 ---- a/Documentation/block/bfq-iosched.txt -+++ b/Documentation/block/bfq-iosched.txt -@@ -31,16 +31,22 @@ latency and throughput, or on how to maximize throughput. 
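The locking strategy described in the commit message above boils down to: take the hot per-device scheduler lock only for the scheduling decision itself, remember the little state the statistics need, and update the counters afterwards under a different, less contended lock. Below is a minimal standalone sketch of this pattern in plain pthreads (illustrative names and types, not kernel code; build with -pthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Models bfqd->lock: held only while making the scheduling decision. */
static pthread_spinlock_t sched_lock;
/* Models the request_queue lock: protects only the statistics. */
static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER;

static int queued;		/* protected by sched_lock */
static long dispatched_stat;	/* protected by stats_lock */

static bool dispatch_one(void)
{
	bool got = false;

	pthread_spin_lock(&sched_lock);
	if (queued > 0) {		/* scheduling decision only */
		queued--;
		got = true;
	}
	pthread_spin_unlock(&sched_lock);	/* hot lock released early */

	if (got) {			/* accounting done outside the hot lock */
		pthread_mutex_lock(&stats_lock);
		dispatched_stat++;
		pthread_mutex_unlock(&stats_lock);
	}
	return got;
}

int main(void)
{
	pthread_spin_init(&sched_lock, PTHREAD_PROCESS_PRIVATE);
	queued = 3;
	while (dispatch_one())
		;
	printf("dispatched %ld requests\n", dispatched_stat);
	return 0;
}

The point is not the toy counters themselves, but that the stats update no longer serializes with the code running under the scheduler lock.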
- - BFQ has a non-null overhead, which limits the maximum IOPS that the - CPU can process for a device scheduled with BFQ. To give an idea of --the limits on slow or average CPUs, here are BFQ limits for three --different CPUs, on, respectively, an average laptop, an old desktop, --and a cheap embedded system, in case full hierarchical support is --enabled (i.e., CONFIG_BFQ_SQ_GROUP_IOSCHED is set for bfq-sq, or --CONFIG_MQ_BFQ_GROUP_IOSCHED is set for bfq-mq, or, finally, --CONFIG_BFQ_GROUP_IOSCHED is set for bfq): -+the limits on slow or average CPUs, here are, first, the limits of -+bfq-sq for three different CPUs, on, respectively, an average laptop, -+an old desktop, and a cheap embedded system, in case full hierarchical -+support is enabled (i.e., CONFIG_BFQ_SQ_GROUP_IOSCHED is set): - - Intel i7-4850HQ: 250 KIOPS - - AMD A8-3850: 170 KIOPS - - ARM CortexTM-A53 Octa-core: 45 KIOPS - -+bfq-mq and bfq instances reach, instead, a higher sustainable -+throughput. Their limits, on the same systems as above, are, with full -+hierarchical support enabled (i.e., CONFIG_MQ_BFQ_GROUP_IOSCHED set -+for bfq-mq, or CONFIG_BFQ_GROUP_IOSCHED set for bfq): -+- Intel i7-4850HQ: 310 KIOPS -+- AMD A8-3850: 200 KIOPS -+- ARM CortexTM-A53 Octa-core: 56 KIOPS -+ - BFQ works for multi-queue devices too (bfq and bfq-mq instances). - - The table of contents follows. Impatients can just jump to Section 3. -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -index fbf28804c220..ab3b83d612c2 100644 ---- a/block/bfq-mq-iosched.c -+++ b/block/bfq-mq-iosched.c -@@ -1822,7 +1822,7 @@ static void bfq_request_merged(struct request_queue *q, struct request *req, - bfqq->next_rq = next_rq; - - bfq_log_bfqq(bfqd, bfqq, -- "requests_merged: req %p prev %p next_rq %p bfqq %p", -+ "request_merged: req %p prev %p next_rq %p bfqq %p", - req, prev, next_rq, bfqq); - - /* -@@ -2415,7 +2415,6 @@ static void __bfq_set_in_service_queue(struct bfq_data *bfqd, - struct bfq_queue *bfqq) - { - if (bfqq) { -- bfqg_stats_update_avg_queue_size(bfqq_group(bfqq)); - bfq_clear_bfqq_fifo_expire(bfqq); - - bfqd->budgets_assigned = (bfqd->budgets_assigned*7 + 256) / 8; -@@ -3784,7 +3783,6 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd) - */ - bfq_clear_bfqq_wait_request(bfqq); - hrtimer_try_to_cancel(&bfqd->idle_slice_timer); -- bfqg_stats_update_idle_time(bfqq_group(bfqq)); - } - goto keep_queue; - } -@@ -4072,16 +4070,67 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) - { - struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; - struct request *rq; -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ struct bfq_queue *in_serv_queue, *bfqq; -+ bool waiting_rq, idle_timer_disabled; -+#endif - - spin_lock_irq(&bfqd->lock); - -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ in_serv_queue = bfqd->in_service_queue; -+ waiting_rq = in_serv_queue && bfq_bfqq_wait_request(in_serv_queue); -+ - rq = __bfq_dispatch_request(hctx); -- if (rq && RQ_BFQQ(rq)) -- bfqg_stats_update_io_remove(bfqq_group(RQ_BFQQ(rq)), -- rq->cmd_flags); - -+ idle_timer_disabled = -+ waiting_rq && !bfq_bfqq_wait_request(in_serv_queue); -+ -+#else -+ rq = __bfq_dispatch_request(hctx); -+#endif - spin_unlock_irq(&bfqd->lock); - -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ bfqq = rq ? RQ_BFQQ(rq) : NULL; -+ if (!idle_timer_disabled && !bfqq) -+ return rq; -+ -+ /* -+ * rq and bfqq are guaranteed to exist until this function -+ * ends, for the following reasons. 
First, rq can be -+ * dispatched to the device, and then can be completed and -+ * freed, only after this function ends. Second, rq cannot be -+ * merged (and thus freed because of a merge) any longer, -+ * because it has already started. Thus rq cannot be freed -+ * before this function ends, and, since rq has a reference to -+ * bfqq, the same guarantee holds for bfqq too. -+ * -+ * In addition, the following queue lock guarantees that -+ * bfqq_group(bfqq) exists as well. -+ */ -+ spin_lock_irq(hctx->queue->queue_lock); -+ if (idle_timer_disabled) -+ /* -+ * Since the idle timer has been disabled, -+ * in_serv_queue contained some request when -+ * __bfq_dispatch_request was invoked above, which -+ * implies that rq was picked exactly from -+ * in_serv_queue. Thus in_serv_queue == bfqq, and is -+ * therefore guaranteed to exist because of the above -+ * arguments. -+ */ -+ bfqg_stats_update_idle_time(bfqq_group(in_serv_queue)); -+ if (bfqq) { -+ struct bfq_group *bfqg = bfqq_group(bfqq); -+ -+ bfqg_stats_update_avg_queue_size(bfqg); -+ bfqg_stats_set_start_empty_time(bfqg); -+ bfqg_stats_update_io_remove(bfqg, rq->cmd_flags); -+ } -+ spin_unlock_irq(hctx->queue->queue_lock); -+#endif -+ - return rq; - } - -@@ -4200,7 +4249,6 @@ static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync) - unsigned long flags; - - spin_lock_irqsave(&bfqd->lock, flags); -- - bfq_exit_bfqq(bfqd, bfqq); - bic_set_bfqq(bic, NULL, is_sync); - spin_unlock_irqrestore(&bfqd->lock, flags); -@@ -4554,7 +4602,6 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq, - */ - bfq_clear_bfqq_wait_request(bfqq); - hrtimer_try_to_cancel(&bfqd->idle_slice_timer); -- bfqg_stats_update_idle_time(bfqq_group(bfqq)); - - /* - * The queue is not empty, because a new request just -@@ -4569,9 +4616,11 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq, - } - } - --static void __bfq_insert_request(struct bfq_data *bfqd, struct request *rq) -+/* returns true if it causes the idle timer to be disabled */ -+static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq) - { - struct bfq_queue *bfqq = RQ_BFQQ(rq), *new_bfqq; -+ bool waiting, idle_timer_disabled = false; - BUG_ON(!bfqq); - - assert_spin_locked(&bfqd->lock); -@@ -4624,12 +4673,16 @@ static void __bfq_insert_request(struct bfq_data *bfqd, struct request *rq) - } - } - -+ waiting = bfqq && bfq_bfqq_wait_request(bfqq); - bfq_add_request(rq); -+ idle_timer_disabled = waiting && !bfq_bfqq_wait_request(bfqq); - - rq->fifo_time = ktime_get_ns() + bfqd->bfq_fifo_expire[rq_is_sync(rq)]; - list_add_tail(&rq->queuelist, &bfqq->fifo); - - bfq_rq_enqueued(bfqd, bfqq, rq); -+ -+ return idle_timer_disabled; - } - - static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, -@@ -4638,6 +4691,10 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, - struct request_queue *q = hctx->queue; - struct bfq_data *bfqd = q->elevator->elevator_data; - struct bfq_queue *bfqq = RQ_BFQQ(rq); -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ bool idle_timer_disabled = false; -+ unsigned int cmd_flags; -+#endif - - spin_lock_irq(&bfqd->lock); - if (blk_mq_sched_try_insert_merge(q, rq)) { -@@ -4669,13 +4726,17 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, - BUG_ON(!(rq->rq_flags & RQF_GOT)); - rq->rq_flags &= ~RQF_GOT; - -- __bfq_insert_request(bfqd, rq); -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ idle_timer_disabled = __bfq_insert_request(bfqd, rq); - /* - * Update 
bfqq, because, if a queue merge has occurred - * in __bfq_insert_request, then rq has been - * redirected into a new queue. - */ - bfqq = RQ_BFQQ(rq); -+#else -+ __bfq_insert_request(bfqd, rq); -+#endif - - if (rq_mergeable(rq)) { - elv_rqhash_add(q, rq); -@@ -4683,11 +4744,34 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, - q->last_merge = rq; - } - } -- -- if (bfqq) -- bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, rq->cmd_flags); -- -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ /* -+ * Cache cmd_flags before releasing scheduler lock, because rq -+ * may disappear afterwards (for example, because of a request -+ * merge). -+ */ -+ cmd_flags = rq->cmd_flags; -+#endif - spin_unlock_irq(&bfqd->lock); -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ if (!bfqq) -+ return; -+ /* -+ * bfqq still exists, because it can disappear only after -+ * either it is merged with another queue, or the process it -+ * is associated with exits. But both actions must be taken by -+ * the same process currently executing this flow of -+ * instruction. -+ * -+ * In addition, the following queue lock guarantees that -+ * bfqq_group(bfqq) exists as well. -+ */ -+ spin_lock_irq(q->queue_lock); -+ bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, cmd_flags); -+ if (idle_timer_disabled) -+ bfqg_stats_update_idle_time(bfqq_group(bfqq)); -+ spin_unlock_irq(q->queue_lock); -+#endif - } - - static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx, -diff --git a/block/bfq-sched.c b/block/bfq-sched.c -index e4a2553a2d2c..616c0692335a 100644 ---- a/block/bfq-sched.c -+++ b/block/bfq-sched.c -@@ -949,9 +949,11 @@ static void bfq_bfqq_served(struct bfq_queue *bfqq, int served) - st->vtime += bfq_delta(served, st->wsum); - bfq_forget_idle(st); - } -+#ifndef BFQ_MQ - #ifdef BFQ_GROUP_IOSCHED_ENABLED - bfqg_stats_set_start_empty_time(bfqq_group(bfqq)); - #endif -+#endif - st = bfq_entity_service_tree(&bfqq->entity); - bfq_log_bfqq(bfqq->bfqd, bfqq, "bfqq_served %d secs, vtime %llu on %p", - served, ((st->vtime>>10)*1000)>>12, st); - -From abdfb33a3325df55ec0261fd824ca61ddac13575 Mon Sep 17 00:00:00 2001 -From: Luca Miccio -Date: Wed, 8 Nov 2017 19:07:41 +0100 -Subject: [PATCH 51/51] block, bfq-sq, bfq-mq: move debug blkio stats behind - CONFIG_DEBUG_BLK_CGROUP - -BFQ (both bfq-mq and bfq-sq) currently creates, and updates, its own -instance of the whole set of blkio statistics that cfq creates. Yet, -from the comments of Tejun Heo in [1], it turned out that most of -these statistics are meant/useful only for debugging. This commit -makes BFQ create the latter, debugging statistics only if the option -CONFIG_DEBUG_BLK_CGROUP is set. - -By doing so, this commit also enables BFQ to enjoy a high perfomance -boost. The reason is that, if CONFIG_DEBUG_BLK_CGROUP is not set, then -BFQ has to update far fewer statistics, and, in particular, not the -heaviest to update. To give an idea of the benefits, if -CONFIG_DEBUG_BLK_CGROUP is not set, then, on an Intel i7-4850HQ, and -with 8 threads doing random I/O in parallel on null_blk (configured -with 0 latency), the throughput of bfq-mq grows from 310 to 400 KIOPS -(+30%). We have measured similar or even much higher boosts with other -CPUs: e.g., +45% with an ARM CortexTM-A53 Octa-core. Our results have -been obtained and can be reproduced very easily with the script in [1]. 
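The build-time switch described above relies on a standard C pattern, also visible in the hunks that follow: when the configuration option is off, the accounting helpers are defined as empty static inline stubs, so the call sites need no #ifdefs and the compiler drops the calls entirely. A minimal standalone illustration (the DEBUG_STATS macro and the helper names are invented, not the real kernel symbols):

#include <stdio.h>

#ifdef DEBUG_STATS
static long ios_merged;

static inline void stats_update_io_merged(void)
{
	ios_merged++;			/* real accounting */
}

static inline long stats_read_io_merged(void)
{
	return ios_merged;
}
#else
/* Option off: helpers compile to nothing, call sites stay unchanged. */
static inline void stats_update_io_merged(void) { }
static inline long stats_read_io_merged(void) { return 0; }
#endif

int main(void)
{
	for (int i = 0; i < 5; i++)
		stats_update_io_merged();	/* no #ifdef needed here */
	printf("merged: %ld\n", stats_read_io_merged());
	return 0;
}

Build with -DDEBUG_STATS to enable the counters, or without it to compile them away.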
- -[1] https://www.spinics.net/lists/linux-block/msg18943.html - -Reported-by: Tejun Heo -Signed-off-by: Luca Miccio -Signed-off-by: Paolo Valente ---- - Documentation/block/bfq-iosched.txt | 59 ++++++++++--- - block/bfq-cgroup-included.c | 163 ++++++++++++++++++++---------------- - block/bfq-mq-iosched.c | 14 ++-- - block/bfq-mq.h | 4 +- - block/bfq.h | 4 +- - 5 files changed, 147 insertions(+), 97 deletions(-) - -diff --git a/Documentation/block/bfq-iosched.txt b/Documentation/block/bfq-iosched.txt -index c816c595082d..30ef2dba85ad 100644 ---- a/Documentation/block/bfq-iosched.txt -+++ b/Documentation/block/bfq-iosched.txt -@@ -29,24 +29,41 @@ for that device, by setting low_latency to 0. See Section 3 for - details on how to configure BFQ for the desired tradeoff between - latency and throughput, or on how to maximize throughput. - --BFQ has a non-null overhead, which limits the maximum IOPS that the --CPU can process for a device scheduled with BFQ. To give an idea of --the limits on slow or average CPUs, here are, first, the limits of --bfq-sq for three different CPUs, on, respectively, an average laptop, -+BFQ has a non-null overhead, which limits the maximum IOPS that a CPU -+can process for a device scheduled with BFQ. To give an idea of the -+limits on slow or average CPUs, here are, first, the limits of bfq-mq -+and bfq for three different CPUs, on, respectively, an average laptop, - an old desktop, and a cheap embedded system, in case full hierarchical --support is enabled (i.e., CONFIG_BFQ_SQ_GROUP_IOSCHED is set): --- Intel i7-4850HQ: 250 KIOPS --- AMD A8-3850: 170 KIOPS --- ARM CortexTM-A53 Octa-core: 45 KIOPS -- --bfq-mq and bfq instances reach, instead, a higher sustainable --throughput. Their limits, on the same systems as above, are, with full --hierarchical support enabled (i.e., CONFIG_MQ_BFQ_GROUP_IOSCHED set --for bfq-mq, or CONFIG_BFQ_GROUP_IOSCHED set for bfq): -+support is enabled (i.e., CONFIG_MQ_BFQ_GROUP_IOSCHED is set for -+bfq-mq, or CONFIG_BFQ_GROUP_IOSCHED is set for bfq), but -+CONFIG_DEBUG_BLK_CGROUP is not set (Section 4-2): -+- Intel i7-4850HQ: 400 KIOPS -+- AMD A8-3850: 250 KIOPS -+- ARM CortexTM-A53 Octa-core: 80 KIOPS -+ -+As for bfq-sq, it cannot reach the above IOPS, because of the -+inherent, lower parallelism of legacy blk and of the components within -+it (including bfq-sq itself). In particular, results with -+CONFIG_DEBUG_BLK_CGROUP unset are rather fluctuating. The limits -+reported below for the case CONFIG_DEBUG_BLK_CGROUP set will however -+provide a lower bound to the limits of bfq-sq. -+ -+Turning back to bfq-mq and bfq, If CONFIG_DEBUG_BLK_CGROUP is set (and -+of course full hierarchical support is enabled), then the sustainable -+throughput with bfq-mq and bfq decreases, because all blkio.bfq* -+statistics are created and updated (Section 4-2). For bfq-mq and bfq, -+this leads to the following maximum sustainable throughputs, on the -+same systems as above: - - Intel i7-4850HQ: 310 KIOPS - - AMD A8-3850: 200 KIOPS - - ARM CortexTM-A53 Octa-core: 56 KIOPS - -+Finally, if CONFIG_DEBUG_BLK_CGROUP is set (and full hierarchical -+support is enabled), then bfq-sq exhibits the following limits: -+- Intel i7-4850HQ: 250 KIOPS -+- AMD A8-3850: 170 KIOPS -+- ARM CortexTM-A53 Octa-core: 45 KIOPS -+ - BFQ works for multi-queue devices too (bfq and bfq-mq instances). - - The table of contents follows. Impatients can just jump to Section 3. -@@ -524,6 +541,22 @@ BFQ-specific files is "blkio.bfqX." 
or "io.bfqX.", where X can be "" - to set the weight of a group with the mainline BFQ is blkio.bfq.weight - or io.bfq.weight. - -+As for cgroups-v1 (blkio controller), the exact set of stat files -+created, and kept up-to-date by bfq*, depends on whether -+CONFIG_DEBUG_BLK_CGROUP is set. If it is set, then bfq* creates all -+the stat files documented in -+Documentation/cgroup-v1/blkio-controller.txt. If, instead, -+CONFIG_DEBUG_BLK_CGROUP is not set, then bfq* creates only the files -+blkio.bfq*.io_service_bytes -+blkio.bfq*.io_service_bytes_recursive -+blkio.bfq*.io_serviced -+blkio.bfq*.io_serviced_recursive -+ -+The value of CONFIG_DEBUG_BLK_CGROUP greatly influences the maximum -+throughput sustainable with bfq*, because updating the blkio.bfq* -+stats is rather costly, especially for some of the stats enabled by -+CONFIG_DEBUG_BLK_CGROUP. -+ - Parameters to set - ----------------- - -diff --git a/block/bfq-cgroup-included.c b/block/bfq-cgroup-included.c -index 631e53d9150d..562b0ce581a7 100644 ---- a/block/bfq-cgroup-included.c -+++ b/block/bfq-cgroup-included.c -@@ -15,7 +15,7 @@ - * file. - */ - --#ifdef BFQ_GROUP_IOSCHED_ENABLED -+#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP) - - /* bfqg stats flags */ - enum bfqg_stats_flags { -@@ -155,6 +155,63 @@ static void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg) - bfqg_stats_update_group_wait_time(stats); - } - -+static void bfqg_stats_update_io_add(struct bfq_group *bfqg, -+ struct bfq_queue *bfqq, unsigned int op) -+{ -+ blkg_rwstat_add(&bfqg->stats.queued, op, 1); -+ bfqg_stats_end_empty_time(&bfqg->stats); -+ if (!(bfqq == ((struct bfq_data *)bfqg->bfqd)->in_service_queue)) -+ bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq)); -+} -+ -+static void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) -+{ -+ blkg_rwstat_add(&bfqg->stats.queued, op, -1); -+} -+ -+static void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op) -+{ -+ blkg_rwstat_add(&bfqg->stats.merged, op, 1); -+} -+ -+static void bfqg_stats_update_completion(struct bfq_group *bfqg, -+ uint64_t start_time, uint64_t io_start_time, unsigned int op) -+{ -+ struct bfqg_stats *stats = &bfqg->stats; -+ unsigned long long now = sched_clock(); -+ -+ if (time_after64(now, io_start_time)) -+ blkg_rwstat_add(&stats->service_time, op, -+ now - io_start_time); -+ if (time_after64(io_start_time, start_time)) -+ blkg_rwstat_add(&stats->wait_time, op, -+ io_start_time - start_time); -+} -+ -+#else /* BFQ_GROUP_IOSCHED_ENABLED && CONFIG_DEBUG_BLK_CGROUP */ -+ -+static inline void bfqg_stats_update_io_add(struct bfq_group *bfqg, -+ struct bfq_queue *bfqq, unsigned int op) { } -+static inline void -+bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) { } -+static inline void -+bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op) { } -+static inline void bfqg_stats_update_completion(struct bfq_group *bfqg, -+ uint64_t start_time, uint64_t io_start_time, -+ unsigned int op) { } -+static inline void -+bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg, -+ struct bfq_group *curr_bfqg) { } -+static inline void bfqg_stats_end_empty_time(struct bfqg_stats *stats) { } -+static inline void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { } -+static inline void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) { } -+static inline void bfqg_stats_update_idle_time(struct bfq_group *bfqg) { } -+static inline void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { 
} -+static inline void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg) { } -+ -+#endif /* BFQ_GROUP_IOSCHED_ENABLED && CONFIG_DEBUG_BLK_CGROUP */ -+ -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - static struct blkcg_policy blkcg_policy_bfq; - - /* -@@ -247,44 +304,10 @@ static void bfqg_and_blkg_put(struct bfq_group *bfqg) - } - #endif - --static void bfqg_stats_update_io_add(struct bfq_group *bfqg, -- struct bfq_queue *bfqq, -- unsigned int op) --{ -- blkg_rwstat_add(&bfqg->stats.queued, op, 1); -- bfqg_stats_end_empty_time(&bfqg->stats); -- if (!(bfqq == ((struct bfq_data *)bfqg->bfqd)->in_service_queue)) -- bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq)); --} -- --static void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) --{ -- blkg_rwstat_add(&bfqg->stats.queued, op, -1); --} -- --static void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op) --{ -- blkg_rwstat_add(&bfqg->stats.merged, op, 1); --} -- --static void bfqg_stats_update_completion(struct bfq_group *bfqg, -- uint64_t start_time, uint64_t io_start_time, -- unsigned int op) --{ -- struct bfqg_stats *stats = &bfqg->stats; -- unsigned long long now = sched_clock(); -- -- if (time_after64(now, io_start_time)) -- blkg_rwstat_add(&stats->service_time, op, -- now - io_start_time); -- if (time_after64(io_start_time, start_time)) -- blkg_rwstat_add(&stats->wait_time, op, -- io_start_time - start_time); --} -- - /* @stats = 0 */ - static void bfqg_stats_reset(struct bfqg_stats *stats) - { -+#ifdef CONFIG_DEBUG_BLK_CGROUP - /* queued stats shouldn't be cleared */ - blkg_rwstat_reset(&stats->merged); - blkg_rwstat_reset(&stats->service_time); -@@ -296,6 +319,7 @@ static void bfqg_stats_reset(struct bfqg_stats *stats) - blkg_stat_reset(&stats->group_wait_time); - blkg_stat_reset(&stats->idle_time); - blkg_stat_reset(&stats->empty_time); -+#endif - } - - /* @to += @from */ -@@ -304,6 +328,7 @@ static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from) - if (!to || !from) - return; - -+#ifdef CONFIG_DEBUG_BLK_CGROUP - /* queued stats shouldn't be cleared */ - blkg_rwstat_add_aux(&to->merged, &from->merged); - blkg_rwstat_add_aux(&to->service_time, &from->service_time); -@@ -316,6 +341,7 @@ static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from) - blkg_stat_add_aux(&to->group_wait_time, &from->group_wait_time); - blkg_stat_add_aux(&to->idle_time, &from->idle_time); - blkg_stat_add_aux(&to->empty_time, &from->empty_time); -+#endif - } - - /* -@@ -367,6 +393,7 @@ static void bfq_init_entity(struct bfq_entity *entity, - - static void bfqg_stats_exit(struct bfqg_stats *stats) - { -+#ifdef CONFIG_DEBUG_BLK_CGROUP - blkg_rwstat_exit(&stats->merged); - blkg_rwstat_exit(&stats->service_time); - blkg_rwstat_exit(&stats->wait_time); -@@ -378,10 +405,12 @@ static void bfqg_stats_exit(struct bfqg_stats *stats) - blkg_stat_exit(&stats->group_wait_time); - blkg_stat_exit(&stats->idle_time); - blkg_stat_exit(&stats->empty_time); -+#endif - } - - static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp) - { -+#ifdef CONFIG_DEBUG_BLK_CGROUP - if (blkg_rwstat_init(&stats->merged, gfp) || - blkg_rwstat_init(&stats->service_time, gfp) || - blkg_rwstat_init(&stats->wait_time, gfp) || -@@ -396,6 +425,7 @@ static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp) - bfqg_stats_exit(stats); - return -ENOMEM; - } -+#endif - - return 0; - } -@@ -1003,6 +1033,7 @@ static ssize_t bfq_io_set_weight(struct kernfs_open_file *of, - return 
bfq_io_set_weight_legacy(of_css(of), NULL, weight); - } - -+#ifdef CONFIG_DEBUG_BLK_CGROUP - static int bfqg_print_stat(struct seq_file *sf, void *v) - { - blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat, -@@ -1108,6 +1139,7 @@ static int bfqg_print_avg_queue_size(struct seq_file *sf, void *v) - 0, false); - return 0; - } -+#endif /* CONFIG_DEBUG_BLK_CGROUP */ - - static struct bfq_group * - bfq_create_group_hierarchy(struct bfq_data *bfqd, int node) -@@ -1137,15 +1169,6 @@ static struct cftype bfq_blkcg_legacy_files[] = { - - /* statistics, covers only the tasks in the bfqg */ - { -- .name = BFQ_CGROUP_FNAME(time), -- .private = offsetof(struct bfq_group, stats.time), -- .seq_show = bfqg_print_stat, -- }, -- { -- .name = BFQ_CGROUP_FNAME(sectors), -- .seq_show = bfqg_print_stat_sectors, -- }, -- { - .name = BFQ_CGROUP_FNAME(io_service_bytes), - .private = (unsigned long)&blkcg_policy_bfq, - .seq_show = blkg_print_stat_bytes, -@@ -1155,6 +1178,16 @@ static struct cftype bfq_blkcg_legacy_files[] = { - .private = (unsigned long)&blkcg_policy_bfq, - .seq_show = blkg_print_stat_ios, - }, -+#ifdef CONFIG_DEBUG_BLK_CGROUP -+ { -+ .name = BFQ_CGROUP_FNAME(time), -+ .private = offsetof(struct bfq_group, stats.time), -+ .seq_show = bfqg_print_stat, -+ }, -+ { -+ .name = BFQ_CGROUP_FNAME(sectors), -+ .seq_show = bfqg_print_stat_sectors, -+ }, - { - .name = BFQ_CGROUP_FNAME(io_service_time), - .private = offsetof(struct bfq_group, stats.service_time), -@@ -1175,18 +1208,10 @@ static struct cftype bfq_blkcg_legacy_files[] = { - .private = offsetof(struct bfq_group, stats.queued), - .seq_show = bfqg_print_rwstat, - }, -+#endif /* CONFIG_DEBUG_BLK_CGROUP */ - - /* the same statictics which cover the bfqg and its descendants */ - { -- .name = BFQ_CGROUP_FNAME(time_recursive), -- .private = offsetof(struct bfq_group, stats.time), -- .seq_show = bfqg_print_stat_recursive, -- }, -- { -- .name = BFQ_CGROUP_FNAME(sectors_recursive), -- .seq_show = bfqg_print_stat_sectors_recursive, -- }, -- { - .name = BFQ_CGROUP_FNAME(io_service_bytes_recursive), - .private = (unsigned long)&blkcg_policy_bfq, - .seq_show = blkg_print_stat_bytes_recursive, -@@ -1196,6 +1221,16 @@ static struct cftype bfq_blkcg_legacy_files[] = { - .private = (unsigned long)&blkcg_policy_bfq, - .seq_show = blkg_print_stat_ios_recursive, - }, -+#ifdef CONFIG_DEBUG_BLK_CGROUP -+ { -+ .name = BFQ_CGROUP_FNAME(time_recursive), -+ .private = offsetof(struct bfq_group, stats.time), -+ .seq_show = bfqg_print_stat_recursive, -+ }, -+ { -+ .name = BFQ_CGROUP_FNAME(sectors_recursive), -+ .seq_show = bfqg_print_stat_sectors_recursive, -+ }, - { - .name = BFQ_CGROUP_FNAME(io_service_time_recursive), - .private = offsetof(struct bfq_group, stats.service_time), -@@ -1240,6 +1275,7 @@ static struct cftype bfq_blkcg_legacy_files[] = { - .private = offsetof(struct bfq_group, stats.dequeue), - .seq_show = bfqg_print_stat, - }, -+#endif /* CONFIG_DEBUG_BLK_CGROUP */ - { } /* terminate */ - }; - -@@ -1257,25 +1293,6 @@ static struct cftype bfq_blkg_files[] = { - - #else /* BFQ_GROUP_IOSCHED_ENABLED */ - --static inline void bfqg_stats_update_io_add(struct bfq_group *bfqg, -- struct bfq_queue *bfqq, unsigned int op) { } --static inline void --bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) { } --static inline void --bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op) { } --static inline void bfqg_stats_update_completion(struct bfq_group *bfqg, -- uint64_t start_time, uint64_t io_start_time, -- unsigned int 
op) { } --static inline void --bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg, -- struct bfq_group *curr_bfqg) { } --static inline void bfqg_stats_end_empty_time(struct bfqg_stats *stats) { } --static inline void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { } --static inline void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) { } --static inline void bfqg_stats_update_idle_time(struct bfq_group *bfqg) { } --static inline void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { } --static inline void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg) { } -- - static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, - struct bfq_group *bfqg) {} - -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -index ab3b83d612c2..0c09609a6099 100644 ---- a/block/bfq-mq-iosched.c -+++ b/block/bfq-mq-iosched.c -@@ -4070,14 +4070,14 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) - { - struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; - struct request *rq; --#ifdef BFQ_GROUP_IOSCHED_ENABLED -+#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP) - struct bfq_queue *in_serv_queue, *bfqq; - bool waiting_rq, idle_timer_disabled; - #endif - - spin_lock_irq(&bfqd->lock); - --#ifdef BFQ_GROUP_IOSCHED_ENABLED -+#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP) - in_serv_queue = bfqd->in_service_queue; - waiting_rq = in_serv_queue && bfq_bfqq_wait_request(in_serv_queue); - -@@ -4091,7 +4091,7 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) - #endif - spin_unlock_irq(&bfqd->lock); - --#ifdef BFQ_GROUP_IOSCHED_ENABLED -+#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP) - bfqq = rq ? 
RQ_BFQQ(rq) : NULL; - if (!idle_timer_disabled && !bfqq) - return rq; -@@ -4691,7 +4691,7 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, - struct request_queue *q = hctx->queue; - struct bfq_data *bfqd = q->elevator->elevator_data; - struct bfq_queue *bfqq = RQ_BFQQ(rq); --#ifdef BFQ_GROUP_IOSCHED_ENABLED -+#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP) - bool idle_timer_disabled = false; - unsigned int cmd_flags; - #endif -@@ -4726,7 +4726,7 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, - BUG_ON(!(rq->rq_flags & RQF_GOT)); - rq->rq_flags &= ~RQF_GOT; - --#ifdef BFQ_GROUP_IOSCHED_ENABLED -+#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP) - idle_timer_disabled = __bfq_insert_request(bfqd, rq); - /* - * Update bfqq, because, if a queue merge has occurred -@@ -4744,7 +4744,7 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, - q->last_merge = rq; - } - } --#ifdef BFQ_GROUP_IOSCHED_ENABLED -+#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP) - /* - * Cache cmd_flags before releasing scheduler lock, because rq - * may disappear afterwards (for example, because of a request -@@ -4753,7 +4753,7 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, - cmd_flags = rq->cmd_flags; - #endif - spin_unlock_irq(&bfqd->lock); --#ifdef BFQ_GROUP_IOSCHED_ENABLED -+#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP) - if (!bfqq) - return; - /* -diff --git a/block/bfq-mq.h b/block/bfq-mq.h -index 7ed2cc29be57..1cb05bb853d2 100644 ---- a/block/bfq-mq.h -+++ b/block/bfq-mq.h -@@ -784,7 +784,7 @@ enum bfqq_expiration { - - - struct bfqg_stats { --#ifdef BFQ_GROUP_IOSCHED_ENABLED -+#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP) - /* number of ios merged */ - struct blkg_rwstat merged; - /* total time spent on device in ns, may not be accurate w/ queueing */ -@@ -812,7 +812,7 @@ struct bfqg_stats { - uint64_t start_idle_time; - uint64_t start_empty_time; - uint16_t flags; --#endif -+#endif /* BFQ_GROUP_IOSCHED_ENABLED && CONFIG_DEBUG_BLK_CGROUP */ - }; - - #ifdef BFQ_GROUP_IOSCHED_ENABLED -diff --git a/block/bfq.h b/block/bfq.h -index 15d326f466b7..47cd4d5a8c32 100644 ---- a/block/bfq.h -+++ b/block/bfq.h -@@ -791,7 +791,7 @@ enum bfqq_expiration { - - - struct bfqg_stats { --#ifdef BFQ_GROUP_IOSCHED_ENABLED -+#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP) - /* number of ios merged */ - struct blkg_rwstat merged; - /* total time spent on device in ns, may not be accurate w/ queueing */ -@@ -819,7 +819,7 @@ struct bfqg_stats { - uint64_t start_idle_time; - uint64_t start_empty_time; - uint16_t flags; --#endif -+#endif /* BFQ_GROUP_IOSCHED_ENABLED && CONFIG_DEBUG_BLK_CGROUP */ - }; - - #ifdef BFQ_GROUP_IOSCHED_ENABLED diff --git a/sys-kernel/linux-image-redcore-lts/files/4.14-0002-BFQ-v8r12-20180404.patch b/sys-kernel/linux-image-redcore-lts/files/4.14-0002-BFQ-v8r12-20180404.patch deleted file mode 100644 index 104325d6..00000000 --- a/sys-kernel/linux-image-redcore-lts/files/4.14-0002-BFQ-v8r12-20180404.patch +++ /dev/null @@ -1,4611 +0,0 @@ -From 7bd365a925748767d7ed807e5498f90bae0ebc25 Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Tue, 14 Nov 2017 08:28:45 +0100 -Subject: [PATCH 01/23] block, bfq-mq: turn BUG_ON on request-size into WARN_ON - -BFQ has many checks of internal and external consistency. 
One of them -checks that an I/O request has still sectors to serve, if it happens -to be retired without being served. If the request has no sector to -serve, a BUG_ON signals the failure and causes the kernel to -terminate. Yet, from a crash report by a user [1], this condition may -happen to hold, in apparently correct functioning, for I/O with a -CD/DVD. - -To address this issue, this commit turns the above BUG_ON into a -WARN_ON. This commit also adds a companion WARN_ON on request -insertion into the scheduler. - -[1] https://groups.google.com/d/msg/bfq-iosched/DDOTJBroBa4/VyU1zUFtCgAJ - -Reported-by: Alexandre Frade -Signed-off-by: Paolo Valente ---- - block/bfq-mq-iosched.c | 4 +++- - 1 file changed, 3 insertions(+), 1 deletion(-) - -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -index 0c09609a6099..0fc757ae7a42 100644 ---- a/block/bfq-mq-iosched.c -+++ b/block/bfq-mq-iosched.c -@@ -1540,6 +1540,8 @@ static void bfq_add_request(struct request *rq) - - BUG_ON(!RQ_BFQQ(rq)); - BUG_ON(RQ_BFQQ(rq) != bfqq); -+ WARN_ON(blk_rq_sectors(rq) == 0); -+ - elv_rb_add(&bfqq->sort_list, rq); - - /* -@@ -4962,7 +4964,7 @@ static void bfq_finish_request(struct request *rq) - rq_io_start_time_ns(rq), - rq->cmd_flags); - -- BUG_ON(blk_rq_sectors(rq) == 0 && !(rq->rq_flags & RQF_STARTED)); -+ WARN_ON(blk_rq_sectors(rq) == 0 && !(rq->rq_flags & RQF_STARTED)); - - if (likely(rq->rq_flags & RQF_STARTED)) { - unsigned long flags; - -From 1097d368a20456c88acd75b3184c68df38e8f7b8 Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Sun, 12 Nov 2017 22:43:46 +0100 -Subject: [PATCH 02/23] block, bfq-sq, bfq-mq: consider also past I/O in soft - real-time detection - -BFQ privileges the I/O of soft real-time applications, such as video -players, to guarantee to these application a high bandwidth and a low -latency. In this respect, it is not easy to correctly detect when an -application is soft real-time. A particularly nasty false positive is -that of an I/O-bound application that occasionally happens to meet all -requirements to be deemed as soft real-time. After being detected as -soft real-time, such an application monopolizes the device. Fortunately, -BFQ will realize soon that the application is actually not soft -real-time and suspend every privilege. Yet, the application may happen -again to be wrongly detected as soft real-time, and so on. - -As highlighted by our tests, this problem causes BFQ to occasionally -fail to guarantee a high responsiveness, in the presence of heavy -background I/O workloads. The reason is that the background workload -happens to be detected as soft real-time, more or less frequently, -during the execution of the interactive task under test. To give an -idea, because of this problem, Libreoffice Writer occasionally takes 8 -seconds, instead of 3, to start up, if there are sequential reads and -writes in the background, on a Kingston SSDNow V300. - -This commit addresses this issue by leveraging the following facts. - -The reason why some applications are detected as soft real-time despite -all BFQ checks to avoid false positives, is simply that, during high -CPU or storage-device load, I/O-bound applications may happen to do -I/O slowly enough to meet all soft real-time requirements, and pass -all BFQ extra checks. Yet, this happens only for limited time periods: -slow-speed time intervals are usually interspersed between other time -intervals during which these applications do I/O at a very high speed. 
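In terms of code, the change described in the rest of this message reduces to lower-bounding the value returned by bfq_bfqq_softrt_next_start() with its own previous value, i.e., to taking the maximum of three terms instead of two. A standalone sketch of the resulting formula (the field names mirror the patch, but HZ and all values are made up):

#include <stdio.h>

#define HZ 250	/* arbitrary, just for the example */

static unsigned long max3(unsigned long a, unsigned long b, unsigned long c)
{
	unsigned long m = a > b ? a : b;

	return m > c ? m : c;
}

int main(void)
{
	unsigned long jiffies = 100000;		      /* "now" */
	unsigned long soft_rt_next_start = 130000;    /* left over from a high-speed interval */
	unsigned long last_idle_bklogged = 99000;
	unsigned long service_from_backlogged = 2048; /* sectors served since last idle */
	unsigned long wr_max_softrt_rate = 7000;      /* max sectors/sec for soft rt */
	unsigned long slice_idle_jiffies = 2;

	unsigned long next = max3(soft_rt_next_start,
				  last_idle_bklogged +
				  HZ * service_from_backlogged / wr_max_softrt_rate,
				  jiffies + slice_idle_jiffies + 4);

	printf("soft_rt_next_start = %lu (now = %lu)\n", next, jiffies);
	return 0;
}

With these numbers the old, high soft_rt_next_start (130000) dominates, so the queue keeps being filtered out during the low-speed interval, which is exactly the intended effect.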
-To exploit these facts, this commit introduces a little change, in the -detection of soft real-time behavior, to systematically consider also -the recent past: the higher the speed was in the recent past, the -later next I/O should arrive for the application to be considered as -soft real-time. At the beginning of a slow-speed interval, the minimum -arrival time allowed for the next I/O usually happens to still be so -high, to fall *after* the end of the slow-speed period itself. As a -consequence, the application does not risk to be deemed as soft -real-time during the slow-speed interval. Then, during the next -high-speed interval, the application cannot, evidently, be deemed as -soft real-time (exactly because of its speed), and so on. - -This extra filtering proved to be rather effective: in the above test, -the frequency of false positives became so low that the start-up time -was 3 seconds in all iterations (apart from occasional outliers, -caused by page-cache-management issues, which are out of the scope of -this commit, and cannot be solved by an I/O scheduler). - -Signed-off-by: Paolo Valente -Signed-off-by: Angelo Ruocco ---- - block/bfq-mq-iosched.c | 115 ++++++++++++++++++++++++++++++++++--------------- - block/bfq-sq-iosched.c | 115 ++++++++++++++++++++++++++++++++++--------------- - 2 files changed, 162 insertions(+), 68 deletions(-) - -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -index 0fc757ae7a42..4d06d900f45e 100644 ---- a/block/bfq-mq-iosched.c -+++ b/block/bfq-mq-iosched.c -@@ -3201,37 +3201,78 @@ static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq, - * whereas soft_rt_next_start is set to infinity for applications that do - * not. - * -- * Unfortunately, even a greedy application may happen to behave in an -- * isochronous way if the CPU load is high. In fact, the application may -- * stop issuing requests while the CPUs are busy serving other processes, -- * then restart, then stop again for a while, and so on. In addition, if -- * the disk achieves a low enough throughput with the request pattern -- * issued by the application (e.g., because the request pattern is random -- * and/or the device is slow), then the application may meet the above -- * bandwidth requirement too. To prevent such a greedy application to be -- * deemed as soft real-time, a further rule is used in the computation of -- * soft_rt_next_start: soft_rt_next_start must be higher than the current -- * time plus the maximum time for which the arrival of a request is waited -- * for when a sync queue becomes idle, namely bfqd->bfq_slice_idle. -- * This filters out greedy applications, as the latter issue instead their -- * next request as soon as possible after the last one has been completed -- * (in contrast, when a batch of requests is completed, a soft real-time -- * application spends some time processing data). -+ * Unfortunately, even a greedy (i.e., I/O-bound) application may -+ * happen to meet, occasionally or systematically, both the above -+ * bandwidth and isochrony requirements. This may happen at least in -+ * the following circumstances. First, if the CPU load is high. The -+ * application may stop issuing requests while the CPUs are busy -+ * serving other processes, then restart, then stop again for a while, -+ * and so on. 
The other circumstances are related to the storage -+ * device: the storage device is highly loaded or reaches a low-enough -+ * throughput with the I/O of the application (e.g., because the I/O -+ * is random and/or the device is slow). In all these cases, the -+ * I/O of the application may be simply slowed down enough to meet -+ * the bandwidth and isochrony requirements. To reduce the probability -+ * that greedy applications are deemed as soft real-time in these -+ * corner cases, a further rule is used in the computation of -+ * soft_rt_next_start: the return value of this function is forced to -+ * be higher than the maximum between the following two quantities. - * -- * Unfortunately, the last filter may easily generate false positives if -- * only bfqd->bfq_slice_idle is used as a reference time interval and one -- * or both the following cases occur: -- * 1) HZ is so low that the duration of a jiffy is comparable to or higher -- * than bfqd->bfq_slice_idle. This happens, e.g., on slow devices with -- * HZ=100. -+ * (a) Current time plus: (1) the maximum time for which the arrival -+ * of a request is waited for when a sync queue becomes idle, -+ * namely bfqd->bfq_slice_idle, and (2) a few extra jiffies. We -+ * postpone for a moment the reason for adding a few extra -+ * jiffies; we get back to it after next item (b). Lower-bounding -+ * the return value of this function with the current time plus -+ * bfqd->bfq_slice_idle tends to filter out greedy applications, -+ * because the latter issue their next request as soon as possible -+ * after the last one has been completed. In contrast, a soft -+ * real-time application spends some time processing data, after a -+ * batch of its requests has been completed. -+ * -+ * (b) Current value of bfqq->soft_rt_next_start. As pointed out -+ * above, greedy applications may happen to meet both the -+ * bandwidth and isochrony requirements under heavy CPU or -+ * storage-device load. In more detail, in these scenarios, these -+ * applications happen, only for limited time periods, to do I/O -+ * slowly enough to meet all the requirements described so far, -+ * including the filtering in above item (a). These slow-speed -+ * time intervals are usually interspersed between other time -+ * intervals during which these applications do I/O at a very high -+ * speed. Fortunately, exactly because of the high speed of the -+ * I/O in the high-speed intervals, the values returned by this -+ * function happen to be so high, near the end of any such -+ * high-speed interval, to be likely to fall *after* the end of -+ * the low-speed time interval that follows. These high values are -+ * stored in bfqq->soft_rt_next_start after each invocation of -+ * this function. As a consequence, if the last value of -+ * bfqq->soft_rt_next_start is constantly used to lower-bound the -+ * next value that this function may return, then, from the very -+ * beginning of a low-speed interval, bfqq->soft_rt_next_start is -+ * likely to be constantly kept so high that any I/O request -+ * issued during the low-speed interval is considered as arriving -+ * to soon for the application to be deemed as soft -+ * real-time. Then, in the high-speed interval that follows, the -+ * application will not be deemed as soft real-time, just because -+ * it will do I/O at a high speed. And so on. 
-+ * -+ * Getting back to the filtering in item (a), in the following two -+ * cases this filtering might be easily passed by a greedy -+ * application, if the reference quantity was just -+ * bfqd->bfq_slice_idle: -+ * 1) HZ is so low that the duration of a jiffy is comparable to or -+ * higher than bfqd->bfq_slice_idle. This happens, e.g., on slow -+ * devices with HZ=100. The time granularity may be so coarse -+ * that the approximation, in jiffies, of bfqd->bfq_slice_idle -+ * is rather lower than the exact value. - * 2) jiffies, instead of increasing at a constant rate, may stop increasing - * for a while, then suddenly 'jump' by several units to recover the lost - * increments. This seems to happen, e.g., inside virtual machines. -- * To address this issue, we do not use as a reference time interval just -- * bfqd->bfq_slice_idle, but bfqd->bfq_slice_idle plus a few jiffies. In -- * particular we add the minimum number of jiffies for which the filter -- * seems to be quite precise also in embedded systems and KVM/QEMU virtual -- * machines. -+ * To address this issue, in the filtering in (a) we do not use as a -+ * reference time interval just bfqd->bfq_slice_idle, but -+ * bfqd->bfq_slice_idle plus a few jiffies. In particular, we add the -+ * minimum number of jiffies for which the filter seems to be quite -+ * precise also in embedded systems and KVM/QEMU virtual machines. - */ - static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd, - struct bfq_queue *bfqq) -@@ -3243,10 +3284,11 @@ static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd, - jiffies_to_msecs(HZ * bfqq->service_from_backlogged / - bfqd->bfq_wr_max_softrt_rate)); - -- return max(bfqq->last_idle_bklogged + -- HZ * bfqq->service_from_backlogged / -- bfqd->bfq_wr_max_softrt_rate, -- jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4); -+ return max3(bfqq->soft_rt_next_start, -+ bfqq->last_idle_bklogged + -+ HZ * bfqq->service_from_backlogged / -+ bfqd->bfq_wr_max_softrt_rate, -+ jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4); - } - - /** -@@ -4395,10 +4437,15 @@ static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, - bfqq->split_time = bfq_smallest_from_now(); - - /* -- * Set to the value for which bfqq will not be deemed as -- * soft rt when it becomes backlogged. -+ * To not forget the possibly high bandwidth consumed by a -+ * process/queue in the recent past, -+ * bfq_bfqq_softrt_next_start() returns a value at least equal -+ * to the current value of bfqq->soft_rt_next_start (see -+ * comments on bfq_bfqq_softrt_next_start). Set -+ * soft_rt_next_start to now, to mean that bfqq has consumed -+ * no bandwidth so far. - */ -- bfqq->soft_rt_next_start = bfq_greatest_from_now(); -+ bfqq->soft_rt_next_start = jiffies; - - /* first request is almost certainly seeky */ - bfqq->seek_history = 1; -diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c -index 4bbd7f4c0154..987dc255c82c 100644 ---- a/block/bfq-sq-iosched.c -+++ b/block/bfq-sq-iosched.c -@@ -3089,37 +3089,78 @@ static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq, - * whereas soft_rt_next_start is set to infinity for applications that do - * not. - * -- * Unfortunately, even a greedy application may happen to behave in an -- * isochronous way if the CPU load is high. In fact, the application may -- * stop issuing requests while the CPUs are busy serving other processes, -- * then restart, then stop again for a while, and so on. 
In addition, if -- * the disk achieves a low enough throughput with the request pattern -- * issued by the application (e.g., because the request pattern is random -- * and/or the device is slow), then the application may meet the above -- * bandwidth requirement too. To prevent such a greedy application to be -- * deemed as soft real-time, a further rule is used in the computation of -- * soft_rt_next_start: soft_rt_next_start must be higher than the current -- * time plus the maximum time for which the arrival of a request is waited -- * for when a sync queue becomes idle, namely bfqd->bfq_slice_idle. -- * This filters out greedy applications, as the latter issue instead their -- * next request as soon as possible after the last one has been completed -- * (in contrast, when a batch of requests is completed, a soft real-time -- * application spends some time processing data). -+ * Unfortunately, even a greedy (i.e., I/O-bound) application may -+ * happen to meet, occasionally or systematically, both the above -+ * bandwidth and isochrony requirements. This may happen at least in -+ * the following circumstances. First, if the CPU load is high. The -+ * application may stop issuing requests while the CPUs are busy -+ * serving other processes, then restart, then stop again for a while, -+ * and so on. The other circumstances are related to the storage -+ * device: the storage device is highly loaded or reaches a low-enough -+ * throughput with the I/O of the application (e.g., because the I/O -+ * is random and/or the device is slow). In all these cases, the -+ * I/O of the application may be simply slowed down enough to meet -+ * the bandwidth and isochrony requirements. To reduce the probability -+ * that greedy applications are deemed as soft real-time in these -+ * corner cases, a further rule is used in the computation of -+ * soft_rt_next_start: the return value of this function is forced to -+ * be higher than the maximum between the following two quantities. - * -- * Unfortunately, the last filter may easily generate false positives if -- * only bfqd->bfq_slice_idle is used as a reference time interval and one -- * or both the following cases occur: -- * 1) HZ is so low that the duration of a jiffy is comparable to or higher -- * than bfqd->bfq_slice_idle. This happens, e.g., on slow devices with -- * HZ=100. -+ * (a) Current time plus: (1) the maximum time for which the arrival -+ * of a request is waited for when a sync queue becomes idle, -+ * namely bfqd->bfq_slice_idle, and (2) a few extra jiffies. We -+ * postpone for a moment the reason for adding a few extra -+ * jiffies; we get back to it after next item (b). Lower-bounding -+ * the return value of this function with the current time plus -+ * bfqd->bfq_slice_idle tends to filter out greedy applications, -+ * because the latter issue their next request as soon as possible -+ * after the last one has been completed. In contrast, a soft -+ * real-time application spends some time processing data, after a -+ * batch of its requests has been completed. -+ * -+ * (b) Current value of bfqq->soft_rt_next_start. As pointed out -+ * above, greedy applications may happen to meet both the -+ * bandwidth and isochrony requirements under heavy CPU or -+ * storage-device load. In more detail, in these scenarios, these -+ * applications happen, only for limited time periods, to do I/O -+ * slowly enough to meet all the requirements described so far, -+ * including the filtering in above item (a). 
These slow-speed -+ * time intervals are usually interspersed between other time -+ * intervals during which these applications do I/O at a very high -+ * speed. Fortunately, exactly because of the high speed of the -+ * I/O in the high-speed intervals, the values returned by this -+ * function happen to be so high, near the end of any such -+ * high-speed interval, to be likely to fall *after* the end of -+ * the low-speed time interval that follows. These high values are -+ * stored in bfqq->soft_rt_next_start after each invocation of -+ * this function. As a consequence, if the last value of -+ * bfqq->soft_rt_next_start is constantly used to lower-bound the -+ * next value that this function may return, then, from the very -+ * beginning of a low-speed interval, bfqq->soft_rt_next_start is -+ * likely to be constantly kept so high that any I/O request -+ * issued during the low-speed interval is considered as arriving -+ * to soon for the application to be deemed as soft -+ * real-time. Then, in the high-speed interval that follows, the -+ * application will not be deemed as soft real-time, just because -+ * it will do I/O at a high speed. And so on. -+ * -+ * Getting back to the filtering in item (a), in the following two -+ * cases this filtering might be easily passed by a greedy -+ * application, if the reference quantity was just -+ * bfqd->bfq_slice_idle: -+ * 1) HZ is so low that the duration of a jiffy is comparable to or -+ * higher than bfqd->bfq_slice_idle. This happens, e.g., on slow -+ * devices with HZ=100. The time granularity may be so coarse -+ * that the approximation, in jiffies, of bfqd->bfq_slice_idle -+ * is rather lower than the exact value. - * 2) jiffies, instead of increasing at a constant rate, may stop increasing - * for a while, then suddenly 'jump' by several units to recover the lost - * increments. This seems to happen, e.g., inside virtual machines. -- * To address this issue, we do not use as a reference time interval just -- * bfqd->bfq_slice_idle, but bfqd->bfq_slice_idle plus a few jiffies. In -- * particular we add the minimum number of jiffies for which the filter -- * seems to be quite precise also in embedded systems and KVM/QEMU virtual -- * machines. -+ * To address this issue, in the filtering in (a) we do not use as a -+ * reference time interval just bfqd->bfq_slice_idle, but -+ * bfqd->bfq_slice_idle plus a few jiffies. In particular, we add the -+ * minimum number of jiffies for which the filter seems to be quite -+ * precise also in embedded systems and KVM/QEMU virtual machines. - */ - static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd, - struct bfq_queue *bfqq) -@@ -3131,10 +3172,11 @@ static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd, - jiffies_to_msecs(HZ * bfqq->service_from_backlogged / - bfqd->bfq_wr_max_softrt_rate)); - -- return max(bfqq->last_idle_bklogged + -- HZ * bfqq->service_from_backlogged / -- bfqd->bfq_wr_max_softrt_rate, -- jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4); -+ return max3(bfqq->soft_rt_next_start, -+ bfqq->last_idle_bklogged + -+ HZ * bfqq->service_from_backlogged / -+ bfqd->bfq_wr_max_softrt_rate, -+ jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4); - } - - /** -@@ -4167,10 +4209,15 @@ static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, - bfqq->split_time = bfq_smallest_from_now(); - - /* -- * Set to the value for which bfqq will not be deemed as -- * soft rt when it becomes backlogged. 
-+ * To not forget the possibly high bandwidth consumed by a -+ * process/queue in the recent past, -+ * bfq_bfqq_softrt_next_start() returns a value at least equal -+ * to the current value of bfqq->soft_rt_next_start (see -+ * comments on bfq_bfqq_softrt_next_start). Set -+ * soft_rt_next_start to now, to mean that bfqq has consumed -+ * no bandwidth so far. - */ -- bfqq->soft_rt_next_start = bfq_greatest_from_now(); -+ bfqq->soft_rt_next_start = jiffies; - - /* first request is almost certainly seeky */ - bfqq->seek_history = 1; - -From 2a09b505660c81dbb80a5d68c9bc558c326d041f Mon Sep 17 00:00:00 2001 -From: Chiara Bruschi -Date: Thu, 7 Dec 2017 09:57:19 +0100 -Subject: [PATCH 03/23] block, bfq-mq: fix occurrences of request - prepare/finish methods' old names - -Commits 'b01f1fa3bb19' (Port of "blk-mq-sched: unify request prepare -methods") and 'cc10d2d7d2c1' (Port of "blk-mq-sched: unify request -finished methods") changed the old names of current bfq_prepare_request -and bfq_finish_request methods, but left them unchanged elsewhere in -the code (related comments, part of function name bfq_put_rq_priv_body). - -This commit fixes every occurrence of the old names of these methods -by changing them into the current names. - -Fixes: b01f1fa3bb19 (Port of "blk-mq-sched: unify request prepare methods") -Fixes: cc10d2d7d2c1 (Port of "blk-mq-sched: unify request finished methods") -Reviewed-by: Paolo Valente -Signed-off-by: Federico Motta -Signed-off-by: Chiara Bruschi ---- - block/bfq-mq-iosched.c | 38 +++++++++++++++++++------------------- - 1 file changed, 19 insertions(+), 19 deletions(-) - -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -index 4d06d900f45e..8f8d5eccb016 100644 ---- a/block/bfq-mq-iosched.c -+++ b/block/bfq-mq-iosched.c -@@ -4018,20 +4018,20 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) - /* - * TESTING: reset DISP_LIST flag, because: 1) - * this rq this request has passed through -- * get_rq_private, 2) then it will have -- * put_rq_private invoked on it, and 3) in -- * put_rq_private we use this flag to check -- * that put_rq_private is not invoked on -- * requests for which get_rq_private has been -- * invoked. -+ * bfq_prepare_request, 2) then it will have -+ * bfq_finish_request invoked on it, and 3) in -+ * bfq_finish_request we use this flag to check -+ * that bfq_finish_request is not invoked on -+ * requests for which bfq_prepare_request has -+ * been invoked. - */ - rq->rq_flags &= ~RQF_DISP_LIST; - goto inc_in_driver_start_rq; - } - - /* -- * We exploit the put_rq_private hook to decrement -- * rq_in_driver, but put_rq_private will not be -+ * We exploit the bfq_finish_request hook to decrement -+ * rq_in_driver, but bfq_finish_request will not be - * invoked on this request. So, to avoid unbalance, - * just start this request, without incrementing - * rq_in_driver. As a negative consequence, -@@ -4040,14 +4040,14 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) - * bfq_schedule_dispatch to be invoked uselessly. - * - * As for implementing an exact solution, the -- * put_request hook, if defined, is probably invoked -- * also on this request. So, by exploiting this hook, -- * we could 1) increment rq_in_driver here, and 2) -- * decrement it in put_request. Such a solution would -- * let the value of the counter be always accurate, -- * but it would entail using an extra interface -- * function. 
This cost seems higher than the benefit, -- * being the frequency of non-elevator-private -+ * bfq_finish_request hook, if defined, is probably -+ * invoked also on this request. So, by exploiting -+ * this hook, we could 1) increment rq_in_driver here, -+ * and 2) decrement it in bfq_finish_request. Such a -+ * solution would let the value of the counter be -+ * always accurate, but it would entail using an extra -+ * interface function. This cost seems higher than the -+ * benefit, being the frequency of non-elevator-private - * requests very low. - */ - goto start_rq; -@@ -4963,7 +4963,7 @@ static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd) - } - } - --static void bfq_put_rq_priv_body(struct bfq_queue *bfqq) -+static void bfq_finish_request_body(struct bfq_queue *bfqq) - { - bfq_log_bfqq(bfqq->bfqd, bfqq, - "put_request_body: allocated %d", bfqq->allocated); -@@ -5019,7 +5019,7 @@ static void bfq_finish_request(struct request *rq) - spin_lock_irqsave(&bfqd->lock, flags); - - bfq_completed_request(bfqq, bfqd); -- bfq_put_rq_priv_body(bfqq); -+ bfq_finish_request_body(bfqq); - - spin_unlock_irqrestore(&bfqd->lock, flags); - } else { -@@ -5042,7 +5042,7 @@ static void bfq_finish_request(struct request *rq) - bfqg_stats_update_io_remove(bfqq_group(bfqq), - rq->cmd_flags); - } -- bfq_put_rq_priv_body(bfqq); -+ bfq_finish_request_body(bfqq); - } - - rq->elv.priv[0] = NULL; - -From 4df19943c3a767df453abea3d2ac3433c3326ce0 Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Thu, 16 Nov 2017 18:38:13 +0100 -Subject: [PATCH 04/23] block, bfq-sq, bfq-mq: add missing rq_pos_tree update - on rq removal - -If two processes do I/O close to each other, then BFQ merges the -bfq_queues associated with these processes, to get a more sequential -I/O, and thus a higher throughput. In this respect, to detect whether -two processes are doing I/O close to each other, BFQ keeps a list of -the head-of-line I/O requests of all active bfq_queues. The list is -ordered by initial sectors, and implemented through a red-black tree -(rq_pos_tree). - -Unfortunately, the update of the rq_pos_tree was incomplete, because -the tree was not updated on the removal of the head-of-line I/O -request of a bfq_queue, in case the queue did not remain empty. This -commit adds the missing update. 
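To make the role of the rq_pos_tree more concrete, here is a minimal, standalone userspace sketch of the same bookkeeping. It replaces the red-black tree with a single cached key per queue, and every name and value in it (update_pos, remove_head, CLOSE_THR) is illustrative only, not the actual BFQ code; it just shows why the lookup key must be refreshed when the head-of-line request goes away while the queue stays non-empty:

  /* pos_key_sketch.c -- build with: gcc -Wall -o pos_key_sketch pos_key_sketch.c */
  #include <stdio.h>

  #define CLOSE_THR 1024         /* illustrative closeness threshold, in sectors */
  #define MAX_RQS   8

  struct queue {
          unsigned long rqs[MAX_RQS];  /* pending request start sectors, head first */
          int nr_rqs;
          unsigned long pos_key;       /* cached sector used for closeness lookups */
  };

  /* refresh the cached lookup key from the current head-of-line request */
  static void update_pos(struct queue *q)
  {
          if (q->nr_rqs > 0)
                  q->pos_key = q->rqs[0];
  }

  /* remove the head request; if requests remain, the cached key is stale */
  static void remove_head(struct queue *q)
  {
          int i;

          for (i = 1; i < q->nr_rqs; i++)
                  q->rqs[i - 1] = q->rqs[i];
          q->nr_rqs--;
          if (q->nr_rqs > 0)
                  update_pos(q);  /* the refresh that was missing on removal */
  }

  /* is this queue's head-of-line I/O close to the given sector? */
  static int is_close(const struct queue *q, unsigned long sector)
  {
          unsigned long d = q->pos_key > sector ? q->pos_key - sector
                                                : sector - q->pos_key;
          return d <= CLOSE_THR;
  }

  int main(void)
  {
          struct queue q = { .rqs = { 1000, 900000 }, .nr_rqs = 2 };

          update_pos(&q);
          printf("close to sector 1200 before removal? %d\n", is_close(&q, 1200));
          remove_head(&q);
          /* without the refresh, the stale key 1000 would still match 1200 */
          printf("close to sector 1200 after removal?  %d\n", is_close(&q, 1200));
          return 0;
  }

In the scheduler itself the refresh is the call to bfq_pos_tree_add_move() that the hunks below add to bfq_remove_request().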
- -Signed-off-by: Paolo Valente -Signed-off-by: Angelo Ruocco ---- - block/bfq-mq-iosched.c | 3 +++ - block/bfq-sq-iosched.c | 3 +++ - 2 files changed, 6 insertions(+) - -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -index 8f8d5eccb016..603191c9008f 100644 ---- a/block/bfq-mq-iosched.c -+++ b/block/bfq-mq-iosched.c -@@ -1729,6 +1729,9 @@ static void bfq_remove_request(struct request_queue *q, - rb_erase(&bfqq->pos_node, bfqq->pos_root); - bfqq->pos_root = NULL; - } -+ } else { -+ BUG_ON(!bfqq->next_rq); -+ bfq_pos_tree_add_move(bfqd, bfqq); - } - - if (rq->cmd_flags & REQ_META) { -diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c -index 987dc255c82c..ea90ace79e49 100644 ---- a/block/bfq-sq-iosched.c -+++ b/block/bfq-sq-iosched.c -@@ -1669,6 +1669,9 @@ static void bfq_remove_request(struct request *rq) - rb_erase(&bfqq->pos_node, bfqq->pos_root); - bfqq->pos_root = NULL; - } -+ } else { -+ BUG_ON(!bfqq->next_rq); -+ bfq_pos_tree_add_move(bfqd, bfqq); - } - - if (rq->cmd_flags & REQ_META) { - -From b844e345140aaea957d84a21d2aa67588b020cd5 Mon Sep 17 00:00:00 2001 -From: Angelo Ruocco -Date: Mon, 18 Dec 2017 08:28:08 +0100 -Subject: [PATCH 05/23] block, bfq-sq, bfq-mq: check low_latency flag in - bfq_bfqq_save_state() - -A just-created bfq_queue will certainly be deemed as interactive on -the arrival of its first I/O request, if the low_latency flag is -set. Yet, if the queue is merged with another queue on the arrival of -its first I/O request, it will not have the chance to be flagged as -interactive. Nevertheless, if the queue is then split soon enough, it -has to be flagged as interactive after the split. - -To handle this early-merge scenario correctly, BFQ saves the state of -the queue, on the merge, as if the latter had already been deemed -interactive. So, if the queue is split soon, it will get -weight-raised, because the previous state of the queue is resumed on -the split. - -Unfortunately, in the act of saving the state of the newly-created -queue, BFQ doesn't check whether the low_latency flag is set, and this -causes early-merged queues to be then weight-raised, on queue splits, -even if low_latency is off. This commit addresses this problem by -adding the missing check. 
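The fix itself boils down to one extra term in the condition that gets saved across a merge. A minimal userspace sketch of that condition follows; the struct and field names are placeholders rather than the real bfq_io_cq/bfq_queue layout, and only the shape of the check matters:

  /* save_state_sketch.c -- build with: gcc -Wall -o save_state_sketch save_state_sketch.c */
  #include <stdio.h>
  #include <stdbool.h>

  struct saved_state {           /* state carried by the bic across a queue merge */
          bool deserves_interactive_wr;
  };

  struct queue {
          bool just_created;
          bool in_large_burst;
  };

  /* snapshot taken when 'q' is about to be merged away */
  static void save_state(const struct queue *q, struct saved_state *s,
                         bool low_latency)
  {
          /* the low_latency term is the missing check this patch adds */
          s->deserves_interactive_wr = q->just_created &&
                                       !q->in_large_burst &&
                                       low_latency;
  }

  int main(void)
  {
          struct queue q = { .just_created = true, .in_large_burst = false };
          struct saved_state s;

          save_state(&q, &s, false);
          printf("low_latency off -> restore as weight-raised? %d\n",
                 s.deserves_interactive_wr);
          save_state(&q, &s, true);
          printf("low_latency on  -> restore as weight-raised? %d\n",
                 s.deserves_interactive_wr);
          return 0;
  }

With low_latency cleared, an early-merged queue is no longer remembered as deserving interactive weight raising, so a later split can no longer raise its weight in a configuration where raising is disabled.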
- -Signed-off-by: Angelo Ruocco -Signed-off-by: Paolo Valente ---- - block/bfq-mq-iosched.c | 3 ++- - block/bfq-sq-iosched.c | 3 ++- - 2 files changed, 4 insertions(+), 2 deletions(-) - -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -index 603191c9008f..ff9776c8836a 100644 ---- a/block/bfq-mq-iosched.c -+++ b/block/bfq-mq-iosched.c -@@ -2231,7 +2231,8 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq) - bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq); - bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node); - if (unlikely(bfq_bfqq_just_created(bfqq) && -- !bfq_bfqq_in_large_burst(bfqq))) { -+ !bfq_bfqq_in_large_burst(bfqq) && -+ bfqq->bfqd->low_latency)) { - /* - * bfqq being merged ritgh after being created: bfqq - * would have deserved interactive weight raising, but -diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c -index ea90ace79e49..3a2d764e760c 100644 ---- a/block/bfq-sq-iosched.c -+++ b/block/bfq-sq-iosched.c -@@ -2109,7 +2109,8 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq) - bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq); - bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node); - if (unlikely(bfq_bfqq_just_created(bfqq) && -- !bfq_bfqq_in_large_burst(bfqq))) { -+ !bfq_bfqq_in_large_burst(bfqq) && -+ bfqq->bfqd->low_latency)) { - /* - * bfqq being merged ritgh after being created: bfqq - * would have deserved interactive weight raising, but - -From 4cc6896fe1de2e0b4de151a6e70658f10b9ec2fa Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Fri, 27 Oct 2017 11:12:14 +0200 -Subject: [PATCH 06/23] block, bfq-sq, bfq-mq: let a queue be merged only - shortly after starting I/O - -In BFQ and CFQ, two processes are said to be cooperating if they do -I/O in such a way that the union of their I/O requests yields a -sequential I/O pattern. To get such a sequential I/O pattern out of -the non-sequential pattern of each cooperating process, BFQ and CFQ -merge the queues associated with these processes. In more detail, -cooperating processes, and thus their associated queues, usually -start, or restart, to do I/O shortly after each other. This is the -case, e.g., for the I/O threads of KVM/QEMU and of the dump -utility. Basing on this assumption, this commit allows a bfq_queue to -be merged only during a short time interval (100ms) after it starts, -or re-starts, to do I/O. This filtering provides two important -benefits. - -First, it greatly reduces the probability that two non-cooperating -processes have their queues merged by mistake, if they just happen to -do I/O close to each other for a short time interval. These spurious -merges cause loss of service guarantees. A low-weight bfq_queue may -unjustly get more than its expected share of the throughput: if such a -low-weight queue is merged with a high-weight queue, then the I/O for -the low-weight queue is served as if the queue had a high weight. This -may damage other high-weight queues unexpectedly. For instance, -because of this issue, lxterminal occasionally took 7.5 seconds to -start, instead of 6.5 seconds, when some sequential readers and -writers did I/O in the background on a FUJITSU MHX2300BT HDD. The -reason is that the bfq_queues associated with some of the readers or -the writers were merged with the high-weight queues of some processes -that had to do some urgent but little I/O. The readers then exploited -the inherited high weight for all or most of their I/O, during the -start-up of terminal. 
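The check behind this filtering is just a comparison against a ~100ms window that opens at the queue's first I/O after an idle period. The sketch below mimics it on a plain tick counter; the HZ value is arbitrary and, unlike the kernel's time_is_before_jiffies(), no wraparound handling is attempted:

  /* merge_window_sketch.c -- build with: gcc -Wall -o merge_window_sketch merge_window_sketch.c */
  #include <stdio.h>
  #include <stdbool.h>

  #define HZ                250              /* illustrative tick rate */
  #define MERGE_TIME_LIMIT  (HZ / 10)        /* the ~100ms merge window */

  struct queue {
          unsigned long first_io_time;           /* tick of first I/O since idle */
          unsigned long service_from_backlogged; /* sectors served since then */
  };

  /* same shape as the 'too late for merging' test, minus jiffies wraparound */
  static bool too_late_for_merging(const struct queue *q, unsigned long now)
  {
          return q->service_from_backlogged > 0 &&
                 now > q->first_io_time + MERGE_TIME_LIMIT;
  }

  int main(void)
  {
          struct queue q = {
                  .first_io_time = 1000,
                  .service_from_backlogged = 64,
          };

          /* 40ms after the first request: still inside the window */
          printf("merge allowed at +40ms?  %d\n",
                 !too_late_for_merging(&q, 1000 + HZ / 25));
          /* 400ms after the first request: the window has expired */
          printf("merge allowed at +400ms? %d\n",
                 !too_late_for_merging(&q, 1000 + 2 * HZ / 5));
          return 0;
  }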
The filtering introduced by this commit -eliminated any outlier caused by spurious queue merges in our start-up -time tests. - -This filtering also provides a little boost of the throughput -sustainable by BFQ: 3-4%, depending on the CPU. The reason is that, -once a bfq_queue cannot be merged any longer, this commit makes BFQ -stop updating the data needed to handle merging for the queue. - -Signed-off-by: Paolo Valente -Signed-off-by: Angelo Ruocco ---- - block/bfq-mq-iosched.c | 64 +++++++++++++++++++++++++++++++++++++++++--------- - block/bfq-mq.h | 1 + - block/bfq-sched.c | 4 ++++ - block/bfq-sq-iosched.c | 64 +++++++++++++++++++++++++++++++++++++++++--------- - block/bfq.h | 2 ++ - 5 files changed, 113 insertions(+), 22 deletions(-) - -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -index ff9776c8836a..8b17b25a3c30 100644 ---- a/block/bfq-mq-iosched.c -+++ b/block/bfq-mq-iosched.c -@@ -119,6 +119,20 @@ static const int bfq_async_charge_factor = 10; - /* Default timeout values, in jiffies, approximating CFQ defaults. */ - static const int bfq_timeout = (HZ / 8); - -+/* -+ * Time limit for merging (see comments in bfq_setup_cooperator). Set -+ * to the slowest value that, in our tests, proved to be effective in -+ * removing false positives, while not causing true positives to miss -+ * queue merging. -+ * -+ * As can be deduced from the low time limit below, queue merging, if -+ * successful, happens at the very beggining of the I/O of the involved -+ * cooperating processes, as a consequence of the arrival of the very -+ * first requests from each cooperator. After that, there is very -+ * little chance to find cooperators. -+ */ -+static const unsigned long bfq_merge_time_limit = HZ/10; -+ - static struct kmem_cache *bfq_pool; - - /* Below this threshold (in ns), we consider thinktime immediate. */ -@@ -389,6 +403,13 @@ bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root, - return bfqq; - } - -+static bool bfq_too_late_for_merging(struct bfq_queue *bfqq) -+{ -+ return bfqq->service_from_backlogged > 0 && -+ time_is_before_jiffies(bfqq->first_IO_time + -+ bfq_merge_time_limit); -+} -+ - static void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq) - { - struct rb_node **p, *parent; -@@ -399,6 +420,14 @@ static void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq) - bfqq->pos_root = NULL; - } - -+ /* -+ * bfqq cannot be merged any longer (see comments in -+ * bfq_setup_cooperator): no point in adding bfqq into the -+ * position tree. -+ */ -+ if (bfq_too_late_for_merging(bfqq)) -+ return; -+ - if (bfq_class_idle(bfqq)) - return; - if (!bfqq->next_rq) -@@ -2081,6 +2110,13 @@ bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq) - static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq, - struct bfq_queue *new_bfqq) - { -+ if (bfq_too_late_for_merging(new_bfqq)) { -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "[%s] too late for bfq%d to be merged", -+ __func__, new_bfqq->pid); -+ return false; -+ } -+ - if (bfq_class_idle(bfqq) || bfq_class_idle(new_bfqq) || - (bfqq->ioprio_class != new_bfqq->ioprio_class)) - return false; -@@ -2149,6 +2185,23 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq, - { - struct bfq_queue *in_service_bfqq, *new_bfqq; - -+ /* -+ * Prevent bfqq from being merged if it has been created too -+ * long ago. The idea is that true cooperating processes, and -+ * thus their associated bfq_queues, are supposed to be -+ * created shortly after each other. 
This is the case, e.g., -+ * for KVM/QEMU and dump I/O threads. Basing on this -+ * assumption, the following filtering greatly reduces the -+ * probability that two non-cooperating processes, which just -+ * happen to do close I/O for some short time interval, have -+ * their queues merged by mistake. -+ */ -+ if (bfq_too_late_for_merging(bfqq)) { -+ bfq_log_bfqq(bfqd, bfqq, -+ "would have looked for coop, but too late"); -+ return NULL; -+ } -+ - if (bfqq->new_bfqq) - return bfqq->new_bfqq; - -@@ -3338,17 +3391,6 @@ static void bfq_bfqq_expire(struct bfq_data *bfqd, - */ - slow = bfq_bfqq_is_slow(bfqd, bfqq, compensate, reason, &delta); - -- /* -- * Increase service_from_backlogged before next statement, -- * because the possible next invocation of -- * bfq_bfqq_charge_time would likely inflate -- * entity->service. In contrast, service_from_backlogged must -- * contain real service, to enable the soft real-time -- * heuristic to correctly compute the bandwidth consumed by -- * bfqq. -- */ -- bfqq->service_from_backlogged += entity->service; -- - /* - * As above explained, charge slow (typically seeky) and - * timed-out queues with the time and not the service -diff --git a/block/bfq-mq.h b/block/bfq-mq.h -index 1cb05bb853d2..a5947b203ef2 100644 ---- a/block/bfq-mq.h -+++ b/block/bfq-mq.h -@@ -337,6 +337,7 @@ struct bfq_queue { - unsigned long wr_start_at_switch_to_srt; - - unsigned long split_time; /* time of last split */ -+ unsigned long first_IO_time; /* time of first I/O for this queue */ - }; - - /** -diff --git a/block/bfq-sched.c b/block/bfq-sched.c -index 616c0692335a..9d261dd428e4 100644 ---- a/block/bfq-sched.c -+++ b/block/bfq-sched.c -@@ -939,6 +939,10 @@ static void bfq_bfqq_served(struct bfq_queue *bfqq, int served) - struct bfq_entity *entity = &bfqq->entity; - struct bfq_service_tree *st; - -+ if (!bfqq->service_from_backlogged) -+ bfqq->first_IO_time = jiffies; -+ -+ bfqq->service_from_backlogged += served; - for_each_entity(entity) { - st = bfq_entity_service_tree(entity); - -diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c -index 3a2d764e760c..cd00a41ca35d 100644 ---- a/block/bfq-sq-iosched.c -+++ b/block/bfq-sq-iosched.c -@@ -113,6 +113,20 @@ static const int bfq_async_charge_factor = 10; - /* Default timeout values, in jiffies, approximating CFQ defaults. */ - static const int bfq_timeout = (HZ / 8); - -+/* -+ * Time limit for merging (see comments in bfq_setup_cooperator). Set -+ * to the slowest value that, in our tests, proved to be effective in -+ * removing false positives, while not causing true positives to miss -+ * queue merging. -+ * -+ * As can be deduced from the low time limit below, queue merging, if -+ * successful, happens at the very beggining of the I/O of the involved -+ * cooperating processes, as a consequence of the arrival of the very -+ * first requests from each cooperator. After that, there is very -+ * little chance to find cooperators. -+ */ -+static const unsigned long bfq_merge_time_limit = HZ/10; -+ - static struct kmem_cache *bfq_pool; - - /* Below this threshold (in ns), we consider thinktime immediate. 
*/ -@@ -351,6 +365,13 @@ bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root, - return bfqq; - } - -+static bool bfq_too_late_for_merging(struct bfq_queue *bfqq) -+{ -+ return bfqq->service_from_backlogged > 0 && -+ time_is_before_jiffies(bfqq->first_IO_time + -+ bfq_merge_time_limit); -+} -+ - static void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq) - { - struct rb_node **p, *parent; -@@ -361,6 +382,14 @@ static void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq) - bfqq->pos_root = NULL; - } - -+ /* -+ * bfqq cannot be merged any longer (see comments in -+ * bfq_setup_cooperator): no point in adding bfqq into the -+ * position tree. -+ */ -+ if (bfq_too_late_for_merging(bfqq)) -+ return; -+ - if (bfq_class_idle(bfqq)) - return; - if (!bfqq->next_rq) -@@ -1960,6 +1989,13 @@ bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq) - static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq, - struct bfq_queue *new_bfqq) - { -+ if (bfq_too_late_for_merging(new_bfqq)) { -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "[%s] too late for bfq%d to be merged", -+ __func__, new_bfqq->pid); -+ return false; -+ } -+ - if (bfq_class_idle(bfqq) || bfq_class_idle(new_bfqq) || - (bfqq->ioprio_class != new_bfqq->ioprio_class)) - return false; -@@ -2028,6 +2064,23 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq, - { - struct bfq_queue *in_service_bfqq, *new_bfqq; - -+ /* -+ * Prevent bfqq from being merged if it has been created too -+ * long ago. The idea is that true cooperating processes, and -+ * thus their associated bfq_queues, are supposed to be -+ * created shortly after each other. This is the case, e.g., -+ * for KVM/QEMU and dump I/O threads. Basing on this -+ * assumption, the following filtering greatly reduces the -+ * probability that two non-cooperating processes, which just -+ * happen to do close I/O for some short time interval, have -+ * their queues merged by mistake. -+ */ -+ if (bfq_too_late_for_merging(bfqq)) { -+ bfq_log_bfqq(bfqd, bfqq, -+ "would have looked for coop, but too late"); -+ return NULL; -+ } -+ - if (bfqq->new_bfqq) - return bfqq->new_bfqq; - -@@ -3226,17 +3279,6 @@ static void bfq_bfqq_expire(struct bfq_data *bfqd, - */ - slow = bfq_bfqq_is_slow(bfqd, bfqq, compensate, reason, &delta); - -- /* -- * Increase service_from_backlogged before next statement, -- * because the possible next invocation of -- * bfq_bfqq_charge_time would likely inflate -- * entity->service. In contrast, service_from_backlogged must -- * contain real service, to enable the soft real-time -- * heuristic to correctly compute the bandwidth consumed by -- * bfqq. 
-- */ -- bfqq->service_from_backlogged += entity->service; -- - /* - * As above explained, charge slow (typically seeky) and - * timed-out queues with the time and not the service -diff --git a/block/bfq.h b/block/bfq.h -index 47cd4d5a8c32..59539adc00a5 100644 ---- a/block/bfq.h -+++ b/block/bfq.h -@@ -329,6 +329,8 @@ struct bfq_queue { - unsigned long wr_start_at_switch_to_srt; - - unsigned long split_time; /* time of last split */ -+ -+ unsigned long first_IO_time; /* time of first I/O for this queue */ - }; - - /** - -From 157f39c43ab182280634cd4f6335d0187b3741a0 Mon Sep 17 00:00:00 2001 -From: Angelo Ruocco -Date: Mon, 11 Dec 2017 14:19:54 +0100 -Subject: [PATCH 07/23] block, bfq-sq, bfq-mq: remove superfluous check in - queue-merging setup - -When two or more processes do I/O in a way that the their requests are -sequential in respect to one another, BFQ merges the bfq_queues associated -with the processes. This way the overall I/O pattern becomes sequential, -and thus there is a boost in througput. -These cooperating processes usually start or restart to do I/O shortly -after each other. So, in order to avoid merging non-cooperating processes, -BFQ ensures that none of these queues has been in weight raising for too -long. - -In this respect, from commit "block, bfq-sq, bfq-mq: let a queue be merged -only shortly after being created", BFQ checks whether any queue (and not -only weight-raised ones) is doing I/O continuously from too long to be -merged. - -This new additional check makes the first one useless: a queue doing -I/O from long enough, if being weight-raised, is also a queue in -weight raising for too long to be merged. Accordingly, this commit -removes the first check. - -Signed-off-by: Angelo Ruocco -Signed-off-by: Paolo Valente ---- - block/bfq-mq-iosched.c | 53 ++++---------------------------------------------- - block/bfq-sq-iosched.c | 53 ++++---------------------------------------------- - 2 files changed, 8 insertions(+), 98 deletions(-) - -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -index 8b17b25a3c30..f5db8613a70f 100644 ---- a/block/bfq-mq-iosched.c -+++ b/block/bfq-mq-iosched.c -@@ -2140,20 +2140,6 @@ static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq, - return true; - } - --/* -- * If this function returns true, then bfqq cannot be merged. The idea -- * is that true cooperation happens very early after processes start -- * to do I/O. Usually, late cooperations are just accidental false -- * positives. In case bfqq is weight-raised, such false positives -- * would evidently degrade latency guarantees for bfqq. -- */ --static bool wr_from_too_long(struct bfq_queue *bfqq) --{ -- return bfqq->wr_coeff > 1 && -- time_is_before_jiffies(bfqq->last_wr_start_finish + -- msecs_to_jiffies(100)); --} -- - /* - * Attempt to schedule a merge of bfqq with the currently in-service - * queue or with a close queue among the scheduled queues. Return -@@ -2167,11 +2153,6 @@ static bool wr_from_too_long(struct bfq_queue *bfqq) - * to maintain. Besides, in such a critical condition as an out of memory, - * the benefits of queue merging may be little relevant, or even negligible. - * -- * Weight-raised queues can be merged only if their weight-raising -- * period has just started. In fact cooperating processes are usually -- * started together. Thus, with this filter we avoid false positives -- * that would jeopardize low-latency guarantees. 
-- * - * WARNING: queue merging may impair fairness among non-weight raised - * queues, for at least two reasons: 1) the original weight of a - * merged queue may change during the merged state, 2) even being the -@@ -2205,15 +2186,7 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq, - if (bfqq->new_bfqq) - return bfqq->new_bfqq; - -- if (io_struct && wr_from_too_long(bfqq) && -- likely(bfqq != &bfqd->oom_bfqq)) -- bfq_log_bfqq(bfqd, bfqq, -- "would have looked for coop, but bfq%d wr", -- bfqq->pid); -- -- if (!io_struct || -- wr_from_too_long(bfqq) || -- unlikely(bfqq == &bfqd->oom_bfqq)) -+ if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq)) - return NULL; - - /* If there is only one backlogged queue, don't search. */ -@@ -2223,17 +2196,8 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq, - in_service_bfqq = bfqd->in_service_queue; - - if (in_service_bfqq && in_service_bfqq != bfqq && -- wr_from_too_long(in_service_bfqq) -- && likely(in_service_bfqq == &bfqd->oom_bfqq)) -- bfq_log_bfqq(bfqd, bfqq, -- "would have tried merge with in-service-queue, but wr"); -- -- if (!in_service_bfqq || in_service_bfqq == bfqq -- || wr_from_too_long(in_service_bfqq) || -- unlikely(in_service_bfqq == &bfqd->oom_bfqq)) -- goto check_scheduled; -- -- if (bfq_rq_close_to_sector(io_struct, request, bfqd->last_position) && -+ likely(in_service_bfqq != &bfqd->oom_bfqq) && -+ bfq_rq_close_to_sector(io_struct, request, bfqd->last_position) && - bfqq->entity.parent == in_service_bfqq->entity.parent && - bfq_may_be_close_cooperator(bfqq, in_service_bfqq)) { - new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq); -@@ -2245,21 +2209,12 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq, - * queues. The only thing we need is that the bio/request is not - * NULL, as we need it to establish whether a cooperator exists. - */ --check_scheduled: - new_bfqq = bfq_find_close_cooperator(bfqd, bfqq, - bfq_io_struct_pos(io_struct, request)); - - BUG_ON(new_bfqq && bfqq->entity.parent != new_bfqq->entity.parent); - -- if (new_bfqq && wr_from_too_long(new_bfqq) && -- likely(new_bfqq != &bfqd->oom_bfqq) && -- bfq_may_be_close_cooperator(bfqq, new_bfqq)) -- bfq_log_bfqq(bfqd, bfqq, -- "would have merged with bfq%d, but wr", -- new_bfqq->pid); -- -- if (new_bfqq && !wr_from_too_long(new_bfqq) && -- likely(new_bfqq != &bfqd->oom_bfqq) && -+ if (new_bfqq && likely(new_bfqq != &bfqd->oom_bfqq) && - bfq_may_be_close_cooperator(bfqq, new_bfqq)) - return bfq_setup_merge(bfqq, new_bfqq); - -diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c -index cd00a41ca35d..d8a358e5e284 100644 ---- a/block/bfq-sq-iosched.c -+++ b/block/bfq-sq-iosched.c -@@ -2019,20 +2019,6 @@ static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq, - return true; - } - --/* -- * If this function returns true, then bfqq cannot be merged. The idea -- * is that true cooperation happens very early after processes start -- * to do I/O. Usually, late cooperations are just accidental false -- * positives. In case bfqq is weight-raised, such false positives -- * would evidently degrade latency guarantees for bfqq. -- */ --static bool wr_from_too_long(struct bfq_queue *bfqq) --{ -- return bfqq->wr_coeff > 1 && -- time_is_before_jiffies(bfqq->last_wr_start_finish + -- msecs_to_jiffies(100)); --} -- - /* - * Attempt to schedule a merge of bfqq with the currently in-service - * queue or with a close queue among the scheduled queues. 
Return -@@ -2046,11 +2032,6 @@ static bool wr_from_too_long(struct bfq_queue *bfqq) - * to maintain. Besides, in such a critical condition as an out of memory, - * the benefits of queue merging may be little relevant, or even negligible. - * -- * Weight-raised queues can be merged only if their weight-raising -- * period has just started. In fact cooperating processes are usually -- * started together. Thus, with this filter we avoid false positives -- * that would jeopardize low-latency guarantees. -- * - * WARNING: queue merging may impair fairness among non-weight raised - * queues, for at least two reasons: 1) the original weight of a - * merged queue may change during the merged state, 2) even being the -@@ -2084,15 +2065,7 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq, - if (bfqq->new_bfqq) - return bfqq->new_bfqq; - -- if (io_struct && wr_from_too_long(bfqq) && -- likely(bfqq != &bfqd->oom_bfqq)) -- bfq_log_bfqq(bfqd, bfqq, -- "would have looked for coop, but bfq%d wr", -- bfqq->pid); -- -- if (!io_struct || -- wr_from_too_long(bfqq) || -- unlikely(bfqq == &bfqd->oom_bfqq)) -+ if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq)) - return NULL; - - /* If there is only one backlogged queue, don't search. */ -@@ -2102,17 +2075,8 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq, - in_service_bfqq = bfqd->in_service_queue; - - if (in_service_bfqq && in_service_bfqq != bfqq && -- bfqd->in_service_bic && wr_from_too_long(in_service_bfqq) -- && likely(in_service_bfqq == &bfqd->oom_bfqq)) -- bfq_log_bfqq(bfqd, bfqq, -- "would have tried merge with in-service-queue, but wr"); -- -- if (!in_service_bfqq || in_service_bfqq == bfqq || -- !bfqd->in_service_bic || wr_from_too_long(in_service_bfqq) || -- unlikely(in_service_bfqq == &bfqd->oom_bfqq)) -- goto check_scheduled; -- -- if (bfq_rq_close_to_sector(io_struct, request, bfqd->last_position) && -+ likely(in_service_bfqq != &bfqd->oom_bfqq) && -+ bfq_rq_close_to_sector(io_struct, request, bfqd->last_position) && - bfqq->entity.parent == in_service_bfqq->entity.parent && - bfq_may_be_close_cooperator(bfqq, in_service_bfqq)) { - new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq); -@@ -2124,21 +2088,12 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq, - * queues. The only thing we need is that the bio/request is not - * NULL, as we need it to establish whether a cooperator exists. 
- */ --check_scheduled: - new_bfqq = bfq_find_close_cooperator(bfqd, bfqq, - bfq_io_struct_pos(io_struct, request)); - - BUG_ON(new_bfqq && bfqq->entity.parent != new_bfqq->entity.parent); - -- if (new_bfqq && wr_from_too_long(new_bfqq) && -- likely(new_bfqq != &bfqd->oom_bfqq) && -- bfq_may_be_close_cooperator(bfqq, new_bfqq)) -- bfq_log_bfqq(bfqd, bfqq, -- "would have merged with bfq%d, but wr", -- new_bfqq->pid); -- -- if (new_bfqq && !wr_from_too_long(new_bfqq) && -- likely(new_bfqq != &bfqd->oom_bfqq) && -+ if (new_bfqq && likely(new_bfqq != &bfqd->oom_bfqq) && - bfq_may_be_close_cooperator(bfqq, new_bfqq)) - return bfq_setup_merge(bfqq, new_bfqq); - - -From b82eb91d87f172aba7eb5eb98e8d5e2a621adf51 Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Thu, 30 Nov 2017 17:48:28 +0100 -Subject: [PATCH 08/23] block, bfq-sq, bfq-mq: increase threshold to deem I/O - as random - -If two processes do I/O close to each other, i.e., are cooperating -processes in BFQ (and CFQ'S) nomenclature, then BFQ merges their -associated bfq_queues, so as to get sequential I/O from the union of -the I/O requests of the processes, and thus reach a higher -throughput. A merged queue is then split if its I/O stops being -sequential. In this respect, BFQ deems the I/O of a bfq_queue as -(mostly) sequential only if less than 4 I/O requests are random, out -of the last 32 requests inserted into the queue. - -Unfortunately, extensive testing (with the interleaved_io benchmark of -the S suite [1], and with real applications spawning cooperating -processes) has clearly shown that, with such a low threshold, only a -rather low I/O throughput may be reached when several cooperating -processes do I/O. In particular, the outcome of each test run was -bimodal: if queue merging occurred and was stable during the test, -then the throughput was close to the peak rate of the storage device, -otherwise the throughput was arbitrarily low (usually around 1/10 of -the peak rate with a rotational device). The probability to get the -unlucky outcomes grew with the number of cooperating processes: it was -already significant with 5 processes, and close to one with 7 or more -processes. - -The cause of the low throughput in the unlucky runs was that the -merged queues containing the I/O of these cooperating processes were -soon split, because they contained more random I/O requests than those -tolerated by the 4/32 threshold, but -- that I/O would have however allowed the storage device to reach - peak throughput or almost peak throughput; -- in contrast, the I/O of these processes, if served individually - (from separate queues) yielded a rather low throughput. - -So we repeated our tests with increasing values of the threshold, -until we found the minimum value (19) for which we obtained maximum -throughput, reliably, with at least up to 9 cooperating -processes. Then we checked that the use of that higher threshold value -did not cause any regression for any other benchmark in the suite [1]. -This commit raises the threshold to such a higher value. 
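As a concrete illustration of what the new threshold changes, the self-contained snippet below classifies one 32-bit seek-history sample under both the old 4/32 limit and the new 19/32 one. The sample bitmap is made up, and hweight32() is reimplemented here only to keep the example standalone:

  /* seeky_sketch.c -- build with: gcc -Wall -o seeky_sketch seeky_sketch.c */
  #include <stdio.h>
  #include <stdint.h>

  /* population count of the 32-bit seek-history shift register */
  static int hweight32(uint32_t x)
  {
          int n = 0;

          while (x) {
                  n += x & 1;
                  x >>= 1;
          }
          return n;
  }

  int main(void)
  {
          /* 10 of the last 32 requests were "far" from their predecessor */
          uint32_t seek_history = 0x000003ff;
          int random_rqs = hweight32(seek_history);

          printf("random requests in the window: %d\n", random_rqs);
          printf("seeky with the old 4/32 threshold:  %d\n", random_rqs > 32 / 8);
          printf("seeky with the new 19/32 threshold: %d\n", random_rqs > 19);
          return 0;
  }

With 10 random requests out of the last 32, the old threshold would have flagged the merged queue as seeky and split it, while the new one keeps it merged.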
- -[1] https://github.com/Algodev-github/S - -Signed-off-by: Angelo Ruocco -Signed-off-by: Paolo Valente ---- - block/bfq-mq-iosched.c | 2 +- - block/bfq-sq-iosched.c | 2 +- - 2 files changed, 2 insertions(+), 2 deletions(-) - -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -index f5db8613a70f..cb5f49ddecb6 100644 ---- a/block/bfq-mq-iosched.c -+++ b/block/bfq-mq-iosched.c -@@ -145,7 +145,7 @@ static struct kmem_cache *bfq_pool; - #define BFQQ_SEEK_THR (sector_t)(8 * 100) - #define BFQQ_SECT_THR_NONROT (sector_t)(2 * 32) - #define BFQQ_CLOSE_THR (sector_t)(8 * 1024) --#define BFQQ_SEEKY(bfqq) (hweight32(bfqq->seek_history) > 32/8) -+#define BFQQ_SEEKY(bfqq) (hweight32(bfqq->seek_history) > 19) - - /* Min number of samples required to perform peak-rate update */ - #define BFQ_RATE_MIN_SAMPLES 32 -diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c -index d8a358e5e284..e1c6dc651be1 100644 ---- a/block/bfq-sq-iosched.c -+++ b/block/bfq-sq-iosched.c -@@ -139,7 +139,7 @@ static struct kmem_cache *bfq_pool; - #define BFQQ_SEEK_THR (sector_t)(8 * 100) - #define BFQQ_SECT_THR_NONROT (sector_t)(2 * 32) - #define BFQQ_CLOSE_THR (sector_t)(8 * 1024) --#define BFQQ_SEEKY(bfqq) (hweight32(bfqq->seek_history) > 32/8) -+#define BFQQ_SEEKY(bfqq) (hweight32(bfqq->seek_history) > 19) - - /* Min number of samples required to perform peak-rate update */ - #define BFQ_RATE_MIN_SAMPLES 32 - -From b739dda4e4b3a1cbbc905f86f9fbb0860b068ce7 Mon Sep 17 00:00:00 2001 -From: Chiara Bruschi -Date: Mon, 11 Dec 2017 18:55:26 +0100 -Subject: [PATCH 09/23] block, bfq-sq, bfq-mq: specify usage condition of - delta_us in bfq_log_bfqq call - -Inside the function bfq_completed_request the value of a variable -called delta_us is computed as current request completion time. -delta_us is used inside a call to the function bfq_log_bfqq as divisor -in a division operation to compute a rate value, but no check makes -sure that delta_us has non-zero value. A divisor with value 0 leads -to a division error that could result in a kernel oops (therefore -unstable/unreliable system state) and consequently cause kernel panic -if resources are unavailable after the system fault. - -This commit fixes this call to bfq_log_bfqq specifying the condition -that allows delta_us to be safely used as divisor. - -Signed-off-by: Paolo Valente -Signed-off-by: Chiara Bruschi ---- - block/bfq-mq-iosched.c | 5 ++++- - block/bfq-sq-iosched.c | 5 ++++- - 2 files changed, 8 insertions(+), 2 deletions(-) - -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -index cb5f49ddecb6..6ce2c0789046 100644 ---- a/block/bfq-mq-iosched.c -+++ b/block/bfq-mq-iosched.c -@@ -4904,9 +4904,12 @@ static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd) - bfq_log_bfqq(bfqd, bfqq, - "rq_completed: delta %uus/%luus max_size %u rate %llu/%llu", - delta_us, BFQ_MIN_TT/NSEC_PER_USEC, bfqd->last_rq_max_size, -+ delta_us > 0 ? 
- (USEC_PER_SEC* - (u64)((bfqd->last_rq_max_size<>BFQ_RATE_SHIFT, -+ >>BFQ_RATE_SHIFT : -+ (USEC_PER_SEC* -+ (u64)(bfqd->last_rq_max_size<>BFQ_RATE_SHIFT, - (USEC_PER_SEC*(u64)(1UL<<(BFQ_RATE_SHIFT-10)))>>BFQ_RATE_SHIFT); - - /* -diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c -index e1c6dc651be1..eff4c4edf5a0 100644 ---- a/block/bfq-sq-iosched.c -+++ b/block/bfq-sq-iosched.c -@@ -4565,9 +4565,12 @@ static void bfq_completed_request(struct request_queue *q, struct request *rq) - - bfq_log(bfqd, "rq_completed: delta %uus/%luus max_size %u rate %llu/%llu", - delta_us, BFQ_MIN_TT/NSEC_PER_USEC, bfqd->last_rq_max_size, -+ delta_us > 0 ? - (USEC_PER_SEC* - (u64)((bfqd->last_rq_max_size<>BFQ_RATE_SHIFT, -+ >>BFQ_RATE_SHIFT : -+ (USEC_PER_SEC* -+ (u64)(bfqd->last_rq_max_size<>BFQ_RATE_SHIFT, - (USEC_PER_SEC*(u64)(1UL<<(BFQ_RATE_SHIFT-10)))>>BFQ_RATE_SHIFT); - - /* - -From ae4310c13eca762644734d53074d8456c85e2dec Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Tue, 19 Dec 2017 12:07:12 +0100 -Subject: [PATCH 10/23] block, bfq-mq: limit tags for writes and async I/O - -Asynchronous I/O can easily starve synchronous I/O (both sync reads -and sync writes), by consuming all request tags. Similarly, storms of -synchronous writes, such as those that sync(2) may trigger, can starve -synchronous reads. In their turn, these two problems may also cause -BFQ to loose control on latency for interactive and soft real-time -applications. For example, on a PLEXTOR PX-256M5S SSD, LibreOffice -Writer takes 0.6 seconds to start if the device is idle, but it takes -more than 45 seconds (!) if there are sequential writes in the -background. - -This commit addresses this issue by limiting the maximum percentage of -tags that asynchronous I/O requests and synchronous write requests can -consume. In particular, this commit grants a higher threshold to -synchronous writes, to prevent the latter from being starved by -asynchronous I/O. - -According to the above test, LibreOffice Writer now starts in about -1.2 seconds on average, regardless of the background workload, and -apart from some rare outlier. To check this improvement, run, e.g., -sudo ./comm_startup_lat.sh bfq-mq 5 5 seq 10 "lowriter --terminate_after_init" -for the comm_startup_lat benchmark in the S suite [1]. - -[1] https://github.com/Algodev-github/S - -Signed-off-by: Paolo Valente ---- - block/bfq-mq-iosched.c | 77 ++++++++++++++++++++++++++++++++++++++++++++++++++ - block/bfq-mq.h | 12 ++++++++ - 2 files changed, 89 insertions(+) - -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -index 6ce2c0789046..f384f5566672 100644 ---- a/block/bfq-mq-iosched.c -+++ b/block/bfq-mq-iosched.c -@@ -362,6 +362,82 @@ static struct request *bfq_choose_req(struct bfq_data *bfqd, - } - } - -+/* -+ * See the comments on bfq_limit_depth for the purpose of -+ * the depths set in the function. -+ */ -+static void bfq_update_depths(struct bfq_data *bfqd, struct sbitmap_queue *bt) -+{ -+ bfqd->sb_shift = bt->sb.shift; -+ -+ /* -+ * In-word depths if no bfq_queue is being weight-raised: -+ * leaving 25% of tags only for sync reads. -+ * -+ * In next formulas, right-shift the value -+ * (1U<sb_shift), instead of computing directly -+ * (1U<<(bfqd->sb_shift - something)), to be robust against -+ * any possible value of bfqd->sb_shift, without having to -+ * limit 'something'. 
-+ */ -+ /* no more than 50% of tags for async I/O */ -+ bfqd->word_depths[0][0] = max((1U<sb_shift)>>1, 1U); -+ /* -+ * no more than 75% of tags for sync writes (25% extra tags -+ * w.r.t. async I/O, to prevent async I/O from starving sync -+ * writes) -+ */ -+ bfqd->word_depths[0][1] = max(((1U<sb_shift) * 3)>>2, 1U); -+ -+ /* -+ * In-word depths in case some bfq_queue is being weight- -+ * raised: leaving ~63% of tags for sync reads. This is the -+ * highest percentage for which, in our tests, application -+ * start-up times didn't suffer from any regression due to tag -+ * shortage. -+ */ -+ /* no more than ~18% of tags for async I/O */ -+ bfqd->word_depths[1][0] = max(((1U<sb_shift) * 3)>>4, 1U); -+ /* no more than ~37% of tags for sync writes (~20% extra tags) */ -+ bfqd->word_depths[1][1] = max(((1U<sb_shift) * 6)>>4, 1U); -+} -+ -+/* -+ * Async I/O can easily starve sync I/O (both sync reads and sync -+ * writes), by consuming all tags. Similarly, storms of sync writes, -+ * such as those that sync(2) may trigger, can starve sync reads. -+ * Limit depths of async I/O and sync writes so as to counter both -+ * problems. -+ */ -+static void bfq_limit_depth(unsigned int op, struct blk_mq_alloc_data *data) -+{ -+ struct blk_mq_tags *tags = blk_mq_tags_from_data(data); -+ struct bfq_data *bfqd = data->q->elevator->elevator_data; -+ struct sbitmap_queue *bt; -+ -+ if (op_is_sync(op) && !op_is_write(op)) -+ return; -+ -+ if (data->flags & BLK_MQ_REQ_RESERVED) { -+ if (unlikely(!tags->nr_reserved_tags)) { -+ WARN_ON_ONCE(1); -+ return; -+ } -+ bt = &tags->breserved_tags; -+ } else -+ bt = &tags->bitmap_tags; -+ -+ if (unlikely(bfqd->sb_shift != bt->sb.shift)) -+ bfq_update_depths(bfqd, bt); -+ -+ data->shallow_depth = -+ bfqd->word_depths[!!bfqd->wr_busy_queues][op_is_sync(op)]; -+ -+ bfq_log(bfqd, "[%s] wr_busy %d sync %d depth %u", -+ __func__, bfqd->wr_busy_queues, op_is_sync(op), -+ data->shallow_depth); -+} -+ - static struct bfq_queue * - bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root, - sector_t sector, struct rb_node **ret_parent, -@@ -5812,6 +5888,7 @@ static struct elv_fs_entry bfq_attrs[] = { - - static struct elevator_type iosched_bfq_mq = { - .ops.mq = { -+ .limit_depth = bfq_limit_depth, - .prepare_request = bfq_prepare_request, - .finish_request = bfq_finish_request, - .exit_icq = bfq_exit_icq, -diff --git a/block/bfq-mq.h b/block/bfq-mq.h -index a5947b203ef2..458099ee0308 100644 ---- a/block/bfq-mq.h -+++ b/block/bfq-mq.h -@@ -619,6 +619,18 @@ struct bfq_data { - struct bfq_queue *bio_bfqq; - /* Extra flag used only for TESTING */ - bool bio_bfqq_set; -+ -+ /* -+ * Cached sbitmap shift, used to compute depth limits in -+ * bfq_update_depths. -+ */ -+ unsigned int sb_shift; -+ -+ /* -+ * Depth limits used in bfq_limit_depth (see comments on the -+ * function) -+ */ -+ unsigned int word_depths[2][2]; - }; - - enum bfqq_state_flags { - -From 402e5f6b59662d290ab2b3c10b0016207a63ad21 Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Thu, 21 Dec 2017 15:51:39 +0100 -Subject: [PATCH 11/23] bfq-sq, bfq-mq: limit sectors served with interactive - weight raising - -To maximise responsiveness, BFQ raises the weight, and performs device -idling, for bfq_queues associated with processes deemed as -interactive. In particular, weight raising has a maximum duration, -equal to the time needed to start a large application. If a -weight-raised process goes on doing I/O beyond this maximum duration, -it loses weight-raising. 
- -This mechanism is evidently vulnerable to the following false -positives: I/O-bound applications that will go on doing I/O for much -longer than the duration of weight-raising. These applications have -basically no benefit from being weight-raised at the beginning of -their I/O. On the opposite end, while being weight-raised, these -applications -a) unjustly steal throughput to applications that may truly need -low latency; -b) make BFQ uselessly perform device idling; device idling results -in loss of device throughput with most flash-based storage, and may -increase latencies when used purposelessly. - -This commit adds a countermeasure to reduce both the above -problems. To introduce this countermeasure, we provide the following -extra piece of information (full details in the comments added by this -commit). During the start-up of the large application used as a -reference to set the duration of weight-raising, involved processes -transfer at most ~110K sectors each. Accordingly, a process initially -deemed as interactive has no right to be weight-raised any longer, -once transferred 110K sectors or more. - -Basing on this consideration, this commit early-ends weight-raising -for a bfq_queue if the latter happens to have received an amount of -service at least equal to 110K sectors (actually, a little bit more, -to keep a safety margin). I/O-bound applications that reach a high -throughput, such as file copy, get to this threshold much before the -allowed weight-raising period finishes. Thus this early ending of -weight-raising reduces the amount of time during which these -applications cause the problems described above. - -Signed-off-by: Paolo Valente ---- - block/bfq-mq-iosched.c | 84 ++++++++++++++++++++++++++++++++++++++++++++------ - block/bfq-mq.h | 5 +++ - block/bfq-sched.c | 3 ++ - block/bfq-sq-iosched.c | 84 ++++++++++++++++++++++++++++++++++++++++++++------ - block/bfq.h | 5 +++ - 5 files changed, 163 insertions(+), 18 deletions(-) - -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -index f384f5566672..63fdd16dec3c 100644 ---- a/block/bfq-mq-iosched.c -+++ b/block/bfq-mq-iosched.c -@@ -162,15 +162,17 @@ static struct kmem_cache *bfq_pool; - * interactive applications automatically, using the following formula: - * duration = (R / r) * T, where r is the peak rate of the device, and - * R and T are two reference parameters. -- * In particular, R is the peak rate of the reference device (see below), -- * and T is a reference time: given the systems that are likely to be -- * installed on the reference device according to its speed class, T is -- * about the maximum time needed, under BFQ and while reading two files in -- * parallel, to load typical large applications on these systems. -- * In practice, the slower/faster the device at hand is, the more/less it -- * takes to load applications with respect to the reference device. -- * Accordingly, the longer/shorter BFQ grants weight raising to interactive -- * applications. -+ * In particular, R is the peak rate of the reference device (see -+ * below), and T is a reference time: given the systems that are -+ * likely to be installed on the reference device according to its -+ * speed class, T is about the maximum time needed, under BFQ and -+ * while reading two files in parallel, to load typical large -+ * applications on these systems (see the comments on -+ * max_service_from_wr below, for more details on how T is obtained). 
-+ * In practice, the slower/faster the device at hand is, the more/less -+ * it takes to load applications with respect to the reference device. -+ * Accordingly, the longer/shorter BFQ grants weight raising to -+ * interactive applications. - * - * BFQ uses four different reference pairs (R, T), depending on: - * . whether the device is rotational or non-rotational; -@@ -207,6 +209,60 @@ static int T_slow[2]; - static int T_fast[2]; - static int device_speed_thresh[2]; - -+/* -+ * BFQ uses the above-detailed, time-based weight-raising mechanism to -+ * privilege interactive tasks. This mechanism is vulnerable to the -+ * following false positives: I/O-bound applications that will go on -+ * doing I/O for much longer than the duration of weight -+ * raising. These applications have basically no benefit from being -+ * weight-raised at the beginning of their I/O. On the opposite end, -+ * while being weight-raised, these applications -+ * a) unjustly steal throughput to applications that may actually need -+ * low latency; -+ * b) make BFQ uselessly perform device idling; device idling results -+ * in loss of device throughput with most flash-based storage, and may -+ * increase latencies when used purposelessly. -+ * -+ * BFQ tries to reduce these problems, by adopting the following -+ * countermeasure. To introduce this countermeasure, we need first to -+ * finish explaining how the duration of weight-raising for -+ * interactive tasks is computed. -+ * -+ * For a bfq_queue deemed as interactive, the duration of weight -+ * raising is dynamically adjusted, as a function of the estimated -+ * peak rate of the device, so as to be equal to the time needed to -+ * execute the 'largest' interactive task we benchmarked so far. By -+ * largest task, we mean the task for which each involved process has -+ * to do more I/O than for any of the other tasks we benchmarked. This -+ * reference interactive task is the start-up of LibreOffice Writer, -+ * and in this task each process/bfq_queue needs to have at most ~110K -+ * sectors transferred. -+ * -+ * This last piece of information enables BFQ to reduce the actual -+ * duration of weight-raising for at least one class of I/O-bound -+ * applications: those doing sequential or quasi-sequential I/O. An -+ * example is file copy. In fact, once started, the main I/O-bound -+ * processes of these applications usually consume the above 110K -+ * sectors in much less time than the processes of an application that -+ * is starting, because these I/O-bound processes will greedily devote -+ * almost all their CPU cycles only to their target, -+ * throughput-friendly I/O operations. This is even more true if BFQ -+ * happens to be underestimating the device peak rate, and thus -+ * overestimating the duration of weight raising. But, according to -+ * our measurements, once transferred 110K sectors, these processes -+ * have no right to be weight-raised any longer. -+ * -+ * Basing on the last consideration, BFQ ends weight-raising for a -+ * bfq_queue if the latter happens to have received an amount of -+ * service at least equal to the following constant. The constant is -+ * set to slightly more than 110K, to have a minimum safety margin. -+ * -+ * This early ending of weight-raising reduces the amount of time -+ * during which interactive false positives cause the two problems -+ * described at the beginning of these comments. 
-+ */ -+static const unsigned long max_service_from_wr = 120000; -+ - #define BFQ_SERVICE_TREE_INIT ((struct bfq_service_tree) \ - { RB_ROOT, RB_ROOT, NULL, NULL, 0, 0 }) - -@@ -1361,6 +1417,7 @@ static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd, - if (old_wr_coeff == 1 && wr_or_deserves_wr) { - /* start a weight-raising period */ - if (interactive) { -+ bfqq->service_from_wr = 0; - bfqq->wr_coeff = bfqd->bfq_wr_coeff; - bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); - } else { -@@ -3980,6 +4037,15 @@ static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq) - "back to interactive wr"); - } - } -+ if (bfqq->wr_coeff > 1 && -+ bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time && -+ bfqq->service_from_wr > max_service_from_wr) { -+ /* see comments on max_service_from_wr */ -+ bfq_bfqq_end_wr(bfqq); -+ bfq_log_bfqq(bfqd, bfqq, -+ "[%s] too much service", -+ __func__); -+ } - } - /* - * To improve latency (for this or other queues), immediately -diff --git a/block/bfq-mq.h b/block/bfq-mq.h -index 458099ee0308..9a5ce1168ff5 100644 ---- a/block/bfq-mq.h -+++ b/block/bfq-mq.h -@@ -331,6 +331,11 @@ struct bfq_queue { - * last transition from idle to backlogged. - */ - unsigned long service_from_backlogged; -+ /* -+ * Cumulative service received from the @bfq_queue since its -+ * last transition to weight-raised state. -+ */ -+ unsigned long service_from_wr; - /* - * Value of wr start time when switching to soft rt - */ -diff --git a/block/bfq-sched.c b/block/bfq-sched.c -index 9d261dd428e4..4e6c5232e2fb 100644 ---- a/block/bfq-sched.c -+++ b/block/bfq-sched.c -@@ -942,6 +942,9 @@ static void bfq_bfqq_served(struct bfq_queue *bfqq, int served) - if (!bfqq->service_from_backlogged) - bfqq->first_IO_time = jiffies; - -+ if (bfqq->wr_coeff > 1) -+ bfqq->service_from_wr += served; -+ - bfqq->service_from_backlogged += served; - for_each_entity(entity) { - st = bfq_entity_service_tree(entity); -diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c -index eff4c4edf5a0..486493aafaf8 100644 ---- a/block/bfq-sq-iosched.c -+++ b/block/bfq-sq-iosched.c -@@ -156,15 +156,17 @@ static struct kmem_cache *bfq_pool; - * interactive applications automatically, using the following formula: - * duration = (R / r) * T, where r is the peak rate of the device, and - * R and T are two reference parameters. -- * In particular, R is the peak rate of the reference device (see below), -- * and T is a reference time: given the systems that are likely to be -- * installed on the reference device according to its speed class, T is -- * about the maximum time needed, under BFQ and while reading two files in -- * parallel, to load typical large applications on these systems. -- * In practice, the slower/faster the device at hand is, the more/less it -- * takes to load applications with respect to the reference device. -- * Accordingly, the longer/shorter BFQ grants weight raising to interactive -- * applications. -+ * In particular, R is the peak rate of the reference device (see -+ * below), and T is a reference time: given the systems that are -+ * likely to be installed on the reference device according to its -+ * speed class, T is about the maximum time needed, under BFQ and -+ * while reading two files in parallel, to load typical large -+ * applications on these systems (see the comments on -+ * max_service_from_wr below, for more details on how T is obtained). 
-+ * In practice, the slower/faster the device at hand is, the more/less -+ * it takes to load applications with respect to the reference device. -+ * Accordingly, the longer/shorter BFQ grants weight raising to -+ * interactive applications. - * - * BFQ uses four different reference pairs (R, T), depending on: - * . whether the device is rotational or non-rotational; -@@ -201,6 +203,60 @@ static int T_slow[2]; - static int T_fast[2]; - static int device_speed_thresh[2]; - -+/* -+ * BFQ uses the above-detailed, time-based weight-raising mechanism to -+ * privilege interactive tasks. This mechanism is vulnerable to the -+ * following false positives: I/O-bound applications that will go on -+ * doing I/O for much longer than the duration of weight -+ * raising. These applications have basically no benefit from being -+ * weight-raised at the beginning of their I/O. On the opposite end, -+ * while being weight-raised, these applications -+ * a) unjustly steal throughput to applications that may actually need -+ * low latency; -+ * b) make BFQ uselessly perform device idling; device idling results -+ * in loss of device throughput with most flash-based storage, and may -+ * increase latencies when used purposelessly. -+ * -+ * BFQ tries to reduce these problems, by adopting the following -+ * countermeasure. To introduce this countermeasure, we need first to -+ * finish explaining how the duration of weight-raising for -+ * interactive tasks is computed. -+ * -+ * For a bfq_queue deemed as interactive, the duration of weight -+ * raising is dynamically adjusted, as a function of the estimated -+ * peak rate of the device, so as to be equal to the time needed to -+ * execute the 'largest' interactive task we benchmarked so far. By -+ * largest task, we mean the task for which each involved process has -+ * to do more I/O than for any of the other tasks we benchmarked. This -+ * reference interactive task is the start-up of LibreOffice Writer, -+ * and in this task each process/bfq_queue needs to have at most ~110K -+ * sectors transfered. -+ * -+ * This last piece of information enables BFQ to reduce the actual -+ * duration of weight-raising for at least one class of I/O-bound -+ * applications: those doing sequential or quasi-sequential I/O. An -+ * example is file copy. In fact, once started, the main I/O-bound -+ * processes of these applications usually consume the above 110K -+ * sectors in much less time than the processes of an application that -+ * is starting, because these I/O-bound processes will greedily devote -+ * almost all their CPU cycles only to their target, -+ * throughput-friendly I/O operations. This is even more true if BFQ -+ * happens to be underestimating the device peak rate, and thus -+ * overestimating the duration of weight raising. But, according to -+ * our measurements, once transferred 110K sectors, these processes -+ * have no right to be weight-raised any longer. -+ * -+ * Basing on the last consideration, BFQ ends weight-raising for a -+ * bfq_queue if the latter happens to have received an amount of -+ * service at least equal to the following constant. The constant is -+ * set to slightly more than 110K, to have a minimum safety margin. -+ * -+ * This early ending of weight-raising reduces the amount of time -+ * during which interactive false positives cause the two problems -+ * described at the beginning of these comments. 
-+ */ -+static const unsigned long max_service_from_wr = 120000; -+ - #define BFQ_SERVICE_TREE_INIT ((struct bfq_service_tree) \ - { RB_ROOT, RB_ROOT, NULL, NULL, 0, 0 }) - -@@ -1246,6 +1302,7 @@ static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd, - if (old_wr_coeff == 1 && wr_or_deserves_wr) { - /* start a weight-raising period */ - if (interactive) { -+ bfqq->service_from_wr = 0; - bfqq->wr_coeff = bfqd->bfq_wr_coeff; - bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); - } else { -@@ -3794,6 +3851,15 @@ static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq) - "back to interactive wr"); - } - } -+ if (bfqq->wr_coeff > 1 && -+ bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time && -+ bfqq->service_from_wr > max_service_from_wr) { -+ /* see comments on max_service_from_wr */ -+ bfq_bfqq_end_wr(bfqq); -+ bfq_log_bfqq(bfqd, bfqq, -+ "[%s] too much service", -+ __func__); -+ } - } - /* - * To improve latency (for this or other queues), immediately -diff --git a/block/bfq.h b/block/bfq.h -index 59539adc00a5..0cd7a3f251a7 100644 ---- a/block/bfq.h -+++ b/block/bfq.h -@@ -323,6 +323,11 @@ struct bfq_queue { - * last transition from idle to backlogged. - */ - unsigned long service_from_backlogged; -+ /* -+ * Cumulative service received from the @bfq_queue since its -+ * last transition to weight-raised state. -+ */ -+ unsigned long service_from_wr; - /* - * Value of wr start time when switching to soft rt - */ - -From 59efebb94b2f9bac653faf62dadb45b83bd27fa7 Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Thu, 4 Jan 2018 16:29:58 +0100 -Subject: [PATCH 12/23] bfq-sq, bfq-mq: put async queues for root bfq groups - too -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -For each pair [device for which bfq is selected as I/O scheduler, -group in blkio/io], bfq maintains a corresponding bfq group. Each such -bfq group contains a set of async queues, with each async queue -created on demand, i.e., when some I/O request arrives for it. On -creation, an async queue gets an extra reference, to make sure that -the queue is not freed as long as its bfq group exists. Accordingly, -to allow the queue to be freed after the group exited, this extra -reference must released on group exit. - -The above holds also for a bfq root group, i.e., for the bfq group -corresponding to the root blkio/io root for a given device. Yet, by -mistake, the references to the existing async queues of a root group -are not released when the latter exits. This causes a memory leak when -the instance of bfq for a given device exits. In a similar vein, -bfqg_stats_xfer_dead is not executed for a root group. - -This commit fixes bfq_pd_offline so that the latter executes the above -missing operations for a root group too. 
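The shape of the fix is easy to see in a toy reference-count model: instead of returning early for the root group, only the entity-related teardown is skipped, so that the async-queue references are dropped for every group. The function names below echo the BFQ ones, but the structures are simplified placeholders:

  /* root_group_offline_sketch.c -- build with: gcc -Wall -o offline_sketch root_group_offline_sketch.c */
  #include <stdio.h>
  #include <stdbool.h>

  struct group {
          bool is_root;           /* the root group has no my_entity in BFQ */
          int async_queue_refs;   /* refs pinned on behalf of the group's async queues */
  };

  /* drop the references the group holds on its async queues */
  static void put_async_queues(struct group *g)
  {
          g->async_queue_refs = 0;
  }

  /* offline path: only the non-root bookkeeping is conditional */
  static void pd_offline(struct group *g)
  {
          if (!g->is_root) {
                  /* non-root only: empty service trees, deactivate the entity, ... */
          }
          /* every group, root included, must release its async-queue refs */
          put_async_queues(g);
  }

  int main(void)
  {
          struct group root = { .is_root = true, .async_queue_refs = 3 };

          pd_offline(&root);
          printf("async-queue refs still held by the root group: %d\n",
                 root.async_queue_refs);
          return 0;
  }

An early return before put_async_queues(), as in the previous code, would instead leave those references pinned forever once the scheduler instance goes away.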
- -Reported-by: Holger Hoffstätte -Reported-by: Guoqing Jiang -Signed-off-by: Davide Ferrari -Signed-off-by: Paolo Valente ---- - block/bfq-cgroup-included.c | 8 +++++--- - 1 file changed, 5 insertions(+), 3 deletions(-) - -diff --git a/block/bfq-cgroup-included.c b/block/bfq-cgroup-included.c -index 562b0ce581a7..45fefb2e2d57 100644 ---- a/block/bfq-cgroup-included.c -+++ b/block/bfq-cgroup-included.c -@@ -885,13 +885,13 @@ static void bfq_pd_offline(struct blkg_policy_data *pd) - - entity = bfqg->my_entity; - -- if (!entity) /* root group */ -- return; -- - #ifdef BFQ_MQ - spin_lock_irqsave(&bfqd->lock, flags); - #endif - -+ if (!entity) /* root group */ -+ goto put_async_queues; -+ - /* - * Empty all service_trees belonging to this group before - * deactivating the group itself. -@@ -926,6 +926,8 @@ static void bfq_pd_offline(struct blkg_policy_data *pd) - BUG_ON(bfqg->sched_data.in_service_entity); - - __bfq_deactivate_entity(entity, false); -+ -+put_async_queues: - bfq_put_async_queues(bfqd, bfqg); - - #ifdef BFQ_MQ - -From 2dfbaaaf95054e2da3ededc0deb1ba5a4f589e53 Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Mon, 8 Jan 2018 19:38:45 +0100 -Subject: [PATCH 13/23] bfq-sq, bfq-mq: release oom-queue ref to root group on - exit - -On scheduler init, a reference to the root group, and a reference to -its corresponding blkg are taken for the oom queue. Yet these -references are not released on scheduler exit, which prevents these -objects from be freed. This commit adds the missing reference -releases. - -Reported-by: Davide Ferrari -Signed-off-by: Paolo Valente ---- - block/bfq-mq-iosched.c | 3 +++ - block/bfq-sq-iosched.c | 3 +++ - 2 files changed, 6 insertions(+) - -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -index 63fdd16dec3c..b82c52fabf91 100644 ---- a/block/bfq-mq-iosched.c -+++ b/block/bfq-mq-iosched.c -@@ -5507,6 +5507,9 @@ static void bfq_exit_queue(struct elevator_queue *e) - - BUG_ON(hrtimer_active(&bfqd->idle_slice_timer)); - -+ /* release oom-queue reference to root group */ -+ bfqg_and_blkg_put(bfqd->root_group); -+ - #ifdef BFQ_GROUP_IOSCHED_ENABLED - blkcg_deactivate_policy(bfqd->queue, &blkcg_policy_bfq); - #else -diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c -index 486493aafaf8..851af055664d 100644 ---- a/block/bfq-sq-iosched.c -+++ b/block/bfq-sq-iosched.c -@@ -5052,6 +5052,9 @@ static void bfq_exit_queue(struct elevator_queue *e) - - BUG_ON(hrtimer_active(&bfqd->idle_slice_timer)); - -+ /* release oom-queue reference to root group */ -+ bfqg_put(bfqd->root_group); -+ - #ifdef BFQ_GROUP_IOSCHED_ENABLED - blkcg_deactivate_policy(q, &blkcg_policy_bfq); - #else - -From 13efe00c8292d78d223e1090a7f36426e360eb38 Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Mon, 8 Jan 2018 19:40:38 +0100 -Subject: [PATCH 14/23] block, bfq-sq, bfq-mq: trace get and put of bfq groups - -Signed-off-by: Paolo Valente ---- - block/bfq-cgroup-included.c | 15 +++++++++++++++ - block/bfq-mq-iosched.c | 3 ++- - 2 files changed, 17 insertions(+), 1 deletion(-) - -diff --git a/block/bfq-cgroup-included.c b/block/bfq-cgroup-included.c -index 45fefb2e2d57..f94743fb2e7d 100644 ---- a/block/bfq-cgroup-included.c -+++ b/block/bfq-cgroup-included.c -@@ -267,6 +267,8 @@ static struct bfq_group *bfqq_group(struct bfq_queue *bfqq) - - static void bfqg_get(struct bfq_group *bfqg) - { -+ trace_printk("bfqg %p\n", bfqg); -+ - #ifdef BFQ_MQ - bfqg->ref++; - #else -@@ -280,6 +282,9 @@ static void bfqg_put(struct bfq_group *bfqg) - bfqg->ref--; - - BUG_ON(bfqg->ref < 0); 
-+ trace_printk("putting bfqg %p %s\n", bfqg, -+ bfqg->ref == 0 ? "and freeing it" : ""); -+ - if (bfqg->ref == 0) - kfree(bfqg); - #else -@@ -293,6 +298,7 @@ static void bfqg_and_blkg_get(struct bfq_group *bfqg) - /* see comments in bfq_bic_update_cgroup for why refcounting bfqg */ - bfqg_get(bfqg); - -+ trace_printk("getting blkg for bfqg %p\n", bfqg); - blkg_get(bfqg_to_blkg(bfqg)); - } - -@@ -300,6 +306,7 @@ static void bfqg_and_blkg_put(struct bfq_group *bfqg) - { - bfqg_put(bfqg); - -+ trace_printk("putting blkg for bfqg %p\n", bfqg); - blkg_put(bfqg_to_blkg(bfqg)); - } - #endif -@@ -382,6 +389,8 @@ static void bfq_init_entity(struct bfq_entity *entity, - * Make sure that bfqg and its associated blkg do not - * disappear before entity. - */ -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "[%s] getting bfqg %p and blkg\n", __func__, bfqg); -+ - bfqg_and_blkg_get(bfqg); - #else - bfqg_get(bfqg); -@@ -475,6 +484,7 @@ static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node) - kfree(bfqg); - return NULL; - } -+ trace_printk("bfqg %p\n", bfqg); - - #ifdef BFQ_MQ - /* see comments in bfq_bic_update_cgroup for why refcounting */ -@@ -513,6 +523,7 @@ static void bfq_pd_init(struct blkg_policy_data *pd) - static void bfq_pd_free(struct blkg_policy_data *pd) - { - struct bfq_group *bfqg = pd_to_bfqg(pd); -+ trace_printk("bfqg %p\n", bfqg); - - bfqg_stats_exit(&bfqg->stats); - #ifdef BFQ_MQ -@@ -650,6 +661,8 @@ static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, - bfq_put_idle_entity(bfq_entity_service_tree(entity), entity); - } - #ifdef BFQ_MQ -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "[%s] putting blkg and bfqg %p\n", __func__, bfqg); -+ - bfqg_and_blkg_put(bfqq_group(bfqq)); - #else - bfqg_put(bfqq_group(bfqq)); -@@ -658,6 +671,8 @@ static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, - entity->parent = bfqg->my_entity; - entity->sched_data = &bfqg->sched_data; - #ifdef BFQ_MQ -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "[%s] getting blkg and bfqg %p\n", __func__, bfqg); -+ - /* pin down bfqg and its associated blkg */ - bfqg_and_blkg_get(bfqg); - #else -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -index b82c52fabf91..d5b7a6b985d7 100644 ---- a/block/bfq-mq-iosched.c -+++ b/block/bfq-mq-iosched.c -@@ -4385,10 +4385,11 @@ static void bfq_put_queue(struct bfq_queue *bfqq) - if (bfqq->bfqd) - bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p freed", bfqq); - -- kmem_cache_free(bfq_pool, bfqq); - #ifdef BFQ_GROUP_IOSCHED_ENABLED -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "[%s] putting blkg and bfqg %p\n", __func__, bfqg); - bfqg_and_blkg_put(bfqg); - #endif -+ kmem_cache_free(bfq_pool, bfqq); - } - - static void bfq_put_cooperator(struct bfq_queue *bfqq) - -From 816b77fba966171974eb5ee25d81bc4e19eaf1b4 Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Wed, 10 Jan 2018 09:08:22 +0100 -Subject: [PATCH 15/23] bfq-sq, bfq-mq: compile group put for oom queue only if - BFQ_GROUP_IOSCHED is set - -Commit ("bfq-sq, bfq-mq: release oom-queue ref to root group on exit") -added a missing put of the root bfq group for the oom queue. That put -has to be, and can be, performed only if CONFIG_BFQ_GROUP_IOSCHED is -defined: the function doing the put is even not defined at all if -CONFIG_BFQ_GROUP_IOSCHED is not defined. But that commit makes that -put be invoked regardless of whether CONFIG_BFQ_GROUP_IOSCHED is -defined. This commit fixes this mistake, by making that invocation be -compiled only if CONFIG_BFQ_GROUP_IOSCHED is actually defined. 
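The point is purely one of compilation scope: bfqg_and_blkg_put() (bfqg_put() in bfq-sq) is only defined when group scheduling is built in, so the release has to live inside the same conditional block. Roughly, following the bfq-mq hunk below (the non-group branch is unchanged and not shown):

    static void bfq_exit_queue(struct elevator_queue *e)
    {
            struct bfq_data *bfqd = e->elevator_data;

    #ifdef BFQ_GROUP_IOSCHED_ENABLED
            /* bfqg_and_blkg_put() exists only in this configuration */
            bfqg_and_blkg_put(bfqd->root_group); /* oom-queue reference */

            blkcg_deactivate_policy(bfqd->queue, &blkcg_policy_bfq);
    #endif
    }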
- -Fixes ("block, bfq: release oom-queue ref to root group on exit") -Reported-by: Jan Alexander Steffens -Signed-off-by: Paolo Valente ---- - block/bfq-mq-iosched.c | 2 +- - block/bfq-sq-iosched.c | 2 +- - 2 files changed, 2 insertions(+), 2 deletions(-) - -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -index d5b7a6b985d7..2581fe0f6f2f 100644 ---- a/block/bfq-mq-iosched.c -+++ b/block/bfq-mq-iosched.c -@@ -5508,10 +5508,10 @@ static void bfq_exit_queue(struct elevator_queue *e) - - BUG_ON(hrtimer_active(&bfqd->idle_slice_timer)); - -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - /* release oom-queue reference to root group */ - bfqg_and_blkg_put(bfqd->root_group); - --#ifdef BFQ_GROUP_IOSCHED_ENABLED - blkcg_deactivate_policy(bfqd->queue, &blkcg_policy_bfq); - #else - spin_lock_irq(&bfqd->lock); -diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c -index 851af055664d..c4df156b1fb4 100644 ---- a/block/bfq-sq-iosched.c -+++ b/block/bfq-sq-iosched.c -@@ -5052,10 +5052,10 @@ static void bfq_exit_queue(struct elevator_queue *e) - - BUG_ON(hrtimer_active(&bfqd->idle_slice_timer)); - -+#ifdef BFQ_GROUP_IOSCHED_ENABLED - /* release oom-queue reference to root group */ - bfqg_put(bfqd->root_group); - --#ifdef BFQ_GROUP_IOSCHED_ENABLED - blkcg_deactivate_policy(q, &blkcg_policy_bfq); - #else - bfq_put_async_queues(bfqd, bfqd->root_group); - -From 643a89c659172b2c9ae16adfe03af4e3e88e1326 Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Sat, 13 Jan 2018 18:48:41 +0100 -Subject: [PATCH 16/23] block, bfq-sq, bfq-mq: remove trace_printks - -Commit ("block, bfq-sq, bfq-mq: trace get and put of bfq groups") -unwisely added some invocations of the function trace_printk, which -is inappropriate in production kernels. This commit removes those -invocations. - -Signed-off-by: Paolo Valente ---- - block/bfq-cgroup-included.c | 10 ---------- - 1 file changed, 10 deletions(-) - -diff --git a/block/bfq-cgroup-included.c b/block/bfq-cgroup-included.c -index f94743fb2e7d..a4f8a03edfc9 100644 ---- a/block/bfq-cgroup-included.c -+++ b/block/bfq-cgroup-included.c -@@ -267,8 +267,6 @@ static struct bfq_group *bfqq_group(struct bfq_queue *bfqq) - - static void bfqg_get(struct bfq_group *bfqg) - { -- trace_printk("bfqg %p\n", bfqg); -- - #ifdef BFQ_MQ - bfqg->ref++; - #else -@@ -282,9 +280,6 @@ static void bfqg_put(struct bfq_group *bfqg) - bfqg->ref--; - - BUG_ON(bfqg->ref < 0); -- trace_printk("putting bfqg %p %s\n", bfqg, -- bfqg->ref == 0 ? 
"and freeing it" : ""); -- - if (bfqg->ref == 0) - kfree(bfqg); - #else -@@ -298,7 +293,6 @@ static void bfqg_and_blkg_get(struct bfq_group *bfqg) - /* see comments in bfq_bic_update_cgroup for why refcounting bfqg */ - bfqg_get(bfqg); - -- trace_printk("getting blkg for bfqg %p\n", bfqg); - blkg_get(bfqg_to_blkg(bfqg)); - } - -@@ -306,7 +300,6 @@ static void bfqg_and_blkg_put(struct bfq_group *bfqg) - { - bfqg_put(bfqg); - -- trace_printk("putting blkg for bfqg %p\n", bfqg); - blkg_put(bfqg_to_blkg(bfqg)); - } - #endif -@@ -484,8 +477,6 @@ static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node) - kfree(bfqg); - return NULL; - } -- trace_printk("bfqg %p\n", bfqg); -- - #ifdef BFQ_MQ - /* see comments in bfq_bic_update_cgroup for why refcounting */ - bfqg_get(bfqg); -@@ -523,7 +514,6 @@ static void bfq_pd_init(struct blkg_policy_data *pd) - static void bfq_pd_free(struct blkg_policy_data *pd) - { - struct bfq_group *bfqg = pd_to_bfqg(pd); -- trace_printk("bfqg %p\n", bfqg); - - bfqg_stats_exit(&bfqg->stats); - #ifdef BFQ_MQ - -From ce050275e24fecec800f346c09d9494563e9fc8a Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Mon, 15 Jan 2018 15:07:05 +0100 -Subject: [PATCH 17/23] block, bfq-mq: add requeue-request hook - -Commit 'a6a252e64914 ("blk-mq-sched: decide how to handle flush rq via -RQF_FLUSH_SEQ")' makes all non-flush re-prepared requests for a device -be re-inserted into the active I/O scheduler for that device. As a -consequence, I/O schedulers may get the same request inserted again, -even several times, without a finish_request invoked on that request -before each re-insertion. - -This fact is the cause of the failure reported in [1]. For an I/O -scheduler, every re-insertion of the same re-prepared request is -equivalent to the insertion of a new request. For schedulers like -mq-deadline or kyber, this fact causes no harm. In contrast, it -confuses a stateful scheduler like BFQ, which keeps state for an I/O -request, until the finish_request hook is invoked on the request. In -particular, BFQ may get stuck, waiting forever for the number of -request dispatches, of the same request, to be balanced by an equal -number of request completions (while there will be one completion for -that request). In this state, BFQ may refuse to serve I/O requests -from other bfq_queues. The hang reported in [1] then follows. - -However, the above re-prepared requests undergo a requeue, thus the -requeue_request hook of the active elevator is invoked for these -requests, if set. This commit then addresses the above issue by -properly implementing the hook requeue_request in BFQ. 
- -[1] https://marc.info/?l=linux-block&m=151211117608676 - -Reported-by: Ivan Kozik -Reported-by: Alban Browaeys -Signed-off-by: Paolo Valente -Signed-off-by: Serena Ziviani ---- - block/bfq-mq-iosched.c | 90 ++++++++++++++++++++++++++++++++++++++++---------- - 1 file changed, 73 insertions(+), 17 deletions(-) - -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -index 2581fe0f6f2f..bb7ccc2f1165 100644 ---- a/block/bfq-mq-iosched.c -+++ b/block/bfq-mq-iosched.c -@@ -4162,9 +4162,9 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) - * TESTING: reset DISP_LIST flag, because: 1) - * this rq this request has passed through - * bfq_prepare_request, 2) then it will have -- * bfq_finish_request invoked on it, and 3) in -- * bfq_finish_request we use this flag to check -- * that bfq_finish_request is not invoked on -+ * bfq_finish_requeue_request invoked on it, and 3) in -+ * bfq_finish_requeue_request we use this flag to check -+ * that bfq_finish_requeue_request is not invoked on - * requests for which bfq_prepare_request has - * been invoked. - */ -@@ -4173,8 +4173,8 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) - } - - /* -- * We exploit the bfq_finish_request hook to decrement -- * rq_in_driver, but bfq_finish_request will not be -+ * We exploit the bfq_finish_requeue_request hook to decrement -+ * rq_in_driver, but bfq_finish_requeue_request will not be - * invoked on this request. So, to avoid unbalance, - * just start this request, without incrementing - * rq_in_driver. As a negative consequence, -@@ -4183,10 +4183,10 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) - * bfq_schedule_dispatch to be invoked uselessly. - * - * As for implementing an exact solution, the -- * bfq_finish_request hook, if defined, is probably -+ * bfq_finish_requeue_request hook, if defined, is probably - * invoked also on this request. So, by exploiting - * this hook, we could 1) increment rq_in_driver here, -- * and 2) decrement it in bfq_finish_request. Such a -+ * and 2) decrement it in bfq_finish_requeue_request. Such a - * solution would let the value of the counter be - * always accurate, but it would entail using an extra - * interface function. This cost seems higher than the -@@ -4878,6 +4878,8 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq) - return idle_timer_disabled; - } - -+static void bfq_prepare_request(struct request *rq, struct bio *bio); -+ - static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, - bool at_head) - { -@@ -4919,6 +4921,20 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, - BUG_ON(!(rq->rq_flags & RQF_GOT)); - rq->rq_flags &= ~RQF_GOT; - -+ if (!bfqq) { -+ /* -+ * This should never happen. Most likely rq is -+ * a requeued regular request, being -+ * re-inserted without being first -+ * re-prepared. Do a prepare, to avoid -+ * failure. 
-+ */ -+ pr_warn("Regular request associated with no queue"); -+ WARN_ON(1); -+ bfq_prepare_request(rq, rq->bio); -+ bfqq = RQ_BFQQ(rq); -+ } -+ - #if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP) - idle_timer_disabled = __bfq_insert_request(bfqd, rq); - /* -@@ -5110,7 +5126,7 @@ static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd) - } - } - --static void bfq_finish_request_body(struct bfq_queue *bfqq) -+static void bfq_finish_requeue_request_body(struct bfq_queue *bfqq) - { - bfq_log_bfqq(bfqq->bfqd, bfqq, - "put_request_body: allocated %d", bfqq->allocated); -@@ -5120,7 +5136,13 @@ static void bfq_finish_request_body(struct bfq_queue *bfqq) - bfq_put_queue(bfqq); - } - --static void bfq_finish_request(struct request *rq) -+/* -+ * Handle either a requeue or a finish for rq. The things to do are -+ * the same in both cases: all references to rq are to be dropped. In -+ * particular, rq is considered completed from the point of view of -+ * the scheduler. -+ */ -+static void bfq_finish_requeue_request(struct request *rq) - { - struct bfq_queue *bfqq; - struct bfq_data *bfqd; -@@ -5128,11 +5150,27 @@ static void bfq_finish_request(struct request *rq) - - BUG_ON(!rq); - -- if (!rq->elv.icq) -+ bfqq = RQ_BFQQ(rq); -+ -+ /* -+ * Requeue and finish hooks are invoked in blk-mq without -+ * checking whether the involved request is actually still -+ * referenced in the scheduler. To handle this fact, the -+ * following two checks make this function exit in case of -+ * spurious invocations, for which there is nothing to do. -+ * -+ * First, check whether rq has nothing to do with an elevator. -+ */ -+ if (unlikely(!(rq->rq_flags & RQF_ELVPRIV))) - return; - -- bfqq = RQ_BFQQ(rq); -- BUG_ON(!bfqq); -+ /* -+ * rq either is not associated with any icq, or is an already -+ * requeued request that has not (yet) been re-inserted into -+ * a bfq_queue. -+ */ -+ if (!rq->elv.icq || !bfqq) -+ return; - - bic = RQ_BIC(rq); - BUG_ON(!bic); -@@ -5145,7 +5183,6 @@ static void bfq_finish_request(struct request *rq) - BUG(); - } - BUG_ON(rq->rq_flags & RQF_QUEUED); -- BUG_ON(!(rq->rq_flags & RQF_ELVPRIV)); - - bfq_log_bfqq(bfqd, bfqq, - "putting rq %p with %u sects left, STARTED %d", -@@ -5166,13 +5203,14 @@ static void bfq_finish_request(struct request *rq) - spin_lock_irqsave(&bfqd->lock, flags); - - bfq_completed_request(bfqq, bfqd); -- bfq_finish_request_body(bfqq); -+ bfq_finish_requeue_request_body(bfqq); - - spin_unlock_irqrestore(&bfqd->lock, flags); - } else { - /* - * Request rq may be still/already in the scheduler, -- * in which case we need to remove it. And we cannot -+ * in which case we need to remove it (this should -+ * never happen in case of requeue). And we cannot - * defer such a check and removal, to avoid - * inconsistencies in the time interval from the end - * of this function to the start of the deferred work. -@@ -5189,9 +5227,26 @@ static void bfq_finish_request(struct request *rq) - bfqg_stats_update_io_remove(bfqq_group(bfqq), - rq->cmd_flags); - } -- bfq_finish_request_body(bfqq); -+ bfq_finish_requeue_request_body(bfqq); - } - -+ /* -+ * Reset private fields. In case of a requeue, this allows -+ * this function to correctly do nothing if it is spuriously -+ * invoked again on this same request (see the check at the -+ * beginning of the function). 
Probably, a better general -+ * design would be to prevent blk-mq from invoking the requeue -+ * or finish hooks of an elevator, for a request that is not -+ * referred by that elevator. -+ * -+ * Resetting the following fields would break the -+ * request-insertion logic if rq is re-inserted into a bfq -+ * internal queue, without a re-preparation. Here we assume -+ * that re-insertions of requeued requests, without -+ * re-preparation, can happen only for pass_through or at_head -+ * requests (which are not re-inserted into bfq internal -+ * queues). -+ */ - rq->elv.priv[0] = NULL; - rq->elv.priv[1] = NULL; - } -@@ -5960,7 +6015,8 @@ static struct elevator_type iosched_bfq_mq = { - .ops.mq = { - .limit_depth = bfq_limit_depth, - .prepare_request = bfq_prepare_request, -- .finish_request = bfq_finish_request, -+ .requeue_request = bfq_finish_requeue_request, -+ .finish_request = bfq_finish_requeue_request, - .exit_icq = bfq_exit_icq, - .insert_requests = bfq_insert_requests, - .dispatch_request = bfq_dispatch_request, - -From 3e4f292191cc62b3844316b9741534c3f1b36f0a Mon Sep 17 00:00:00 2001 -From: Davide Paganelli -Date: Thu, 8 Feb 2018 12:19:24 +0100 -Subject: [PATCH 18/23] block, bfq-mq, bfq-sq: make log functions print names - of calling functions - -Add the macro __func__ as a parameter to the invocations of the functions -pr_crit, blk_add_trace_msg and blk_add_cgroup_trace_msg in bfq_log* -functions, in order to include automatically in the log messages -the names of the functions that call the log functions. -The programmer can then avoid doing it. - -Signed-off-by: Davide Paganelli -Signed-off-by: Paolo Valente ---- - block/bfq-cgroup-included.c | 9 +-- - block/bfq-mq-iosched.c | 167 ++++++++++++++++++++++---------------------- - block/bfq-mq.h | 33 ++++----- - block/bfq-sched.c | 54 +++++++------- - block/bfq-sq-iosched.c | 134 +++++++++++++++++------------------ - block/bfq.h | 33 ++++----- - 6 files changed, 214 insertions(+), 216 deletions(-) - -diff --git a/block/bfq-cgroup-included.c b/block/bfq-cgroup-included.c -index a4f8a03edfc9..613f154e9da5 100644 ---- a/block/bfq-cgroup-included.c -+++ b/block/bfq-cgroup-included.c -@@ -382,7 +382,8 @@ static void bfq_init_entity(struct bfq_entity *entity, - * Make sure that bfqg and its associated blkg do not - * disappear before entity. 
- */ -- bfq_log_bfqq(bfqq->bfqd, bfqq, "[%s] getting bfqg %p and blkg\n", __func__, bfqg); -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "getting bfqg %p and blkg\n", -+ bfqg); - - bfqg_and_blkg_get(bfqg); - #else -@@ -651,7 +652,7 @@ static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, - bfq_put_idle_entity(bfq_entity_service_tree(entity), entity); - } - #ifdef BFQ_MQ -- bfq_log_bfqq(bfqq->bfqd, bfqq, "[%s] putting blkg and bfqg %p\n", __func__, bfqg); -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "putting blkg and bfqg %p\n", bfqg); - - bfqg_and_blkg_put(bfqq_group(bfqq)); - #else -@@ -661,7 +662,7 @@ static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, - entity->parent = bfqg->my_entity; - entity->sched_data = &bfqg->sched_data; - #ifdef BFQ_MQ -- bfq_log_bfqq(bfqq->bfqd, bfqq, "[%s] getting blkg and bfqg %p\n", __func__, bfqg); -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "getting blkg and bfqg %p\n", bfqg); - - /* pin down bfqg and its associated blkg */ - bfqg_and_blkg_get(bfqg); -@@ -721,7 +722,7 @@ static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd, - if (entity->sched_data != &bfqg->sched_data) { - bic_set_bfqq(bic, NULL, 0); - bfq_log_bfqq(bfqd, async_bfqq, -- "bic_change_group: %p %d", -+ "%p %d", - async_bfqq, - async_bfqq->ref); - bfq_put_queue(async_bfqq); -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -index bb7ccc2f1165..edc93b6af186 100644 ---- a/block/bfq-mq-iosched.c -+++ b/block/bfq-mq-iosched.c -@@ -310,7 +310,7 @@ static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd, - static void bfq_schedule_dispatch(struct bfq_data *bfqd) - { - if (bfqd->queued != 0) { -- bfq_log(bfqd, "schedule dispatch"); -+ bfq_log(bfqd, ""); - blk_mq_run_hw_queues(bfqd->queue, true); - } - } -@@ -489,8 +489,8 @@ static void bfq_limit_depth(unsigned int op, struct blk_mq_alloc_data *data) - data->shallow_depth = - bfqd->word_depths[!!bfqd->wr_busy_queues][op_is_sync(op)]; - -- bfq_log(bfqd, "[%s] wr_busy %d sync %d depth %u", -- __func__, bfqd->wr_busy_queues, op_is_sync(op), -+ bfq_log(bfqd, "wr_busy %d sync %d depth %u", -+ bfqd->wr_busy_queues, op_is_sync(op), - data->shallow_depth); - } - -@@ -528,7 +528,7 @@ bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root, - if (rb_link) - *rb_link = p; - -- bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %d", -+ bfq_log(bfqd, "%llu: returning %d", - (unsigned long long) sector, - bfqq ? 
bfqq->pid : 0); - -@@ -749,7 +749,7 @@ static struct request *bfq_check_fifo(struct bfq_queue *bfqq, - if (rq == last || ktime_get_ns() < rq->fifo_time) - return NULL; - -- bfq_log_bfqq(bfqq->bfqd, bfqq, "check_fifo: returned %p", rq); -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "returned %p", rq); - BUG_ON(RB_EMPTY_NODE(&rq->rb_node)); - return rq; - } -@@ -842,7 +842,7 @@ static void bfq_updated_next_req(struct bfq_data *bfqd, - bfq_serv_to_charge(next_rq, bfqq)); - if (entity->budget != new_budget) { - entity->budget = new_budget; -- bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu", -+ bfq_log_bfqq(bfqd, bfqq, "new budget %lu", - new_budget); - bfq_requeue_bfqq(bfqd, bfqq, false); - } -@@ -915,8 +915,7 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd, - BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish)); - - bfq_log_bfqq(bfqq->bfqd, bfqq, -- "[%s] bic %p wr_coeff %d start_finish %lu max_time %lu", -- __func__, -+ "bic %p wr_coeff %d start_finish %lu max_time %lu", - bic, bfqq->wr_coeff, bfqq->last_wr_start_finish, - bfqq->wr_cur_max_time); - -@@ -929,11 +928,11 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd, - bfq_wr_duration(bfqd))) { - switch_back_to_interactive_wr(bfqq, bfqd); - bfq_log_bfqq(bfqq->bfqd, bfqq, -- "resume state: switching back to interactive"); -+ "switching back to interactive"); - } else { - bfqq->wr_coeff = 1; - bfq_log_bfqq(bfqq->bfqd, bfqq, -- "resume state: switching off wr (%lu + %lu < %lu)", -+ "switching off wr (%lu + %lu < %lu)", - bfqq->last_wr_start_finish, bfqq->wr_cur_max_time, - jiffies); - } -@@ -985,7 +984,7 @@ static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq) - /* Increment burst size to take into account also bfqq */ - bfqd->burst_size++; - -- bfq_log_bfqq(bfqd, bfqq, "add_to_burst %d", bfqd->burst_size); -+ bfq_log_bfqq(bfqd, bfqq, "%d", bfqd->burst_size); - - BUG_ON(bfqd->burst_size > bfqd->bfq_large_burst_thresh); - -@@ -998,7 +997,7 @@ static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq) - * other to consider this burst as large. - */ - bfqd->large_burst = true; -- bfq_log_bfqq(bfqd, bfqq, "add_to_burst: large burst started"); -+ bfq_log_bfqq(bfqd, bfqq, "large burst started"); - - /* - * We can now mark all queues in the burst list as -@@ -1170,7 +1169,7 @@ static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq) - bfqd->large_burst = false; - bfq_reset_burst_list(bfqd, bfqq); - bfq_log_bfqq(bfqd, bfqq, -- "handle_burst: late activation or different group"); -+ "late activation or different group"); - goto end; - } - -@@ -1180,7 +1179,7 @@ static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq) - * bfqq as belonging to this large burst immediately. - */ - if (bfqd->large_burst) { -- bfq_log_bfqq(bfqd, bfqq, "handle_burst: marked in burst"); -+ bfq_log_bfqq(bfqd, bfqq, "marked in burst"); - bfq_mark_bfqq_in_large_burst(bfqq); - goto end; - } -@@ -1686,7 +1685,7 @@ static void bfq_add_request(struct request *rq) - unsigned int old_wr_coeff = bfqq->wr_coeff; - bool interactive = false; - -- bfq_log_bfqq(bfqd, bfqq, "add_request: size %u %s", -+ bfq_log_bfqq(bfqd, bfqq, "size %u %s", - blk_rq_sectors(rq), rq_is_sync(rq) ? 
"S" : "A"); - - if (bfqq->wr_coeff > 1) /* queue is being weight-raised */ -@@ -1952,7 +1951,7 @@ static int bfq_request_merge(struct request_queue *q, struct request **req, - __rq = bfq_find_rq_fmerge(bfqd, bio, q); - if (__rq && elv_bio_merge_ok(__rq, bio)) { - *req = __rq; -- bfq_log(bfqd, "request_merge: req %p", __rq); -+ bfq_log(bfqd, "req %p", __rq); - - return ELEVATOR_FRONT_MERGE; - } -@@ -1989,7 +1988,7 @@ static void bfq_request_merged(struct request_queue *q, struct request *req, - bfqq->next_rq = next_rq; - - bfq_log_bfqq(bfqd, bfqq, -- "request_merged: req %p prev %p next_rq %p bfqq %p", -+ "req %p prev %p next_rq %p bfqq %p", - req, prev, next_rq, bfqq); - - /* -@@ -2018,7 +2017,7 @@ static void bfq_requests_merged(struct request_queue *q, struct request *rq, - goto end; - - bfq_log_bfqq(bfqq->bfqd, bfqq, -- "requests_merged: rq %p next %p bfqq %p next_bfqq %p", -+ "rq %p next %p bfqq %p next_bfqq %p", - rq, next, bfqq, next_bfqq); - - spin_lock_irq(&bfqq->bfqd->lock); -@@ -2069,10 +2068,10 @@ static void bfq_bfqq_end_wr(struct bfq_queue *bfqq) - */ - bfqq->entity.prio_changed = 1; - bfq_log_bfqq(bfqq->bfqd, bfqq, -- "end_wr: wrais ending at %lu, rais_max_time %u", -+ "wrais ending at %lu, rais_max_time %u", - bfqq->last_wr_start_finish, - jiffies_to_msecs(bfqq->wr_cur_max_time)); -- bfq_log_bfqq(bfqq->bfqd, bfqq, "end_wr: wr_busy %d", -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "wr_busy %d", - bfqq->bfqd->wr_busy_queues); - } - -@@ -2245,8 +2244,8 @@ static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq, - { - if (bfq_too_late_for_merging(new_bfqq)) { - bfq_log_bfqq(bfqq->bfqd, bfqq, -- "[%s] too late for bfq%d to be merged", -- __func__, new_bfqq->pid); -+ "too late for bfq%d to be merged", -+ new_bfqq->pid); - return false; - } - -@@ -2395,8 +2394,7 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq) - } - BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish)); - bfq_log_bfqq(bfqq->bfqd, bfqq, -- "[%s] bic %p wr_coeff %d start_finish %lu max_time %lu", -- __func__, -+ "bic %p wr_coeff %d start_finish %lu max_time %lu", - bic, bfqq->wr_coeff, bfqq->last_wr_start_finish, - bfqq->wr_cur_max_time); - } -@@ -2453,7 +2451,7 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic, - - } - -- bfq_log_bfqq(bfqd, new_bfqq, "merge_bfqqs: wr_busy %d", -+ bfq_log_bfqq(bfqd, new_bfqq, "wr_busy %d", - bfqd->wr_busy_queues); - - /* -@@ -2554,7 +2552,7 @@ static void bfq_set_budget_timeout(struct bfq_data *bfqd, - bfqq->budget_timeout = jiffies + - bfqd->bfq_timeout * timeout_coeff; - -- bfq_log_bfqq(bfqd, bfqq, "set budget_timeout %u", -+ bfq_log_bfqq(bfqd, bfqq, "%u", - jiffies_to_msecs(bfqd->bfq_timeout * timeout_coeff)); - } - -@@ -2620,10 +2618,10 @@ static void __bfq_set_in_service_queue(struct bfq_data *bfqd, - - bfq_set_budget_timeout(bfqd, bfqq); - bfq_log_bfqq(bfqd, bfqq, -- "set_in_service_queue, cur-budget = %d", -+ "cur-budget = %d", - bfqq->entity.budget); - } else -- bfq_log(bfqd, "set_in_service_queue: NULL"); -+ bfq_log(bfqd, "NULL"); - - bfqd->in_service_queue = bfqq; - } -@@ -2746,7 +2744,7 @@ static void bfq_reset_rate_computation(struct bfq_data *bfqd, struct request *rq - bfqd->peak_rate_samples = 0; /* full re-init on next disp. 
*/ - - bfq_log(bfqd, -- "reset_rate_computation at end, sample %u/%u tot_sects %llu", -+ "at end, sample %u/%u tot_sects %llu", - bfqd->peak_rate_samples, bfqd->sequential_samples, - bfqd->tot_sectors_dispatched); - } -@@ -2766,7 +2764,7 @@ static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq) - if (bfqd->peak_rate_samples < BFQ_RATE_MIN_SAMPLES || - bfqd->delta_from_first < BFQ_RATE_MIN_INTERVAL) { - bfq_log(bfqd, -- "update_rate_reset: only resetting, delta_first %lluus samples %d", -+ "only resetting, delta_first %lluus samples %d", - bfqd->delta_from_first>>10, bfqd->peak_rate_samples); - goto reset_computation; - } -@@ -2790,7 +2788,7 @@ static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq) - div_u64(bfqd->delta_from_first, NSEC_PER_USEC)); - - bfq_log(bfqd, --"update_rate_reset: tot_sects %llu delta_first %lluus rate %llu sects/s (%d)", -+"tot_sects %llu delta_first %lluus rate %llu sects/s (%d)", - bfqd->tot_sectors_dispatched, bfqd->delta_from_first>>10, - ((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT), - rate > 20<peak_rate) || - rate > 20<peak_rate_samples, bfqd->sequential_samples, - ((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT), - ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT)); - goto reset_computation; - } else { - bfq_log(bfqd, -- "update_rate_reset: do update, samples %u/%u rate/peak %llu/%llu", -+ "do update, samples %u/%u rate/peak %llu/%llu", - bfqd->peak_rate_samples, bfqd->sequential_samples, - ((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT), - ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT)); -@@ -2868,7 +2866,7 @@ static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq) - rate /= divisor; /* smoothing constant alpha = 1/divisor */ - - bfq_log(bfqd, -- "update_rate_reset: divisor %d tmp_peak_rate %llu tmp_rate %u", -+ "divisor %d tmp_peak_rate %llu tmp_rate %u", - divisor, - ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT), - (u32)((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT)); -@@ -2922,7 +2920,7 @@ static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq) - - if (bfqd->peak_rate_samples == 0) { /* first dispatch */ - bfq_log(bfqd, -- "update_peak_rate: goto reset, samples %d", -+ "goto reset, samples %d", - bfqd->peak_rate_samples) ; - bfq_reset_rate_computation(bfqd, rq); - goto update_last_values; /* will add one sample */ -@@ -2943,7 +2941,7 @@ static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq) - if (now_ns - bfqd->last_dispatch > 100*NSEC_PER_MSEC && - bfqd->rq_in_driver == 0) { - bfq_log(bfqd, --"update_peak_rate: jumping to updating&resetting delta_last %lluus samples %d", -+"jumping to updating&resetting delta_last %lluus samples %d", - (now_ns - bfqd->last_dispatch)>>10, - bfqd->peak_rate_samples) ; - goto update_rate_and_reset; -@@ -2969,7 +2967,7 @@ static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq) - bfqd->delta_from_first = now_ns - bfqd->first_dispatch; - - bfq_log(bfqd, -- "update_peak_rate: added samples %u/%u tot_sects %llu delta_first %lluus", -+ "added samples %u/%u tot_sects %llu delta_first %lluus", - bfqd->peak_rate_samples, bfqd->sequential_samples, - bfqd->tot_sectors_dispatched, - bfqd->delta_from_first>>10); -@@ -2985,12 +2983,12 @@ static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq) - bfqd->last_dispatch = now_ns; - - bfq_log(bfqd, -- "update_peak_rate: delta_first %lluus last_pos %llu peak_rate %llu", -+ "delta_first %lluus last_pos %llu peak_rate %llu", - (now_ns - 
bfqd->first_dispatch)>>10, - (unsigned long long) bfqd->last_position, - ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT)); - bfq_log(bfqd, -- "update_peak_rate: samples at end %d", bfqd->peak_rate_samples); -+ "samples at end %d", bfqd->peak_rate_samples); - } - - /* -@@ -3088,11 +3086,11 @@ static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd, - */ - budget = 2 * min_budget; - -- bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %d, budg left %d", -+ bfq_log_bfqq(bfqd, bfqq, "last budg %d, budg left %d", - bfqq->entity.budget, bfq_bfqq_budget_left(bfqq)); -- bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last max_budg %d, min budg %d", -+ bfq_log_bfqq(bfqd, bfqq, "last max_budg %d, min budg %d", - budget, bfq_min_budget(bfqd)); -- bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d", -+ bfq_log_bfqq(bfqd, bfqq, "sync %d, seeky %d", - bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue)); - - if (bfq_bfqq_sync(bfqq) && bfqq->wr_coeff == 1) { -@@ -3294,7 +3292,7 @@ static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq, - else /* charge at least one seek */ - *delta_ms = bfq_slice_idle / NSEC_PER_MSEC; - -- bfq_log(bfqd, "bfq_bfqq_is_slow: too short %u", delta_usecs); -+ bfq_log(bfqd, "too short %u", delta_usecs); - - return slow; - } -@@ -3317,11 +3315,11 @@ static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq, - * peak rate. - */ - slow = bfqq->entity.service < bfqd->bfq_max_budget / 2; -- bfq_log(bfqd, "bfq_bfqq_is_slow: relative rate %d/%d", -+ bfq_log(bfqd, "relative rate %d/%d", - bfqq->entity.service, bfqd->bfq_max_budget); - } - -- bfq_log_bfqq(bfqd, bfqq, "bfq_bfqq_is_slow: slow %d", slow); -+ bfq_log_bfqq(bfqd, bfqq, "slow %d", slow); - - return slow; - } -@@ -3423,7 +3421,7 @@ static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd, - struct bfq_queue *bfqq) - { - bfq_log_bfqq(bfqd, bfqq, --"softrt_next_start: service_blkg %lu soft_rate %u sects/sec interval %u", -+"service_blkg %lu soft_rate %u sects/sec interval %u", - bfqq->service_from_backlogged, - bfqd->bfq_wr_max_softrt_rate, - jiffies_to_msecs(HZ * bfqq->service_from_backlogged / -@@ -3602,7 +3600,7 @@ static bool bfq_bfqq_budget_timeout(struct bfq_queue *bfqq) - static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq) - { - bfq_log_bfqq(bfqq->bfqd, bfqq, -- "may_budget_timeout: wait_request %d left %d timeout %d", -+ "wait_request %d left %d timeout %d", - bfq_bfqq_wait_request(bfqq), - bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3, - bfq_bfqq_budget_timeout(bfqq)); -@@ -3863,11 +3861,11 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq) - * either boosts the throughput (without issues), or is - * necessary to preserve service guarantees. 
- */ -- bfq_log_bfqq(bfqd, bfqq, "may_idle: sync %d idling_boosts_thr %d", -+ bfq_log_bfqq(bfqd, bfqq, "sync %d idling_boosts_thr %d", - bfq_bfqq_sync(bfqq), idling_boosts_thr); - - bfq_log_bfqq(bfqd, bfqq, -- "may_idle: wr_busy %d boosts %d IO-bound %d guar %d", -+ "wr_busy %d boosts %d IO-bound %d guar %d", - bfqd->wr_busy_queues, - idling_boosts_thr_without_issues, - bfq_bfqq_IO_bound(bfqq), -@@ -3907,7 +3905,7 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd) - if (!bfqq) - goto new_queue; - -- bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue"); -+ bfq_log_bfqq(bfqd, bfqq, "already in-service queue"); - - if (bfq_may_expire_for_budg_timeout(bfqq) && - !bfq_bfqq_wait_request(bfqq) && -@@ -3983,14 +3981,14 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd) - new_queue: - bfqq = bfq_set_in_service_queue(bfqd); - if (bfqq) { -- bfq_log_bfqq(bfqd, bfqq, "select_queue: checking new queue"); -+ bfq_log_bfqq(bfqd, bfqq, "checking new queue"); - goto check_queue; - } - keep_queue: - if (bfqq) -- bfq_log_bfqq(bfqd, bfqq, "select_queue: returned this queue"); -+ bfq_log_bfqq(bfqd, bfqq, "returned this queue"); - else -- bfq_log(bfqd, "select_queue: no queue returned"); -+ bfq_log(bfqd, "no queue returned"); - - return bfqq; - } -@@ -4043,8 +4041,7 @@ static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq) - /* see comments on max_service_from_wr */ - bfq_bfqq_end_wr(bfqq); - bfq_log_bfqq(bfqd, bfqq, -- "[%s] too much service", -- __func__); -+ "too much service"); - } - } - /* -@@ -4122,7 +4119,7 @@ static bool bfq_has_work(struct blk_mq_hw_ctx *hctx) - { - struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; - -- bfq_log(bfqd, "has_work, dispatch_non_empty %d busy_queues %d", -+ bfq_log(bfqd, "dispatch_non_empty %d busy_queues %d", - !list_empty_careful(&bfqd->dispatch), bfqd->busy_queues > 0); - - /* -@@ -4146,7 +4143,7 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) - rq->rq_flags &= ~RQF_DISP_LIST; - - bfq_log(bfqd, -- "dispatch requests: picked %p from dispatch list", rq); -+ "picked %p from dispatch list", rq); - bfqq = RQ_BFQQ(rq); - - if (bfqq) { -@@ -4196,7 +4193,7 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) - goto start_rq; - } - -- bfq_log(bfqd, "dispatch requests: %d busy queues", bfqd->busy_queues); -+ bfq_log(bfqd, "%d busy queues", bfqd->busy_queues); - - if (bfqd->busy_queues == 0) - goto exit; -@@ -4236,13 +4233,13 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) - rq->rq_flags |= RQF_STARTED; - if (bfqq) - bfq_log_bfqq(bfqd, bfqq, -- "dispatched %s request %p, rq_in_driver %d", -+ "%s request %p, rq_in_driver %d", - bfq_bfqq_sync(bfqq) ? 
"sync" : "async", - rq, - bfqd->rq_in_driver); - else - bfq_log(bfqd, -- "dispatched request %p from dispatch list, rq_in_driver %d", -+ "request %p from dispatch list, rq_in_driver %d", - rq, bfqd->rq_in_driver); - } else - bfq_log(bfqd, -@@ -4339,7 +4336,7 @@ static void bfq_put_queue(struct bfq_queue *bfqq) - BUG_ON(bfqq->ref <= 0); - - if (bfqq->bfqd) -- bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d", bfqq, bfqq->ref); -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "%p %d", bfqq, bfqq->ref); - - bfqq->ref--; - if (bfqq->ref) -@@ -4383,10 +4380,10 @@ static void bfq_put_queue(struct bfq_queue *bfqq) - } - - if (bfqq->bfqd) -- bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p freed", bfqq); -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "%p freed", bfqq); - - #ifdef BFQ_GROUP_IOSCHED_ENABLED -- bfq_log_bfqq(bfqq->bfqd, bfqq, "[%s] putting blkg and bfqg %p\n", __func__, bfqg); -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "putting blkg and bfqg %p\n", bfqg); - bfqg_and_blkg_put(bfqg); - #endif - kmem_cache_free(bfq_pool, bfqq); -@@ -4418,7 +4415,7 @@ static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) - bfq_schedule_dispatch(bfqd); - } - -- bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, bfqq->ref); -+ bfq_log_bfqq(bfqd, bfqq, "%p, %d", bfqq, bfqq->ref); - - bfq_put_cooperator(bfqq); - -@@ -4502,7 +4499,7 @@ static void bfq_set_next_ioprio_data(struct bfq_queue *bfqq, - bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->new_ioprio); - bfqq->entity.prio_changed = 1; - bfq_log_bfqq(bfqq->bfqd, bfqq, -- "set_next_ioprio_data: bic_class %d prio %d class %d", -+ "bic_class %d prio %d class %d", - ioprio_class, bfqq->new_ioprio, bfqq->new_ioprio_class); - } - -@@ -4529,7 +4526,7 @@ static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio) - bfqq = bfq_get_queue(bfqd, bio, BLK_RW_ASYNC, bic); - bic_set_bfqq(bic, bfqq, false); - bfq_log_bfqq(bfqd, bfqq, -- "check_ioprio_change: bfqq %p %d", -+ "bfqq %p %d", - bfqq, bfqq->ref); - } - -@@ -4667,14 +4664,14 @@ static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd, - * guarantee that this queue is not freed - * until its group goes away. 
- */ -- bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d", -+ bfq_log_bfqq(bfqd, bfqq, "bfqq not in async: %p, %d", - bfqq, bfqq->ref); - *async_bfqq = bfqq; - } - - out: - bfqq->ref++; /* get a process reference to this queue */ -- bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq, bfqq->ref); -+ bfq_log_bfqq(bfqd, bfqq, "at end: %p, %d", bfqq, bfqq->ref); - rcu_read_unlock(); - return bfqq; - } -@@ -4733,7 +4730,7 @@ static void bfq_update_has_short_ttime(struct bfq_data *bfqd, - bfqq->ttime.ttime_mean > bfqd->bfq_slice_idle)) - has_short_ttime = false; - -- bfq_log_bfqq(bfqd, bfqq, "update_has_short_ttime: has_short_ttime %d", -+ bfq_log_bfqq(bfqd, bfqq, "has_short_ttime %d", - has_short_ttime); - - if (has_short_ttime) -@@ -4759,7 +4756,7 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq, - bfq_update_io_seektime(bfqd, bfqq, rq); - - bfq_log_bfqq(bfqd, bfqq, -- "rq_enqueued: has_short_ttime=%d (seeky %d)", -+ "has_short_ttime=%d (seeky %d)", - bfq_bfqq_has_short_ttime(bfqq), BFQQ_SEEKY(bfqq)); - - bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq); -@@ -4818,7 +4815,7 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq) - - assert_spin_locked(&bfqd->lock); - -- bfq_log_bfqq(bfqd, bfqq, "__insert_req: rq %p bfqq %p", rq, bfqq); -+ bfq_log_bfqq(bfqd, bfqq, "rq %p bfqq %p", rq, bfqq); - - /* - * An unplug may trigger a requeue of a request from the device -@@ -4837,9 +4834,9 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq) - new_bfqq->allocated++; - bfqq->allocated--; - bfq_log_bfqq(bfqd, bfqq, -- "insert_request: new allocated %d", bfqq->allocated); -+ "new allocated %d", bfqq->allocated); - bfq_log_bfqq(bfqd, new_bfqq, -- "insert_request: new_bfqq new allocated %d", -+ "new_bfqq new allocated %d", - bfqq->allocated); - - new_bfqq->ref++; -@@ -4911,11 +4908,11 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, - rq->rq_flags |= RQF_DISP_LIST; - if (bfqq) - bfq_log_bfqq(bfqd, bfqq, -- "insert_request %p in disp: at_head %d", -+ "%p in disp: at_head %d", - rq, at_head); - else - bfq_log(bfqd, -- "insert_request %p in disp: at_head %d", -+ "%p in disp: at_head %d", - rq, at_head); - } else { - BUG_ON(!(rq->rq_flags & RQF_GOT)); -@@ -5033,7 +5030,7 @@ static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd) - bfqq->dispatched--; - - bfq_log_bfqq(bfqd, bfqq, -- "completed_requests: new disp %d, new rq_in_driver %d", -+ "new disp %d, new rq_in_driver %d", - bfqq->dispatched, bfqd->rq_in_driver); - - if (!bfqq->dispatched && !bfq_bfqq_busy(bfqq)) { -@@ -5061,7 +5058,7 @@ static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd) - delta_us = div_u64(now_ns - bfqd->last_completion, NSEC_PER_USEC); - - bfq_log_bfqq(bfqd, bfqq, -- "rq_completed: delta %uus/%luus max_size %u rate %llu/%llu", -+ "delta %uus/%luus max_size %u rate %llu/%llu", - delta_us, BFQ_MIN_TT/NSEC_PER_USEC, bfqd->last_rq_max_size, - delta_us > 0 ? 
- (USEC_PER_SEC* -@@ -5129,7 +5126,7 @@ static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd) - static void bfq_finish_requeue_request_body(struct bfq_queue *bfqq) - { - bfq_log_bfqq(bfqq->bfqd, bfqq, -- "put_request_body: allocated %d", bfqq->allocated); -+ "allocated %d", bfqq->allocated); - BUG_ON(!bfqq->allocated); - bfqq->allocated--; - -@@ -5406,10 +5403,10 @@ static void bfq_prepare_request(struct request *rq, struct bio *bio) - - bfqq->allocated++; - bfq_log_bfqq(bfqq->bfqd, bfqq, -- "get_request: new allocated %d", bfqq->allocated); -+ "new allocated %d", bfqq->allocated); - - bfqq->ref++; -- bfq_log_bfqq(bfqd, bfqq, "get_request %p: bfqq %p, %d", rq, bfqq, bfqq->ref); -+ bfq_log_bfqq(bfqd, bfqq, "%p: bfqq %p, %d", rq, bfqq, bfqq->ref); - - rq->elv.priv[0] = bic; - rq->elv.priv[1] = bfqq; -@@ -5493,7 +5490,7 @@ static enum hrtimer_restart bfq_idle_slice_timer(struct hrtimer *timer) - idle_slice_timer); - struct bfq_queue *bfqq = bfqd->in_service_queue; - -- bfq_log(bfqd, "slice_timer expired"); -+ bfq_log(bfqd, "expired"); - - /* - * Theoretical race here: the in-service queue can be NULL or -@@ -5515,10 +5512,10 @@ static void __bfq_put_async_bfqq(struct bfq_data *bfqd, - struct bfq_group *root_group = bfqd->root_group; - struct bfq_queue *bfqq = *bfqq_ptr; - -- bfq_log(bfqd, "put_async_bfqq: %p", bfqq); -+ bfq_log(bfqd, "%p", bfqq); - if (bfqq) { - bfq_bfqq_move(bfqd, bfqq, root_group); -- bfq_log_bfqq(bfqd, bfqq, "put_async_bfqq: putting %p, %d", -+ bfq_log_bfqq(bfqd, bfqq, "putting %p, %d", - bfqq, bfqq->ref); - bfq_put_queue(bfqq); - *bfqq_ptr = NULL; -@@ -5547,7 +5544,7 @@ static void bfq_exit_queue(struct elevator_queue *e) - struct bfq_data *bfqd = e->elevator_data; - struct bfq_queue *bfqq, *n; - -- bfq_log(bfqd, "exit_queue: starting ..."); -+ bfq_log(bfqd, "starting ..."); - - hrtimer_cancel(&bfqd->idle_slice_timer); - -@@ -5575,7 +5572,7 @@ static void bfq_exit_queue(struct elevator_queue *e) - spin_unlock_irq(&bfqd->lock); - #endif - -- bfq_log(bfqd, "exit_queue: finished ..."); -+ bfq_log(bfqd, "finished ..."); - kfree(bfqd); - } - -diff --git a/block/bfq-mq.h b/block/bfq-mq.h -index 9a5ce1168ff5..e2ae11bf8f76 100644 ---- a/block/bfq-mq.h -+++ b/block/bfq-mq.h -@@ -712,34 +712,34 @@ static struct bfq_group *bfqq_group(struct bfq_queue *bfqq); - static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg); - - #define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \ -- pr_crit("%s bfq%d%c %s " fmt "\n", \ -+ pr_crit("%s bfq%d%c %s [%s] " fmt "\n", \ - checked_dev_name((bfqd)->queue->backing_dev_info->dev), \ - (bfqq)->pid, \ - bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \ -- bfqq_group(bfqq)->blkg_path, ##args); \ -+ bfqq_group(bfqq)->blkg_path, __func__, ##args); \ - } while (0) - - #define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do { \ -- pr_crit("%s %s " fmt "\n", \ -+ pr_crit("%s %s [%s] " fmt "\n", \ - checked_dev_name((bfqd)->queue->backing_dev_info->dev), \ -- bfqg->blkg_path, ##args); \ -+ bfqg->blkg_path, __func__, ##args); \ - } while (0) - - #else /* BFQ_GROUP_IOSCHED_ENABLED */ - - #define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \ -- pr_crit("%s bfq%d%c " fmt "\n", \ -+ pr_crit("%s bfq%d%c [%s] " fmt "\n", \ - checked_dev_name((bfqd)->queue->backing_dev_info->dev), \ - (bfqq)->pid, bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \ -- ##args) -+ __func__, ##args) - #define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do {} while (0) - - #endif /* BFQ_GROUP_IOSCHED_ENABLED */ - - #define bfq_log(bfqd, fmt, args...) 
\ -- pr_crit("%s bfq " fmt "\n", \ -+ pr_crit("%s bfq [%s] " fmt "\n", \ - checked_dev_name((bfqd)->queue->backing_dev_info->dev), \ -- ##args) -+ __func__, ##args) - - #else /* CONFIG_BFQ_REDIRECT_TO_CONSOLE */ - -@@ -762,28 +762,29 @@ static struct bfq_group *bfqq_group(struct bfq_queue *bfqq); - static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg); - - #define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \ -- blk_add_trace_msg((bfqd)->queue, "bfq%d%c %s " fmt, \ -+ blk_add_trace_msg((bfqd)->queue, "bfq%d%c %s [%s] " fmt, \ - (bfqq)->pid, \ - bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \ -- bfqq_group(bfqq)->blkg_path, ##args); \ -+ bfqq_group(bfqq)->blkg_path, __func__, ##args); \ - } while (0) - - #define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do { \ -- blk_add_trace_msg((bfqd)->queue, "%s " fmt, bfqg->blkg_path, ##args);\ -+ blk_add_trace_msg((bfqd)->queue, "%s [%s] " fmt, bfqg->blkg_path, \ -+ __func__, ##args);\ - } while (0) - - #else /* BFQ_GROUP_IOSCHED_ENABLED */ - - #define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \ -- blk_add_trace_msg((bfqd)->queue, "bfq%d%c " fmt, (bfqq)->pid, \ -+ blk_add_trace_msg((bfqd)->queue, "bfq%d%c [%s] " fmt, (bfqq)->pid, \ - bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \ -- ##args) -+ __func__, ##args) - #define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do {} while (0) - - #endif /* BFQ_GROUP_IOSCHED_ENABLED */ - - #define bfq_log(bfqd, fmt, args...) \ -- blk_add_trace_msg((bfqd)->queue, "bfq " fmt, ##args) -+ blk_add_trace_msg((bfqd)->queue, "bfq [%s] " fmt, __func__, ##args) - - #endif /* CONFIG_BLK_DEV_IO_TRACE */ - #endif /* CONFIG_BFQ_REDIRECT_TO_CONSOLE */ -@@ -938,7 +939,7 @@ bfq_entity_service_tree(struct bfq_entity *entity) - - if (bfqq) - bfq_log_bfqq(bfqq->bfqd, bfqq, -- "entity_service_tree %p %d", -+ "%p %d", - sched_data->service_tree + idx, idx); - #ifdef BFQ_GROUP_IOSCHED_ENABLED - else { -@@ -946,7 +947,7 @@ bfq_entity_service_tree(struct bfq_entity *entity) - container_of(entity, struct bfq_group, entity); - - bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg, -- "entity_service_tree %p %d", -+ "%p %d", - sched_data->service_tree + idx, idx); - } - #endif -diff --git a/block/bfq-sched.c b/block/bfq-sched.c -index 4e6c5232e2fb..ead34c30a7c2 100644 ---- a/block/bfq-sched.c -+++ b/block/bfq-sched.c -@@ -119,7 +119,7 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd, - - if (bfqq) - bfq_log_bfqq(bfqq->bfqd, bfqq, -- "update_next_in_service: chose without lookup"); -+ "chose without lookup"); - #ifdef BFQ_GROUP_IOSCHED_ENABLED - else { - struct bfq_group *bfqg = -@@ -127,7 +127,7 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd, - struct bfq_group, entity); - - bfq_log_bfqg((struct bfq_data*)bfqg->bfqd, bfqg, -- "update_next_in_service: chose without lookup"); -+ "chose without lookup"); - } - #endif - } -@@ -148,7 +148,7 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd, - bfqq = bfq_entity_to_bfqq(next_in_service); - if (bfqq) - bfq_log_bfqq(bfqq->bfqd, bfqq, -- "update_next_in_service: chosen this queue"); -+ "chosen this queue"); - #ifdef BFQ_GROUP_IOSCHED_ENABLED - else { - struct bfq_group *bfqg = -@@ -156,7 +156,7 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd, - struct bfq_group, entity); - - bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg, -- "update_next_in_service: chosen this entity"); -+ "chosen this entity"); - } - #endif - return parent_sched_may_change; -@@ -331,10 +331,10 @@ static void bfq_calc_finish(struct bfq_entity *entity, unsigned long service) - - if 
(bfqq) { - bfq_log_bfqq(bfqq->bfqd, bfqq, -- "calc_finish: serv %lu, w %d", -+ "serv %lu, w %d", - service, entity->weight); - bfq_log_bfqq(bfqq->bfqd, bfqq, -- "calc_finish: start %llu, finish %llu, delta %llu", -+ "start %llu, finish %llu, delta %llu", - start, finish, delta); - #ifdef BFQ_GROUP_IOSCHED_ENABLED - } else { -@@ -342,10 +342,10 @@ static void bfq_calc_finish(struct bfq_entity *entity, unsigned long service) - container_of(entity, struct bfq_group, entity); - - bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg, -- "calc_finish group: serv %lu, w %d", -+ "group: serv %lu, w %d", - service, entity->weight); - bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg, -- "calc_finish group: start %llu, finish %llu, delta %llu", -+ "group: start %llu, finish %llu, delta %llu", - start, finish, delta); - #endif - } -@@ -484,7 +484,7 @@ static void bfq_update_active_node(struct rb_node *node) - - if (bfqq) { - bfq_log_bfqq(bfqq->bfqd, bfqq, -- "update_active_node: new min_start %llu", -+ "new min_start %llu", - ((entity->min_start>>10)*1000)>>12); - #ifdef BFQ_GROUP_IOSCHED_ENABLED - } else { -@@ -492,7 +492,7 @@ static void bfq_update_active_node(struct rb_node *node) - container_of(entity, struct bfq_group, entity); - - bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg, -- "update_active_node: new min_start %llu", -+ "new min_start %llu", - ((entity->min_start>>10)*1000)>>12); - #endif - } -@@ -620,7 +620,7 @@ static void bfq_get_entity(struct bfq_entity *entity) - - if (bfqq) { - bfqq->ref++; -- bfq_log_bfqq(bfqq->bfqd, bfqq, "get_entity: %p %d", -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "%p %d", - bfqq, bfqq->ref); - } - } -@@ -748,7 +748,7 @@ static void bfq_forget_entity(struct bfq_service_tree *st, - entity->on_st = false; - st->wsum -= entity->weight; - if (bfqq && !is_in_service) { -- bfq_log_bfqq(bfqq->bfqd, bfqq, "forget_entity (before): %p %d", -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "(before): %p %d", - bfqq, bfqq->ref); - bfq_put_queue(bfqq); - } -@@ -1008,7 +1008,7 @@ static void bfq_bfqq_charge_time(struct bfq_data *bfqd, struct bfq_queue *bfqq, - tot_serv_to_charge = entity->service; - - bfq_log_bfqq(bfqq->bfqd, bfqq, -- "charge_time: %lu/%u ms, %d/%d/%d sectors", -+ "%lu/%u ms, %d/%d/%d sectors", - time_ms, timeout_ms, entity->service, - tot_serv_to_charge, entity->budget); - -@@ -1080,7 +1080,7 @@ static void bfq_update_fin_time_enqueue(struct bfq_entity *entity, - - if (bfqq) { - bfq_log_bfqq(bfqq->bfqd, bfqq, -- "update_fin_time_enqueue: new queue finish %llu", -+ "new queue finish %llu", - ((entity->finish>>10)*1000)>>12); - #ifdef BFQ_GROUP_IOSCHED_ENABLED - } else { -@@ -1088,7 +1088,7 @@ static void bfq_update_fin_time_enqueue(struct bfq_entity *entity, - container_of(entity, struct bfq_group, entity); - - bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg, -- "update_fin_time_enqueue: new group finish %llu", -+ "new group finish %llu", - ((entity->finish>>10)*1000)>>12); - #endif - } -@@ -1098,7 +1098,7 @@ static void bfq_update_fin_time_enqueue(struct bfq_entity *entity, - - if (bfqq) { - bfq_log_bfqq(bfqq->bfqd, bfqq, -- "update_fin_time_enqueue: queue %seligible in st %p", -+ "queue %seligible in st %p", - entity->start <= st->vtime ? 
"" : "non ", st); - #ifdef BFQ_GROUP_IOSCHED_ENABLED - } else { -@@ -1106,7 +1106,7 @@ static void bfq_update_fin_time_enqueue(struct bfq_entity *entity, - container_of(entity, struct bfq_group, entity); - - bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg, -- "update_fin_time_enqueue: group %seligible in st %p", -+ "group %seligible in st %p", - entity->start <= st->vtime ? "" : "non ", st); - #endif - } -@@ -1550,7 +1550,7 @@ static u64 bfq_calc_vtime_jump(struct bfq_service_tree *st) - - if (bfqq) - bfq_log_bfqq(bfqq->bfqd, bfqq, -- "calc_vtime_jump: new value %llu", -+ "new value %llu", - ((root_entity->min_start>>10)*1000)>>12); - #ifdef BFQ_GROUP_IOSCHED_ENABLED - else { -@@ -1559,7 +1559,7 @@ static u64 bfq_calc_vtime_jump(struct bfq_service_tree *st) - entity); - - bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg, -- "calc_vtime_jump: new value %llu", -+ "new value %llu", - ((root_entity->min_start>>10)*1000)>>12); - } - #endif -@@ -1677,7 +1677,7 @@ __bfq_lookup_next_entity(struct bfq_service_tree *st, bool in_service) - bfqq = bfq_entity_to_bfqq(entity); - if (bfqq) - bfq_log_bfqq(bfqq->bfqd, bfqq, -- "__lookup_next: start %llu vtime %llu st %p", -+ "start %llu vtime %llu st %p", - ((entity->start>>10)*1000)>>12, - ((new_vtime>>10)*1000)>>12, st); - #ifdef BFQ_GROUP_IOSCHED_ENABLED -@@ -1686,7 +1686,7 @@ __bfq_lookup_next_entity(struct bfq_service_tree *st, bool in_service) - container_of(entity, struct bfq_group, entity); - - bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg, -- "__lookup_next: start %llu vtime %llu (%llu) st %p", -+ "start %llu vtime %llu (%llu) st %p", - ((entity->start>>10)*1000)>>12, - ((st->vtime>>10)*1000)>>12, - ((new_vtime>>10)*1000)>>12, st); -@@ -1821,14 +1821,14 @@ static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd) - container_of(entity, struct bfq_group, entity); - - bfq_log_bfqg(bfqd, bfqg, -- "get_next_queue: lookup in this group"); -+ "lookup in this group"); - if (!sd->next_in_service) -- pr_crit("get_next_queue: lookup in this group"); -+ pr_crit("lookup in this group"); - } else { - bfq_log_bfqg(bfqd, bfqd->root_group, -- "get_next_queue: lookup in root group"); -+ "lookup in root group"); - if (!sd->next_in_service) -- pr_crit("get_next_queue: lookup in root group"); -+ pr_crit("lookup in root group"); - } - #endif - -@@ -1903,7 +1903,7 @@ static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd) - bfqq = bfq_entity_to_bfqq(entity); - if (bfqq) - bfq_log_bfqq(bfqd, bfqq, -- "get_next_queue: this queue, finish %llu", -+ "this queue, finish %llu", - (((entity->finish>>10)*1000)>>10)>>2); - #ifdef BFQ_GROUP_IOSCHED_ENABLED - else { -@@ -1911,7 +1911,7 @@ static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd) - container_of(entity, struct bfq_group, entity); - - bfq_log_bfqg(bfqd, bfqg, -- "get_next_queue: this entity, finish %llu", -+ "this entity, finish %llu", - (((entity->finish>>10)*1000)>>10)>>2); - } - #endif -diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c -index c4df156b1fb4..e49e8ac882b3 100644 ---- a/block/bfq-sq-iosched.c -+++ b/block/bfq-sq-iosched.c -@@ -281,7 +281,7 @@ static void bfq_schedule_dispatch(struct bfq_data *bfqd); - static void bfq_schedule_dispatch(struct bfq_data *bfqd) - { - if (bfqd->queued != 0) { -- bfq_log(bfqd, "schedule dispatch"); -+ bfq_log(bfqd, ""); - kblockd_schedule_work(&bfqd->unplug_work); - } - } -@@ -414,7 +414,7 @@ bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root, - if (rb_link) - *rb_link = p; - -- bfq_log(bfqd, "rq_pos_tree_lookup 
%llu: returning %d", -+ bfq_log(bfqd, "%llu: returning %d", - (unsigned long long) sector, - bfqq ? bfqq->pid : 0); - -@@ -635,7 +635,7 @@ static struct request *bfq_check_fifo(struct bfq_queue *bfqq, - if (rq == last || ktime_get_ns() < rq->fifo_time) - return NULL; - -- bfq_log_bfqq(bfqq->bfqd, bfqq, "check_fifo: returned %p", rq); -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "returned %p", rq); - BUG_ON(RB_EMPTY_NODE(&rq->rb_node)); - return rq; - } -@@ -728,7 +728,7 @@ static void bfq_updated_next_req(struct bfq_data *bfqd, - bfq_serv_to_charge(next_rq, bfqq)); - if (entity->budget != new_budget) { - entity->budget = new_budget; -- bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu", -+ bfq_log_bfqq(bfqd, bfqq, "new budget %lu", - new_budget); - bfq_requeue_bfqq(bfqd, bfqq, false); - } -@@ -800,8 +800,7 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd, - BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish)); - - bfq_log_bfqq(bfqq->bfqd, bfqq, -- "[%s] bic %p wr_coeff %d start_finish %lu max_time %lu", -- __func__, -+ "bic %p wr_coeff %d start_finish %lu max_time %lu", - bic, bfqq->wr_coeff, bfqq->last_wr_start_finish, - bfqq->wr_cur_max_time); - -@@ -814,11 +813,11 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd, - bfq_wr_duration(bfqd))) { - switch_back_to_interactive_wr(bfqq, bfqd); - bfq_log_bfqq(bfqq->bfqd, bfqq, -- "resume state: switching back to interactive"); -+ "switching back to interactive"); - } else { - bfqq->wr_coeff = 1; - bfq_log_bfqq(bfqq->bfqd, bfqq, -- "resume state: switching off wr (%lu + %lu < %lu)", -+ "switching off wr (%lu + %lu < %lu)", - bfqq->last_wr_start_finish, bfqq->wr_cur_max_time, - jiffies); - } -@@ -870,7 +869,7 @@ static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq) - /* Increment burst size to take into account also bfqq */ - bfqd->burst_size++; - -- bfq_log_bfqq(bfqd, bfqq, "add_to_burst %d", bfqd->burst_size); -+ bfq_log_bfqq(bfqd, bfqq, "%d", bfqd->burst_size); - - BUG_ON(bfqd->burst_size > bfqd->bfq_large_burst_thresh); - -@@ -883,7 +882,7 @@ static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq) - * other to consider this burst as large. - */ - bfqd->large_burst = true; -- bfq_log_bfqq(bfqd, bfqq, "add_to_burst: large burst started"); -+ bfq_log_bfqq(bfqd, bfqq, "large burst started"); - - /* - * We can now mark all queues in the burst list as -@@ -1055,7 +1054,7 @@ static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq) - bfqd->large_burst = false; - bfq_reset_burst_list(bfqd, bfqq); - bfq_log_bfqq(bfqd, bfqq, -- "handle_burst: late activation or different group"); -+ "late activation or different group"); - goto end; - } - -@@ -1065,7 +1064,7 @@ static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq) - * bfqq as belonging to this large burst immediately. - */ - if (bfqd->large_burst) { -- bfq_log_bfqq(bfqd, bfqq, "handle_burst: marked in burst"); -+ bfq_log_bfqq(bfqd, bfqq, "marked in burst"); - bfq_mark_bfqq_in_large_burst(bfqq); - goto end; - } -@@ -1572,7 +1571,7 @@ static void bfq_add_request(struct request *rq) - unsigned int old_wr_coeff = bfqq->wr_coeff; - bool interactive = false; - -- bfq_log_bfqq(bfqd, bfqq, "add_request: size %u %s", -+ bfq_log_bfqq(bfqd, bfqq, "size %u %s", - blk_rq_sectors(rq), rq_is_sync(rq) ? 
"S" : "A"); - - if (bfqq->wr_coeff > 1) /* queue is being weight-raised */ -@@ -1870,10 +1869,10 @@ static void bfq_bfqq_end_wr(struct bfq_queue *bfqq) - */ - bfqq->entity.prio_changed = 1; - bfq_log_bfqq(bfqq->bfqd, bfqq, -- "end_wr: wrais ending at %lu, rais_max_time %u", -+ "wrais ending at %lu, rais_max_time %u", - bfqq->last_wr_start_finish, - jiffies_to_msecs(bfqq->wr_cur_max_time)); -- bfq_log_bfqq(bfqq->bfqd, bfqq, "end_wr: wr_busy %d", -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "wr_busy %d", - bfqq->bfqd->wr_busy_queues); - } - -@@ -2048,8 +2047,8 @@ static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq, - { - if (bfq_too_late_for_merging(new_bfqq)) { - bfq_log_bfqq(bfqq->bfqd, bfqq, -- "[%s] too late for bfq%d to be merged", -- __func__, new_bfqq->pid); -+ "too late for bfq%d to be merged", -+ new_bfqq->pid); - return false; - } - -@@ -2258,7 +2257,7 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic, - - } - -- bfq_log_bfqq(bfqd, new_bfqq, "merge_bfqqs: wr_busy %d", -+ bfq_log_bfqq(bfqd, new_bfqq, "wr_busy %d", - bfqd->wr_busy_queues); - - /* -@@ -2359,7 +2358,7 @@ static void bfq_set_budget_timeout(struct bfq_data *bfqd, - bfqq->budget_timeout = jiffies + - bfqd->bfq_timeout * timeout_coeff; - -- bfq_log_bfqq(bfqd, bfqq, "set budget_timeout %u", -+ bfq_log_bfqq(bfqd, bfqq, "%u", - jiffies_to_msecs(bfqd->bfq_timeout * timeout_coeff)); - } - -@@ -2427,10 +2426,10 @@ static void __bfq_set_in_service_queue(struct bfq_data *bfqd, - - bfq_set_budget_timeout(bfqd, bfqq); - bfq_log_bfqq(bfqd, bfqq, -- "set_in_service_queue, cur-budget = %d", -+ "cur-budget = %d", - bfqq->entity.budget); - } else -- bfq_log(bfqd, "set_in_service_queue: NULL"); -+ bfq_log(bfqd, "NULL"); - - bfqd->in_service_queue = bfqq; - } -@@ -2559,7 +2558,7 @@ static void bfq_reset_rate_computation(struct bfq_data *bfqd, struct request *rq - bfqd->peak_rate_samples = 0; /* full re-init on next disp. 
*/ - - bfq_log(bfqd, -- "reset_rate_computation at end, sample %u/%u tot_sects %llu", -+ "at end, sample %u/%u tot_sects %llu", - bfqd->peak_rate_samples, bfqd->sequential_samples, - bfqd->tot_sectors_dispatched); - } -@@ -2579,7 +2578,7 @@ static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq) - if (bfqd->peak_rate_samples < BFQ_RATE_MIN_SAMPLES || - bfqd->delta_from_first < BFQ_RATE_MIN_INTERVAL) { - bfq_log(bfqd, -- "update_rate_reset: only resetting, delta_first %lluus samples %d", -+ "only resetting, delta_first %lluus samples %d", - bfqd->delta_from_first>>10, bfqd->peak_rate_samples); - goto reset_computation; - } -@@ -2603,7 +2602,7 @@ static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq) - div_u64(bfqd->delta_from_first, NSEC_PER_USEC)); - - bfq_log(bfqd, --"update_rate_reset: tot_sects %llu delta_first %lluus rate %llu sects/s (%d)", -+"tot_sects %llu delta_first %lluus rate %llu sects/s (%d)", - bfqd->tot_sectors_dispatched, bfqd->delta_from_first>>10, - ((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT), - rate > 20<peak_rate) || - rate > 20<peak_rate_samples, bfqd->sequential_samples, - ((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT), - ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT)); - goto reset_computation; - } else { - bfq_log(bfqd, -- "update_rate_reset: do update, samples %u/%u rate/peak %llu/%llu", -+ "do update, samples %u/%u rate/peak %llu/%llu", - bfqd->peak_rate_samples, bfqd->sequential_samples, - ((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT), - ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT)); -@@ -2681,7 +2680,7 @@ static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq) - rate /= divisor; /* smoothing constant alpha = 1/divisor */ - - bfq_log(bfqd, -- "update_rate_reset: divisor %d tmp_peak_rate %llu tmp_rate %u", -+ "divisor %d tmp_peak_rate %llu tmp_rate %u", - divisor, - ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT), - (u32)((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT)); -@@ -2735,7 +2734,7 @@ static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq) - - if (bfqd->peak_rate_samples == 0) { /* first dispatch */ - bfq_log(bfqd, -- "update_peak_rate: goto reset, samples %d", -+ "goto reset, samples %d", - bfqd->peak_rate_samples) ; - bfq_reset_rate_computation(bfqd, rq); - goto update_last_values; /* will add one sample */ -@@ -2756,7 +2755,7 @@ static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq) - if (now_ns - bfqd->last_dispatch > 100*NSEC_PER_MSEC && - bfqd->rq_in_driver == 0) { - bfq_log(bfqd, --"update_peak_rate: jumping to updating&resetting delta_last %lluus samples %d", -+"jumping to updating&resetting delta_last %lluus samples %d", - (now_ns - bfqd->last_dispatch)>>10, - bfqd->peak_rate_samples) ; - goto update_rate_and_reset; -@@ -2782,7 +2781,7 @@ static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq) - bfqd->delta_from_first = now_ns - bfqd->first_dispatch; - - bfq_log(bfqd, -- "update_peak_rate: added samples %u/%u tot_sects %llu delta_first %lluus", -+ "added samples %u/%u tot_sects %llu delta_first %lluus", - bfqd->peak_rate_samples, bfqd->sequential_samples, - bfqd->tot_sectors_dispatched, - bfqd->delta_from_first>>10); -@@ -2798,12 +2797,12 @@ static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq) - bfqd->last_dispatch = now_ns; - - bfq_log(bfqd, -- "update_peak_rate: delta_first %lluus last_pos %llu peak_rate %llu", -+ "delta_first %lluus last_pos %llu peak_rate %llu", - (now_ns - 
bfqd->first_dispatch)>>10, - (unsigned long long) bfqd->last_position, - ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT)); - bfq_log(bfqd, -- "update_peak_rate: samples at end %d", bfqd->peak_rate_samples); -+ "samples at end %d", bfqd->peak_rate_samples); - } - - /* -@@ -2900,11 +2899,11 @@ static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd, - */ - budget = 2 * min_budget; - -- bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %d, budg left %d", -+ bfq_log_bfqq(bfqd, bfqq, "last budg %d, budg left %d", - bfqq->entity.budget, bfq_bfqq_budget_left(bfqq)); -- bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last max_budg %d, min budg %d", -+ bfq_log_bfqq(bfqd, bfqq, "last max_budg %d, min budg %d", - budget, bfq_min_budget(bfqd)); -- bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d", -+ bfq_log_bfqq(bfqd, bfqq, "sync %d, seeky %d", - bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue)); - - if (bfq_bfqq_sync(bfqq) && bfqq->wr_coeff == 1) { -@@ -3106,7 +3105,7 @@ static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq, - else /* charge at least one seek */ - *delta_ms = bfq_slice_idle / NSEC_PER_MSEC; - -- bfq_log(bfqd, "bfq_bfqq_is_slow: too short %u", delta_usecs); -+ bfq_log(bfqd, "too short %u", delta_usecs); - - return slow; - } -@@ -3129,11 +3128,11 @@ static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq, - * peak rate. - */ - slow = bfqq->entity.service < bfqd->bfq_max_budget / 2; -- bfq_log(bfqd, "bfq_bfqq_is_slow: relative rate %d/%d", -+ bfq_log(bfqd, "relative rate %d/%d", - bfqq->entity.service, bfqd->bfq_max_budget); - } - -- bfq_log_bfqq(bfqd, bfqq, "bfq_bfqq_is_slow: slow %d", slow); -+ bfq_log_bfqq(bfqd, bfqq, "slow %d", slow); - - return slow; - } -@@ -3235,7 +3234,7 @@ static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd, - struct bfq_queue *bfqq) - { - bfq_log_bfqq(bfqd, bfqq, --"softrt_next_start: service_blkg %lu soft_rate %u sects/sec interval %u", -+"service_blkg %lu soft_rate %u sects/sec interval %u", - bfqq->service_from_backlogged, - bfqd->bfq_wr_max_softrt_rate, - jiffies_to_msecs(HZ * bfqq->service_from_backlogged / -@@ -3414,7 +3413,7 @@ static bool bfq_bfqq_budget_timeout(struct bfq_queue *bfqq) - static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq) - { - bfq_log_bfqq(bfqq->bfqd, bfqq, -- "may_budget_timeout: wait_request %d left %d timeout %d", -+ "wait_request %d left %d timeout %d", - bfq_bfqq_wait_request(bfqq), - bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3, - bfq_bfqq_budget_timeout(bfqq)); -@@ -3675,11 +3674,11 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq) - * either boosts the throughput (without issues), or is - * necessary to preserve service guarantees. 
- */ -- bfq_log_bfqq(bfqd, bfqq, "may_idle: sync %d idling_boosts_thr %d", -+ bfq_log_bfqq(bfqd, bfqq, "sync %d idling_boosts_thr %d", - bfq_bfqq_sync(bfqq), idling_boosts_thr); - - bfq_log_bfqq(bfqd, bfqq, -- "may_idle: wr_busy %d boosts %d IO-bound %d guar %d", -+ "wr_busy %d boosts %d IO-bound %d guar %d", - bfqd->wr_busy_queues, - idling_boosts_thr_without_issues, - bfq_bfqq_IO_bound(bfqq), -@@ -3719,7 +3718,7 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd) - if (!bfqq) - goto new_queue; - -- bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue"); -+ bfq_log_bfqq(bfqd, bfqq, "already in-service queue"); - - if (bfq_may_expire_for_budg_timeout(bfqq) && - !hrtimer_active(&bfqd->idle_slice_timer) && -@@ -3797,14 +3796,14 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd) - new_queue: - bfqq = bfq_set_in_service_queue(bfqd); - if (bfqq) { -- bfq_log_bfqq(bfqd, bfqq, "select_queue: checking new queue"); -+ bfq_log_bfqq(bfqd, bfqq, "checking new queue"); - goto check_queue; - } - keep_queue: - if (bfqq) -- bfq_log_bfqq(bfqd, bfqq, "select_queue: returned this queue"); -+ bfq_log_bfqq(bfqd, bfqq, "returned this queue"); - else -- bfq_log(bfqd, "select_queue: no queue returned"); -+ bfq_log(bfqd, "no queue returned"); - - return bfqq; - } -@@ -3857,8 +3856,7 @@ static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq) - /* see comments on max_service_from_wr */ - bfq_bfqq_end_wr(bfqq); - bfq_log_bfqq(bfqd, bfqq, -- "[%s] too much service", -- __func__); -+ "too much service"); - } - } - /* -@@ -3987,7 +3985,7 @@ static int bfq_dispatch_requests(struct request_queue *q, int force) - struct bfq_data *bfqd = q->elevator->elevator_data; - struct bfq_queue *bfqq; - -- bfq_log(bfqd, "dispatch requests: %d busy queues", bfqd->busy_queues); -+ bfq_log(bfqd, "%d busy queues", bfqd->busy_queues); - - if (bfqd->busy_queues == 0) - return 0; -@@ -4021,7 +4019,7 @@ static int bfq_dispatch_requests(struct request_queue *q, int force) - if (!bfq_dispatch_request(bfqd, bfqq)) - return 0; - -- bfq_log_bfqq(bfqd, bfqq, "dispatched %s request", -+ bfq_log_bfqq(bfqd, bfqq, "%s request", - bfq_bfqq_sync(bfqq) ? 
"sync" : "async"); - - BUG_ON(bfqq->next_rq == NULL && -@@ -4044,7 +4042,7 @@ static void bfq_put_queue(struct bfq_queue *bfqq) - - BUG_ON(bfqq->ref <= 0); - -- bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d", bfqq, bfqq->ref); -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "%p %d", bfqq, bfqq->ref); - bfqq->ref--; - if (bfqq->ref) - return; -@@ -4086,7 +4084,7 @@ static void bfq_put_queue(struct bfq_queue *bfqq) - bfqq->bfqd->burst_size--; - } - -- bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p freed", bfqq); -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "%p freed", bfqq); - - kmem_cache_free(bfq_pool, bfqq); - #ifdef BFQ_GROUP_IOSCHED_ENABLED -@@ -4120,7 +4118,7 @@ static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) - bfq_schedule_dispatch(bfqd); - } - -- bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, bfqq->ref); -+ bfq_log_bfqq(bfqd, bfqq, "%p, %d", bfqq, bfqq->ref); - - bfq_put_cooperator(bfqq); - -@@ -4200,7 +4198,7 @@ static void bfq_set_next_ioprio_data(struct bfq_queue *bfqq, - bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->new_ioprio); - bfqq->entity.prio_changed = 1; - bfq_log_bfqq(bfqq->bfqd, bfqq, -- "set_next_ioprio_data: bic_class %d prio %d class %d", -+ "bic_class %d prio %d class %d", - ioprio_class, bfqq->new_ioprio, bfqq->new_ioprio_class); - } - -@@ -4227,7 +4225,7 @@ static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio) - bfqq = bfq_get_queue(bfqd, bio, BLK_RW_ASYNC, bic); - bic_set_bfqq(bic, bfqq, false); - bfq_log_bfqq(bfqd, bfqq, -- "check_ioprio_change: bfqq %p %d", -+ "bfqq %p %d", - bfqq, bfqq->ref); - } - -@@ -4362,14 +4360,14 @@ static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd, - * guarantee that this queue is not freed - * until its group goes away. - */ -- bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d", -+ bfq_log_bfqq(bfqd, bfqq, "bfqq not in async: %p, %d", - bfqq, bfqq->ref); - *async_bfqq = bfqq; - } - - out: - bfqq->ref++; /* get a process reference to this queue */ -- bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq, bfqq->ref); -+ bfq_log_bfqq(bfqd, bfqq, "at end: %p, %d", bfqq, bfqq->ref); - rcu_read_unlock(); - return bfqq; - } -@@ -4428,7 +4426,7 @@ static void bfq_update_has_short_ttime(struct bfq_data *bfqd, - bic->ttime.ttime_mean > bfqd->bfq_slice_idle)) - has_short_ttime = false; - -- bfq_log_bfqq(bfqd, bfqq, "update_has_short_ttime: has_short_ttime %d", -+ bfq_log_bfqq(bfqd, bfqq, "has_short_ttime %d", - has_short_ttime); - - if (has_short_ttime) -@@ -4454,7 +4452,7 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq, - bfq_update_io_seektime(bfqd, bfqq, rq); - - bfq_log_bfqq(bfqd, bfqq, -- "rq_enqueued: has_short_ttime=%d (seeky %d)", -+ "has_short_ttime=%d (seeky %d)", - bfq_bfqq_has_short_ttime(bfqq), BFQQ_SEEKY(bfqq)); - - bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq); -@@ -4629,7 +4627,7 @@ static void bfq_completed_request(struct request_queue *q, struct request *rq) - */ - delta_us = div_u64(now_ns - bfqd->last_completion, NSEC_PER_USEC); - -- bfq_log(bfqd, "rq_completed: delta %uus/%luus max_size %u rate %llu/%llu", -+ bfq_log(bfqd, "delta %uus/%luus max_size %u rate %llu/%llu", - delta_us, BFQ_MIN_TT/NSEC_PER_USEC, bfqd->last_rq_max_size, - delta_us > 0 ? 
- (USEC_PER_SEC* -@@ -4750,7 +4748,7 @@ static void bfq_put_request(struct request *rq) - rq->elv.priv[0] = NULL; - rq->elv.priv[1] = NULL; - -- bfq_log_bfqq(bfqq->bfqd, bfqq, "put_request %p, %d", -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "%p, %d", - bfqq, bfqq->ref); - bfq_put_queue(bfqq); - } -@@ -4816,7 +4814,7 @@ static int bfq_set_request(struct request_queue *q, struct request *rq, - bic_set_bfqq(bic, bfqq, is_sync); - if (split && is_sync) { - bfq_log_bfqq(bfqd, bfqq, -- "set_request: was_in_list %d " -+ "was_in_list %d " - "was_in_large_burst %d " - "large burst in progress %d", - bic->was_in_burst_list, -@@ -4826,12 +4824,12 @@ static int bfq_set_request(struct request_queue *q, struct request *rq, - if ((bic->was_in_burst_list && bfqd->large_burst) || - bic->saved_in_large_burst) { - bfq_log_bfqq(bfqd, bfqq, -- "set_request: marking in " -+ "marking in " - "large burst"); - bfq_mark_bfqq_in_large_burst(bfqq); - } else { - bfq_log_bfqq(bfqd, bfqq, -- "set_request: clearing in " -+ "clearing in " - "large burst"); - bfq_clear_bfqq_in_large_burst(bfqq); - if (bic->was_in_burst_list) -@@ -4888,7 +4886,7 @@ static int bfq_set_request(struct request_queue *q, struct request *rq, - - bfqq->allocated[rw]++; - bfqq->ref++; -- bfq_log_bfqq(bfqd, bfqq, "set_request: bfqq %p, %d", bfqq, bfqq->ref); -+ bfq_log_bfqq(bfqd, bfqq, "bfqq %p, %d", bfqq, bfqq->ref); - - rq->elv.priv[0] = bic; - rq->elv.priv[1] = bfqq; -@@ -4962,7 +4960,7 @@ static enum hrtimer_restart bfq_idle_slice_timer(struct hrtimer *timer) - * case we just expire a queue too early. - */ - if (bfqq) { -- bfq_log_bfqq(bfqd, bfqq, "slice_timer expired"); -+ bfq_log_bfqq(bfqd, bfqq, "expired"); - bfq_clear_bfqq_wait_request(bfqq); - - if (bfq_bfqq_budget_timeout(bfqq)) -@@ -5005,10 +5003,10 @@ static void __bfq_put_async_bfqq(struct bfq_data *bfqd, - struct bfq_group *root_group = bfqd->root_group; - struct bfq_queue *bfqq = *bfqq_ptr; - -- bfq_log(bfqd, "put_async_bfqq: %p", bfqq); -+ bfq_log(bfqd, "%p", bfqq); - if (bfqq) { - bfq_bfqq_move(bfqd, bfqq, root_group); -- bfq_log_bfqq(bfqd, bfqq, "put_async_bfqq: putting %p, %d", -+ bfq_log_bfqq(bfqd, bfqq, "putting %p, %d", - bfqq, bfqq->ref); - bfq_put_queue(bfqq); - *bfqq_ptr = NULL; -diff --git a/block/bfq.h b/block/bfq.h -index 0cd7a3f251a7..4d2fe7f77af1 100644 ---- a/block/bfq.h -+++ b/block/bfq.h -@@ -698,37 +698,37 @@ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg); - \ - assert_spin_locked((bfqd)->queue->queue_lock); \ - blkg_path(bfqg_to_blkg(bfqq_group(bfqq)), __pbuf, sizeof(__pbuf)); \ -- pr_crit("%s bfq%d%c %s " fmt "\n", \ -+ pr_crit("%s bfq%d%c %s [%s] " fmt "\n", \ - checked_dev_name((bfqd)->queue->backing_dev_info->dev), \ - (bfqq)->pid, \ - bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \ -- __pbuf, ##args); \ -+ __pbuf, __func__, ##args); \ - } while (0) - - #define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do { \ - char __pbuf[128]; \ - \ - blkg_path(bfqg_to_blkg(bfqg), __pbuf, sizeof(__pbuf)); \ -- pr_crit("%s %s " fmt "\n", \ -+ pr_crit("%s %s [%s] " fmt "\n", \ - checked_dev_name((bfqd)->queue->backing_dev_info->dev), \ -- __pbuf, ##args); \ -+ __pbuf, __func__, ##args); \ - } while (0) - - #else /* BFQ_GROUP_IOSCHED_ENABLED */ - - #define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \ -- pr_crit("%s bfq%d%c " fmt "\n", \ -+ pr_crit("%s bfq%d%c [%s] " fmt "\n", \ - checked_dev_name((bfqd)->queue->backing_dev_info->dev), \ - (bfqq)->pid, bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \ -- ##args) -+ __func__, ##args) - #define bfq_log_bfqg(bfqd, bfqg, fmt, args...) 
do {} while (0) - - #endif /* BFQ_GROUP_IOSCHED_ENABLED */ - - #define bfq_log(bfqd, fmt, args...) \ -- pr_crit("%s bfq " fmt "\n", \ -+ pr_crit("%s bfq [%s] " fmt "\n", \ - checked_dev_name((bfqd)->queue->backing_dev_info->dev), \ -- ##args) -+ __func__, ##args) - - #else /* CONFIG_BFQ_REDIRECT_TO_CONSOLE */ - -@@ -755,31 +755,32 @@ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg); - \ - assert_spin_locked((bfqd)->queue->queue_lock); \ - blkg_path(bfqg_to_blkg(bfqq_group(bfqq)), __pbuf, sizeof(__pbuf)); \ -- blk_add_trace_msg((bfqd)->queue, "bfq%d%c %s " fmt, \ -+ blk_add_trace_msg((bfqd)->queue, "bfq%d%c %s [%s] " fmt, \ - (bfqq)->pid, \ - bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \ -- __pbuf, ##args); \ -+ __pbuf, __func__, ##args); \ - } while (0) - - #define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do { \ - char __pbuf[128]; \ - \ - blkg_path(bfqg_to_blkg(bfqg), __pbuf, sizeof(__pbuf)); \ -- blk_add_trace_msg((bfqd)->queue, "%s " fmt, __pbuf, ##args); \ -+ blk_add_trace_msg((bfqd)->queue, "%s [%s] " fmt, __pbuf, \ -+ __func__, ##args); \ - } while (0) - - #else /* BFQ_GROUP_IOSCHED_ENABLED */ - - #define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \ -- blk_add_trace_msg((bfqd)->queue, "bfq%d%c " fmt, (bfqq)->pid, \ -+ blk_add_trace_msg((bfqd)->queue, "bfq%d%c [%s] " fmt, (bfqq)->pid, \ - bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \ -- ##args) -+ __func__, ##args) - #define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do {} while (0) - - #endif /* BFQ_GROUP_IOSCHED_ENABLED */ - - #define bfq_log(bfqd, fmt, args...) \ -- blk_add_trace_msg((bfqd)->queue, "bfq " fmt, ##args) -+ blk_add_trace_msg((bfqd)->queue, "bfq [%s] " fmt, __func__, ##args) - - #endif /* CONFIG_BLK_DEV_IO_TRACE */ - #endif /* CONFIG_BFQ_REDIRECT_TO_CONSOLE */ -@@ -928,7 +929,7 @@ bfq_entity_service_tree(struct bfq_entity *entity) - - if (bfqq) - bfq_log_bfqq(bfqq->bfqd, bfqq, -- "entity_service_tree %p %d", -+ "%p %d", - sched_data->service_tree + idx, idx); - #ifdef BFQ_GROUP_IOSCHED_ENABLED - else { -@@ -936,7 +937,7 @@ bfq_entity_service_tree(struct bfq_entity *entity) - container_of(entity, struct bfq_group, entity); - - bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg, -- "entity_service_tree %p %d", -+ "%p %d", - sched_data->service_tree + idx, idx); - } - #endif - -From 673a457e8a54d1c4b66e61b1a50956ba0b8c6a60 Mon Sep 17 00:00:00 2001 -From: Davide Paganelli -Date: Thu, 8 Feb 2018 11:49:58 +0100 -Subject: [PATCH 19/23] block, bfq-mq, bfq-sq: make bfq_bfqq_expire print - expiration reason - -Improve readability of the log messages related to the expiration -reasons of the function bfq_bfqq_expire. -Change the printing of the number that represents the reason for -expiration with an actual textual description of the reason. - -Signed-off-by: Davide Paganelli -Signed-off-by: Paolo Valente ---- - block/bfq-mq-iosched.c | 10 ++++++++-- - block/bfq-sq-iosched.c | 10 ++++++++-- - 2 files changed, 16 insertions(+), 4 deletions(-) - -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -index edc93b6af186..9268dd47a4e5 100644 ---- a/block/bfq-mq-iosched.c -+++ b/block/bfq-mq-iosched.c -@@ -133,6 +133,12 @@ static const int bfq_timeout = (HZ / 8); - */ - static const unsigned long bfq_merge_time_limit = HZ/10; - -+#define MAX_LENGTH_REASON_NAME 25 -+ -+static const char reason_name[][MAX_LENGTH_REASON_NAME] = {"TOO_IDLE", -+"BUDGET_TIMEOUT", "BUDGET_EXHAUSTED", "NO_MORE_REQUESTS", -+"PREEMPTED"}; -+ - static struct kmem_cache *bfq_pool; - - /* Below this threshold (in ns), we consider thinktime immediate. 
*/ -@@ -3553,8 +3559,8 @@ static void bfq_bfqq_expire(struct bfq_data *bfqd, - } - - bfq_log_bfqq(bfqd, bfqq, -- "expire (%d, slow %d, num_disp %d, short_ttime %d, weight %d)", -- reason, slow, bfqq->dispatched, -+ "expire (%s, slow %d, num_disp %d, short_ttime %d, weight %d)", -+ reason_name[reason], slow, bfqq->dispatched, - bfq_bfqq_has_short_ttime(bfqq), entity->weight); - - /* -diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c -index e49e8ac882b3..f95deaab49a1 100644 ---- a/block/bfq-sq-iosched.c -+++ b/block/bfq-sq-iosched.c -@@ -127,6 +127,12 @@ static const int bfq_timeout = (HZ / 8); - */ - static const unsigned long bfq_merge_time_limit = HZ/10; - -+#define MAX_LENGTH_REASON_NAME 25 -+ -+static const char reason_name[][MAX_LENGTH_REASON_NAME] = {"TOO_IDLE", -+"BUDGET_TIMEOUT", "BUDGET_EXHAUSTED", "NO_MORE_REQUESTS", -+"PREEMPTED"}; -+ - static struct kmem_cache *bfq_pool; - - /* Below this threshold (in ns), we consider thinktime immediate. */ -@@ -3366,8 +3372,8 @@ static void bfq_bfqq_expire(struct bfq_data *bfqd, - } - - bfq_log_bfqq(bfqd, bfqq, -- "expire (%d, slow %d, num_disp %d, short_ttime %d, weight %d)", -- reason, slow, bfqq->dispatched, -+ "expire (%s, slow %d, num_disp %d, short_ttime %d, weight %d)", -+ reason_name[reason], slow, bfqq->dispatched, - bfq_bfqq_has_short_ttime(bfqq), entity->weight); - - /* - -From 62e80623fbb58367c3f667dab22fea0804001f3b Mon Sep 17 00:00:00 2001 -From: Melzani Alessandro -Date: Mon, 26 Feb 2018 22:21:59 +0100 -Subject: [PATCH 20/23] bfq-mq: port of "block, bfq: remove batches of - confusing ifdefs" - -Commit a33801e8b473 ("block, bfq: move debug blkio stats behind -CONFIG_DEBUG_BLK_CGROUP") introduced two batches of confusing ifdefs: -one reported in [1], plus a similar one in another function. This -commit removes both batches, in the way suggested in [1]. - -[1] https://www.spinics.net/lists/linux-block/msg20043.html - -Fixes: a33801e8b473 ("block, bfq: move debug blkio stats behind CONFIG_DEBUG_BLK_CGROUP") - -Signed-off-by: Alessandro Melzani ---- - block/bfq-mq-iosched.c | 128 ++++++++++++++++++++++++++++--------------------- - 1 file changed, 73 insertions(+), 55 deletions(-) - -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -index 9268dd47a4e5..5a211620f316 100644 ---- a/block/bfq-mq-iosched.c -+++ b/block/bfq-mq-iosched.c -@@ -4256,35 +4256,17 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) - return rq; - } - --static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) --{ -- struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; -- struct request *rq; --#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP) -- struct bfq_queue *in_serv_queue, *bfqq; -- bool waiting_rq, idle_timer_disabled; --#endif - -- spin_lock_irq(&bfqd->lock); -- --#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP) -- in_serv_queue = bfqd->in_service_queue; -- waiting_rq = in_serv_queue && bfq_bfqq_wait_request(in_serv_queue); -- -- rq = __bfq_dispatch_request(hctx); -- -- idle_timer_disabled = -- waiting_rq && !bfq_bfqq_wait_request(in_serv_queue); -- --#else -- rq = __bfq_dispatch_request(hctx); --#endif -- spin_unlock_irq(&bfqd->lock); -+#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP) -+static void bfq_update_dispatch_stats(struct request_queue *q, -+ struct request *rq, -+ struct bfq_queue *in_serv_queue, -+ bool idle_timer_disabled) -+{ -+ struct bfq_queue *bfqq = rq ? 
RQ_BFQQ(rq) : NULL; - --#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP) -- bfqq = rq ? RQ_BFQQ(rq) : NULL; - if (!idle_timer_disabled && !bfqq) -- return rq; -+ return; - - /* - * rq and bfqq are guaranteed to exist until this function -@@ -4299,7 +4281,7 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) - * In addition, the following queue lock guarantees that - * bfqq_group(bfqq) exists as well. - */ -- spin_lock_irq(hctx->queue->queue_lock); -+ spin_lock_irq(q->queue_lock); - if (idle_timer_disabled) - /* - * Since the idle timer has been disabled, -@@ -4318,8 +4300,35 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) - bfqg_stats_set_start_empty_time(bfqg); - bfqg_stats_update_io_remove(bfqg, rq->cmd_flags); - } -- spin_unlock_irq(hctx->queue->queue_lock); -+ spin_unlock_irq(q->queue_lock); -+} -+#else -+static inline void bfq_update_dispatch_stats(struct request_queue *q, -+ struct request *rq, -+ struct bfq_queue *in_serv_queue, -+ bool idle_timer_disabled) {} - #endif -+static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) -+{ -+ struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; -+ struct request *rq; -+ struct bfq_queue *in_serv_queue; -+ bool waiting_rq, idle_timer_disabled; -+ -+ spin_lock_irq(&bfqd->lock); -+ -+ in_serv_queue = bfqd->in_service_queue; -+ waiting_rq = in_serv_queue && bfq_bfqq_wait_request(in_serv_queue); -+ -+ rq = __bfq_dispatch_request(hctx); -+ -+ idle_timer_disabled = -+ waiting_rq && !bfq_bfqq_wait_request(in_serv_queue); -+ -+ spin_unlock_irq(&bfqd->lock); -+ -+ bfq_update_dispatch_stats(hctx->queue, rq, in_serv_queue, -+ idle_timer_disabled); - - return rq; - } -@@ -4881,6 +4890,38 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq) - return idle_timer_disabled; - } - -+#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP) -+static void bfq_update_insert_stats(struct request_queue *q, -+ struct bfq_queue *bfqq, -+ bool idle_timer_disabled, -+ unsigned int cmd_flags) -+{ -+ if (!bfqq) -+ return; -+ -+ /* -+ * bfqq still exists, because it can disappear only after -+ * either it is merged with another queue, or the process it -+ * is associated with exits. But both actions must be taken by -+ * the same process currently executing this flow of -+ * instructions. -+ * -+ * In addition, the following queue lock guarantees that -+ * bfqq_group(bfqq) exists as well. 
-+ */ -+ spin_lock_irq(q->queue_lock); -+ bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, cmd_flags); -+ if (idle_timer_disabled) -+ bfqg_stats_update_idle_time(bfqq_group(bfqq)); -+ spin_unlock_irq(q->queue_lock); -+} -+#else -+static inline void bfq_update_insert_stats(struct request_queue *q, -+ struct bfq_queue *bfqq, -+ bool idle_timer_disabled, -+ unsigned int cmd_flags) {} -+#endif -+ - static void bfq_prepare_request(struct request *rq, struct bio *bio); - - static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, -@@ -4889,10 +4930,8 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, - struct request_queue *q = hctx->queue; - struct bfq_data *bfqd = q->elevator->elevator_data; - struct bfq_queue *bfqq = RQ_BFQQ(rq); --#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP) - bool idle_timer_disabled = false; - unsigned int cmd_flags; --#endif - - spin_lock_irq(&bfqd->lock); - if (blk_mq_sched_try_insert_merge(q, rq)) { -@@ -4938,7 +4977,6 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, - bfqq = RQ_BFQQ(rq); - } - --#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP) - idle_timer_disabled = __bfq_insert_request(bfqd, rq); - /* - * Update bfqq, because, if a queue merge has occurred -@@ -4946,9 +4984,6 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, - * redirected into a new queue. - */ - bfqq = RQ_BFQQ(rq); --#else -- __bfq_insert_request(bfqd, rq); --#endif - - if (rq_mergeable(rq)) { - elv_rqhash_add(q, rq); -@@ -4956,34 +4991,17 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, - q->last_merge = rq; - } - } --#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP) -+ - /* - * Cache cmd_flags before releasing scheduler lock, because rq - * may disappear afterwards (for example, because of a request - * merge). - */ - cmd_flags = rq->cmd_flags; --#endif -+ - spin_unlock_irq(&bfqd->lock); --#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP) -- if (!bfqq) -- return; -- /* -- * bfqq still exists, because it can disappear only after -- * either it is merged with another queue, or the process it -- * is associated with exits. But both actions must be taken by -- * the same process currently executing this flow of -- * instruction. -- * -- * In addition, the following queue lock guarantees that -- * bfqq_group(bfqq) exists as well. -- */ -- spin_lock_irq(q->queue_lock); -- bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, cmd_flags); -- if (idle_timer_disabled) -- bfqg_stats_update_idle_time(bfqq_group(bfqq)); -- spin_unlock_irq(q->queue_lock); --#endif -+ bfq_update_insert_stats(q, bfqq, idle_timer_disabled, -+ cmd_flags); - } - - static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx, - -From 0d0d05632872b226f4fae5e56af8736a4c24bf57 Mon Sep 17 00:00:00 2001 -From: Melzani Alessandro -Date: Mon, 26 Feb 2018 22:43:30 +0100 -Subject: [PATCH 21/23] bfq-sq, bfq-mq: port of "bfq: Use icq_to_bic() - consistently" - -Some code uses icq_to_bic() to convert an io_cq pointer to a -bfq_io_cq pointer while other code uses a direct cast. Convert -the code that uses a direct cast such that it uses icq_to_bic(). 
- -Signed-off-by: Alessandro Melzani ---- - block/bfq-mq-iosched.c | 2 +- - block/bfq-sq-iosched.c | 2 +- - 2 files changed, 2 insertions(+), 2 deletions(-) - -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -index 5a211620f316..7b1269558c47 100644 ---- a/block/bfq-mq-iosched.c -+++ b/block/bfq-mq-iosched.c -@@ -272,7 +272,7 @@ static const unsigned long max_service_from_wr = 120000; - #define BFQ_SERVICE_TREE_INIT ((struct bfq_service_tree) \ - { RB_ROOT, RB_ROOT, NULL, NULL, 0, 0 }) - --#define RQ_BIC(rq) ((struct bfq_io_cq *) (rq)->elv.priv[0]) -+#define RQ_BIC(rq) icq_to_bic((rq)->elv.priv[0]) - #define RQ_BFQQ(rq) ((rq)->elv.priv[1]) - - /** -diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c -index f95deaab49a1..c4aff8d55fc4 100644 ---- a/block/bfq-sq-iosched.c -+++ b/block/bfq-sq-iosched.c -@@ -266,7 +266,7 @@ static const unsigned long max_service_from_wr = 120000; - #define BFQ_SERVICE_TREE_INIT ((struct bfq_service_tree) \ - { RB_ROOT, RB_ROOT, NULL, NULL, 0, 0 }) - --#define RQ_BIC(rq) ((struct bfq_io_cq *) (rq)->elv.priv[0]) -+#define RQ_BIC(rq) icq_to_bic((rq)->elv.priv[0]) - #define RQ_BFQQ(rq) ((rq)->elv.priv[1]) - - static void bfq_schedule_dispatch(struct bfq_data *bfqd); - -From 4cb5de6add7d6ad0d25d73cb95dc871305db1522 Mon Sep 17 00:00:00 2001 -From: Melzani Alessandro -Date: Mon, 26 Feb 2018 22:59:30 +0100 -Subject: [PATCH 22/23] bfq-sq, bfq-mq: port of "block, bfq: fix error handle - in bfq_init" - -if elv_register fail, bfq_pool should be free. - -Signed-off-by: Alessandro Melzani ---- - block/bfq-mq-iosched.c | 4 +++- - block/bfq-sq-iosched.c | 4 +++- - 2 files changed, 6 insertions(+), 2 deletions(-) - -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -index 7b1269558c47..964e88c2ce59 100644 ---- a/block/bfq-mq-iosched.c -+++ b/block/bfq-mq-iosched.c -@@ -6129,7 +6129,7 @@ static int __init bfq_init(void) - - ret = elv_register(&iosched_bfq_mq); - if (ret) -- goto err_pol_unreg; -+ goto slab_kill; - - #ifdef BFQ_GROUP_IOSCHED_ENABLED - strcat(msg, " (with cgroups support)"); -@@ -6138,6 +6138,8 @@ static int __init bfq_init(void) - - return 0; - -+slab_kill: -+ bfq_slab_kill(); - err_pol_unreg: - #ifdef BFQ_GROUP_IOSCHED_ENABLED - blkcg_policy_unregister(&blkcg_policy_bfq); -diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c -index c4aff8d55fc4..7f0cf1f01ffc 100644 ---- a/block/bfq-sq-iosched.c -+++ b/block/bfq-sq-iosched.c -@@ -5590,7 +5590,7 @@ static int __init bfq_init(void) - - ret = elv_register(&iosched_bfq); - if (ret) -- goto err_pol_unreg; -+ goto slab_kill; - - #ifdef BFQ_GROUP_IOSCHED_ENABLED - strcat(msg, " (with cgroups support)"); -@@ -5599,6 +5599,8 @@ static int __init bfq_init(void) - - return 0; - -+slab_kill: -+ bfq_slab_kill(); - err_pol_unreg: - #ifdef BFQ_GROUP_IOSCHED_ENABLED - blkcg_policy_unregister(&blkcg_policy_bfq); - -From 1f77c173aaa87ffb22c9f062a6449245d14311e4 Mon Sep 17 00:00:00 2001 -From: Paolo Valente -Date: Wed, 4 Apr 2018 11:28:16 +0200 -Subject: [PATCH 23/23] block, bfq-sq, bfq-mq: lower-bound the estimated peak - rate to 1 - -If a storage device handled by BFQ happens to be slower than 7.5 KB/s -for a certain amount of time (in the order of a second), then the -estimated peak rate of the device, maintained in BFQ, becomes equal to -0. The reason is the limited precision with which the rate is -represented (details on the range of representable values in the -comments introduced by this commit). This leads to a division-by-zero -error where the estimated peak rate is used as divisor. 
Such a type of -failure has been reported in [1]. - -This commit addresses this issue by: -1. Lower-bounding the estimated peak rate to 1 -2. Adding and improving comments on the range of rates representable - -[1] https://www.spinics.net/lists/kernel/msg2739205.html - -Signed-off-by: Konstantin Khlebnikov -Signed-off-by: Paolo Valente ---- - block/bfq-mq-iosched.c | 25 ++++++++++++++++++++++++- - block/bfq-mq.h | 7 ++++++- - block/bfq-sq-iosched.c | 25 ++++++++++++++++++++++++- - block/bfq.h | 7 ++++++- - 4 files changed, 60 insertions(+), 4 deletions(-) - -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -index 964e88c2ce59..03efd90c5d20 100644 ---- a/block/bfq-mq-iosched.c -+++ b/block/bfq-mq-iosched.c -@@ -160,7 +160,20 @@ static struct kmem_cache *bfq_pool; - /* Target observation time interval for a peak-rate update (ns) */ - #define BFQ_RATE_REF_INTERVAL NSEC_PER_SEC - --/* Shift used for peak rate fixed precision calculations. */ -+/* -+ * Shift used for peak-rate fixed precision calculations. -+ * With -+ * - the current shift: 16 positions -+ * - the current type used to store rate: u32 -+ * - the current unit of measure for rate: [sectors/usec], or, more precisely, -+ * [(sectors/usec) / 2^BFQ_RATE_SHIFT] to take into account the shift, -+ * the range of rates that can be stored is -+ * [1 / 2^BFQ_RATE_SHIFT, 2^(32 - BFQ_RATE_SHIFT)] sectors/usec = -+ * [1 / 2^16, 2^16] sectors/usec = [15e-6, 65536] sectors/usec = -+ * [15, 65G] sectors/sec -+ * Which, assuming a sector size of 512B, corresponds to a range of -+ * [7.5K, 33T] B/sec -+ */ - #define BFQ_RATE_SHIFT 16 - - /* -@@ -2881,6 +2894,16 @@ static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq) - BUG_ON(bfqd->peak_rate > 20<peak_rate += rate; -+ -+ /* -+ * For a very slow device, bfqd->peak_rate can reach 0 (see -+ * the minimum representable values reported in the comments -+ * on BFQ_RATE_SHIFT). Push to 1 if this happens, to avoid -+ * divisions by zero where bfqd->peak_rate is used as a -+ * divisor. -+ */ -+ bfqd->peak_rate = max_t(u32, 1, bfqd->peak_rate); -+ - update_thr_responsiveness_params(bfqd); - BUG_ON(bfqd->peak_rate > 20<peak_rate > 20<peak_rate += rate; -+ -+ /* -+ * For a very slow device, bfqd->peak_rate can reach 0 (see -+ * the minimum representable values reported in the comments -+ * on BFQ_RATE_SHIFT). Push to 1 if this happens, to avoid -+ * divisions by zero where bfqd->peak_rate is used as a -+ * divisor. -+ */ -+ bfqd->peak_rate = max_t(u32, 1, bfqd->peak_rate); -+ - update_thr_responsiveness_params(bfqd); - BUG_ON(bfqd->peak_rate > 20< -+ * -+ * Copyright (C) 2008 Fabio Checconi -+ * Paolo Valente -+ * -+ * Copyright (C) 2015 Paolo Valente -+ * -+ * Copyright (C) 2016 Paolo Valente -+ * -+ * Licensed under the GPL-2 as detailed in the accompanying COPYING.BFQ -+ * file. 
-+ */ -+ -+#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP) -+ -+/* bfqg stats flags */ -+enum bfqg_stats_flags { -+ BFQG_stats_waiting = 0, -+ BFQG_stats_idling, -+ BFQG_stats_empty, -+}; -+ -+#define BFQG_FLAG_FNS(name) \ -+static void bfqg_stats_mark_##name(struct bfqg_stats *stats) \ -+{ \ -+ stats->flags |= (1 << BFQG_stats_##name); \ -+} \ -+static void bfqg_stats_clear_##name(struct bfqg_stats *stats) \ -+{ \ -+ stats->flags &= ~(1 << BFQG_stats_##name); \ -+} \ -+static int bfqg_stats_##name(struct bfqg_stats *stats) \ -+{ \ -+ return (stats->flags & (1 << BFQG_stats_##name)) != 0; \ -+} \ -+ -+BFQG_FLAG_FNS(waiting) -+BFQG_FLAG_FNS(idling) -+BFQG_FLAG_FNS(empty) -+#undef BFQG_FLAG_FNS -+ -+#ifdef BFQ_MQ -+/* This should be called with the scheduler lock held. */ -+#else -+/* This should be called with the queue_lock held. */ -+#endif -+static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats) -+{ -+ u64 now; -+ -+ if (!bfqg_stats_waiting(stats)) -+ return; -+ -+ now = ktime_get_ns(); -+ if (now > stats->start_group_wait_time) -+ blkg_stat_add(&stats->group_wait_time, -+ now - stats->start_group_wait_time); -+ bfqg_stats_clear_waiting(stats); -+} -+ -+#ifdef BFQ_MQ -+/* This should be called with the scheduler lock held. */ -+#else -+/* This should be called with the queue_lock held. */ -+#endif -+static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg, -+ struct bfq_group *curr_bfqg) -+{ -+ struct bfqg_stats *stats = &bfqg->stats; -+ -+ if (bfqg_stats_waiting(stats)) -+ return; -+ if (bfqg == curr_bfqg) -+ return; -+ stats->start_group_wait_time = ktime_get_ns(); -+ bfqg_stats_mark_waiting(stats); -+} -+ -+#ifdef BFQ_MQ -+/* This should be called with the scheduler lock held. */ -+#else -+/* This should be called with the queue_lock held. */ -+#endif -+static void bfqg_stats_end_empty_time(struct bfqg_stats *stats) -+{ -+ u64 now; -+ -+ if (!bfqg_stats_empty(stats)) -+ return; -+ -+ now = ktime_get_ns(); -+ if (now > stats->start_empty_time) -+ blkg_stat_add(&stats->empty_time, -+ now - stats->start_empty_time); -+ bfqg_stats_clear_empty(stats); -+} -+ -+static void bfqg_stats_update_dequeue(struct bfq_group *bfqg) -+{ -+ blkg_stat_add(&bfqg->stats.dequeue, 1); -+} -+ -+static void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) -+{ -+ struct bfqg_stats *stats = &bfqg->stats; -+ -+ if (blkg_rwstat_total(&stats->queued)) -+ return; -+ -+ /* -+ * group is already marked empty. This can happen if bfqq got new -+ * request in parent group and moved to this group while being added -+ * to service tree. Just ignore the event and move on. 
-+ */ -+ if (bfqg_stats_empty(stats)) -+ return; -+ -+ stats->start_empty_time = ktime_get_ns(); -+ bfqg_stats_mark_empty(stats); -+} -+ -+static void bfqg_stats_update_idle_time(struct bfq_group *bfqg) -+{ -+ struct bfqg_stats *stats = &bfqg->stats; -+ -+ if (bfqg_stats_idling(stats)) { -+ u64 now = ktime_get_ns(); -+ -+ if (now > stats->start_idle_time) -+ blkg_stat_add(&stats->idle_time, -+ now - stats->start_idle_time); -+ bfqg_stats_clear_idling(stats); -+ } -+} -+ -+static void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) -+{ -+ struct bfqg_stats *stats = &bfqg->stats; -+ -+ stats->start_idle_time = ktime_get_ns(); -+ bfqg_stats_mark_idling(stats); -+} -+ -+static void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg) -+{ -+ struct bfqg_stats *stats = &bfqg->stats; -+ -+ blkg_stat_add(&stats->avg_queue_size_sum, -+ blkg_rwstat_total(&stats->queued)); -+ blkg_stat_add(&stats->avg_queue_size_samples, 1); -+ bfqg_stats_update_group_wait_time(stats); -+} -+ -+static void bfqg_stats_update_io_add(struct bfq_group *bfqg, -+ struct bfq_queue *bfqq, -+ unsigned int op) -+{ -+ blkg_rwstat_add(&bfqg->stats.queued, op, 1); -+ bfqg_stats_end_empty_time(&bfqg->stats); -+ if (!(bfqq == ((struct bfq_data *)bfqg->bfqd)->in_service_queue)) -+ bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq)); -+} -+ -+static void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) -+{ -+ blkg_rwstat_add(&bfqg->stats.queued, op, -1); -+} -+ -+static void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op) -+{ -+ blkg_rwstat_add(&bfqg->stats.merged, op, 1); -+} -+ -+static void bfqg_stats_update_completion(struct bfq_group *bfqg, -+ u64 start_time_ns, -+ u64 io_start_time_ns, -+ unsigned int op) -+{ -+ struct bfqg_stats *stats = &bfqg->stats; -+ u64 now = ktime_get_ns(); -+ -+ if (now > io_start_time_ns) -+ blkg_rwstat_add(&stats->service_time, op, -+ now - io_start_time_ns); -+ if (io_start_time_ns > start_time_ns) -+ blkg_rwstat_add(&stats->wait_time, op, -+ io_start_time_ns - start_time_ns); -+} -+ -+#else /* BFQ_GROUP_IOSCHED_ENABLED && CONFIG_DEBUG_BLK_CGROUP */ -+ -+static inline void bfqg_stats_update_io_add(struct bfq_group *bfqg, -+ struct bfq_queue *bfqq, unsigned int op) { } -+static inline void -+bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) { } -+static inline void -+bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op) { } -+static inline void bfqg_stats_update_completion(struct bfq_group *bfqg, -+ u64 start_time_ns, -+ u64 io_start_time_ns, -+ unsigned int op) { } -+static inline void -+bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg, -+ struct bfq_group *curr_bfqg) { } -+static inline void bfqg_stats_end_empty_time(struct bfqg_stats *stats) { } -+static inline void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { } -+static inline void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) { } -+static inline void bfqg_stats_update_idle_time(struct bfq_group *bfqg) { } -+static inline void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { } -+static inline void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg) { } -+ -+#endif /* BFQ_GROUP_IOSCHED_ENABLED && CONFIG_DEBUG_BLK_CGROUP */ -+ -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+static struct blkcg_policy blkcg_policy_bfq; -+ -+/* -+ * blk-cgroup policy-related handlers -+ * The following functions help in converting between blk-cgroup -+ * internal structures and BFQ-specific structures. 
-+ */ -+ -+static struct bfq_group *pd_to_bfqg(struct blkg_policy_data *pd) -+{ -+ return pd ? container_of(pd, struct bfq_group, pd) : NULL; -+} -+ -+static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg) -+{ -+ return pd_to_blkg(&bfqg->pd); -+} -+ -+static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg) -+{ -+ struct blkg_policy_data *pd = blkg_to_pd(blkg, &blkcg_policy_bfq); -+ -+ return pd_to_bfqg(pd); -+} -+ -+/* -+ * bfq_group handlers -+ * The following functions help in navigating the bfq_group hierarchy -+ * by allowing to find the parent of a bfq_group or the bfq_group -+ * associated to a bfq_queue. -+ */ -+ -+static struct bfq_group *bfqg_parent(struct bfq_group *bfqg) -+{ -+ struct blkcg_gq *pblkg = bfqg_to_blkg(bfqg)->parent; -+ -+ return pblkg ? blkg_to_bfqg(pblkg) : NULL; -+} -+ -+static struct bfq_group *bfqq_group(struct bfq_queue *bfqq) -+{ -+ struct bfq_entity *group_entity = bfqq->entity.parent; -+ -+ return group_entity ? container_of(group_entity, struct bfq_group, -+ entity) : -+ bfqq->bfqd->root_group; -+} -+ -+/* -+ * The following two functions handle get and put of a bfq_group by -+ * wrapping the related blk-cgroup hooks. -+ */ -+ -+static void bfqg_get(struct bfq_group *bfqg) -+{ -+#ifdef BFQ_MQ -+ bfqg->ref++; -+#else -+ blkg_get(bfqg_to_blkg(bfqg)); -+#endif -+} -+ -+static void bfqg_put(struct bfq_group *bfqg) -+{ -+#ifdef BFQ_MQ -+ bfqg->ref--; -+ -+ BUG_ON(bfqg->ref < 0); -+ if (bfqg->ref == 0) -+ kfree(bfqg); -+#else -+ blkg_put(bfqg_to_blkg(bfqg)); -+#endif -+} -+ -+#ifdef BFQ_MQ -+static void bfqg_and_blkg_get(struct bfq_group *bfqg) -+{ -+ /* see comments in bfq_bic_update_cgroup for why refcounting bfqg */ -+ bfqg_get(bfqg); -+ -+ blkg_get(bfqg_to_blkg(bfqg)); -+} -+ -+static void bfqg_and_blkg_put(struct bfq_group *bfqg) -+{ -+ blkg_put(bfqg_to_blkg(bfqg)); -+ -+ bfqg_put(bfqg); -+} -+#endif -+ -+/* @stats = 0 */ -+static void bfqg_stats_reset(struct bfqg_stats *stats) -+{ -+#ifdef CONFIG_DEBUG_BLK_CGROUP -+ /* queued stats shouldn't be cleared */ -+ blkg_rwstat_reset(&stats->merged); -+ blkg_rwstat_reset(&stats->service_time); -+ blkg_rwstat_reset(&stats->wait_time); -+ blkg_stat_reset(&stats->time); -+ blkg_stat_reset(&stats->avg_queue_size_sum); -+ blkg_stat_reset(&stats->avg_queue_size_samples); -+ blkg_stat_reset(&stats->dequeue); -+ blkg_stat_reset(&stats->group_wait_time); -+ blkg_stat_reset(&stats->idle_time); -+ blkg_stat_reset(&stats->empty_time); -+#endif -+} -+ -+/* @to += @from */ -+static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from) -+{ -+ if (!to || !from) -+ return; -+ -+#ifdef CONFIG_DEBUG_BLK_CGROUP -+ /* queued stats shouldn't be cleared */ -+ blkg_rwstat_add_aux(&to->merged, &from->merged); -+ blkg_rwstat_add_aux(&to->service_time, &from->service_time); -+ blkg_rwstat_add_aux(&to->wait_time, &from->wait_time); -+ blkg_stat_add_aux(&from->time, &from->time); -+ blkg_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum); -+ blkg_stat_add_aux(&to->avg_queue_size_samples, -+ &from->avg_queue_size_samples); -+ blkg_stat_add_aux(&to->dequeue, &from->dequeue); -+ blkg_stat_add_aux(&to->group_wait_time, &from->group_wait_time); -+ blkg_stat_add_aux(&to->idle_time, &from->idle_time); -+ blkg_stat_add_aux(&to->empty_time, &from->empty_time); -+#endif -+} -+ -+/* -+ * Transfer @bfqg's stats to its parent's dead_stats so that the ancestors' -+ * recursive stats can still account for the amount used by this bfqg after -+ * it's gone. 
-+ */ -+static void bfqg_stats_xfer_dead(struct bfq_group *bfqg) -+{ -+ struct bfq_group *parent; -+ -+ if (!bfqg) /* root_group */ -+ return; -+ -+ parent = bfqg_parent(bfqg); -+ -+ lockdep_assert_held(bfqg_to_blkg(bfqg)->q->queue_lock); -+ -+ if (unlikely(!parent)) -+ return; -+ -+ bfqg_stats_add_aux(&parent->stats, &bfqg->stats); -+ bfqg_stats_reset(&bfqg->stats); -+} -+ -+static void bfq_init_entity(struct bfq_entity *entity, -+ struct bfq_group *bfqg) -+{ -+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); -+ -+ entity->weight = entity->new_weight; -+ entity->orig_weight = entity->new_weight; -+ if (bfqq) { -+ bfqq->ioprio = bfqq->new_ioprio; -+ bfqq->ioprio_class = bfqq->new_ioprio_class; -+#ifdef BFQ_MQ -+ /* -+ * Make sure that bfqg and its associated blkg do not -+ * disappear before entity. -+ */ -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "getting bfqg %p and blkg\n", -+ bfqg); -+ -+ bfqg_and_blkg_get(bfqg); -+#else -+ bfqg_get(bfqg); -+#endif -+ } -+ entity->parent = bfqg->my_entity; /* NULL for root group */ -+ entity->sched_data = &bfqg->sched_data; -+} -+ -+static void bfqg_stats_exit(struct bfqg_stats *stats) -+{ -+#ifdef CONFIG_DEBUG_BLK_CGROUP -+ blkg_rwstat_exit(&stats->merged); -+ blkg_rwstat_exit(&stats->service_time); -+ blkg_rwstat_exit(&stats->wait_time); -+ blkg_rwstat_exit(&stats->queued); -+ blkg_stat_exit(&stats->time); -+ blkg_stat_exit(&stats->avg_queue_size_sum); -+ blkg_stat_exit(&stats->avg_queue_size_samples); -+ blkg_stat_exit(&stats->dequeue); -+ blkg_stat_exit(&stats->group_wait_time); -+ blkg_stat_exit(&stats->idle_time); -+ blkg_stat_exit(&stats->empty_time); -+#endif -+} -+ -+static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp) -+{ -+#ifdef CONFIG_DEBUG_BLK_CGROUP -+ if (blkg_rwstat_init(&stats->merged, gfp) || -+ blkg_rwstat_init(&stats->service_time, gfp) || -+ blkg_rwstat_init(&stats->wait_time, gfp) || -+ blkg_rwstat_init(&stats->queued, gfp) || -+ blkg_stat_init(&stats->time, gfp) || -+ blkg_stat_init(&stats->avg_queue_size_sum, gfp) || -+ blkg_stat_init(&stats->avg_queue_size_samples, gfp) || -+ blkg_stat_init(&stats->dequeue, gfp) || -+ blkg_stat_init(&stats->group_wait_time, gfp) || -+ blkg_stat_init(&stats->idle_time, gfp) || -+ blkg_stat_init(&stats->empty_time, gfp)) { -+ bfqg_stats_exit(stats); -+ return -ENOMEM; -+ } -+#endif -+ -+ return 0; -+} -+ -+static struct bfq_group_data *cpd_to_bfqgd(struct blkcg_policy_data *cpd) -+{ -+ return cpd ? container_of(cpd, struct bfq_group_data, pd) : NULL; -+} -+ -+static struct bfq_group_data *blkcg_to_bfqgd(struct blkcg *blkcg) -+{ -+ return cpd_to_bfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_bfq)); -+} -+ -+static struct blkcg_policy_data *bfq_cpd_alloc(gfp_t gfp) -+{ -+ struct bfq_group_data *bgd; -+ -+ bgd = kzalloc(sizeof(*bgd), gfp); -+ if (!bgd) -+ return NULL; -+ return &bgd->pd; -+} -+ -+static void bfq_cpd_init(struct blkcg_policy_data *cpd) -+{ -+ struct bfq_group_data *d = cpd_to_bfqgd(cpd); -+ -+ d->weight = cgroup_subsys_on_dfl(io_cgrp_subsys) ? 
-+ CGROUP_WEIGHT_DFL : BFQ_WEIGHT_LEGACY_DFL; -+} -+ -+static void bfq_cpd_free(struct blkcg_policy_data *cpd) -+{ -+ kfree(cpd_to_bfqgd(cpd)); -+} -+ -+static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node) -+{ -+ struct bfq_group *bfqg; -+ -+ bfqg = kzalloc_node(sizeof(*bfqg), gfp, node); -+ if (!bfqg) -+ return NULL; -+ -+ if (bfqg_stats_init(&bfqg->stats, gfp)) { -+ kfree(bfqg); -+ return NULL; -+ } -+#ifdef BFQ_MQ -+ /* see comments in bfq_bic_update_cgroup for why refcounting */ -+ bfqg_get(bfqg); -+#endif -+ return &bfqg->pd; -+} -+ -+static void bfq_pd_init(struct blkg_policy_data *pd) -+{ -+ struct blkcg_gq *blkg; -+ struct bfq_group *bfqg; -+ struct bfq_data *bfqd; -+ struct bfq_entity *entity; -+ struct bfq_group_data *d; -+ -+ blkg = pd_to_blkg(pd); -+ BUG_ON(!blkg); -+ bfqg = blkg_to_bfqg(blkg); -+ bfqd = blkg->q->elevator->elevator_data; -+ BUG_ON(bfqg == bfqd->root_group); -+ entity = &bfqg->entity; -+ d = blkcg_to_bfqgd(blkg->blkcg); -+ -+ entity->orig_weight = entity->weight = entity->new_weight = d->weight; -+ entity->my_sched_data = &bfqg->sched_data; -+ bfqg->my_entity = entity; /* -+ * the root_group's will be set to NULL -+ * in bfq_init_queue() -+ */ -+ bfqg->bfqd = bfqd; -+ bfqg->active_entities = 0; -+ bfqg->rq_pos_tree = RB_ROOT; -+} -+ -+static void bfq_pd_free(struct blkg_policy_data *pd) -+{ -+ struct bfq_group *bfqg = pd_to_bfqg(pd); -+ -+ bfqg_stats_exit(&bfqg->stats); -+#ifdef BFQ_MQ -+ bfqg_put(bfqg); -+#else -+ kfree(bfqg); -+#endif -+} -+ -+static void bfq_pd_reset_stats(struct blkg_policy_data *pd) -+{ -+ struct bfq_group *bfqg = pd_to_bfqg(pd); -+ -+ bfqg_stats_reset(&bfqg->stats); -+} -+ -+static void bfq_group_set_parent(struct bfq_group *bfqg, -+ struct bfq_group *parent) -+{ -+ struct bfq_entity *entity; -+ -+ BUG_ON(!parent); -+ BUG_ON(!bfqg); -+ BUG_ON(bfqg == parent); -+ -+ entity = &bfqg->entity; -+ entity->parent = parent->my_entity; -+ entity->sched_data = &parent->sched_data; -+} -+ -+static struct bfq_group *bfq_lookup_bfqg(struct bfq_data *bfqd, -+ struct blkcg *blkcg) -+{ -+ struct blkcg_gq *blkg; -+ -+ blkg = blkg_lookup(blkcg, bfqd->queue); -+ if (likely(blkg)) -+ return blkg_to_bfqg(blkg); -+ return NULL; -+} -+ -+static struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd, -+ struct blkcg *blkcg) -+{ -+ struct bfq_group *bfqg, *parent; -+ struct bfq_entity *entity; -+ -+ bfqg = bfq_lookup_bfqg(bfqd, blkcg); -+ -+ if (unlikely(!bfqg)) -+ return NULL; -+ -+ /* -+ * Update chain of bfq_groups as we might be handling a leaf group -+ * which, along with some of its relatives, has not been hooked yet -+ * to the private hierarchy of BFQ. -+ */ -+ entity = &bfqg->entity; -+ for_each_entity(entity) { -+ bfqg = container_of(entity, struct bfq_group, entity); -+ BUG_ON(!bfqg); -+ if (bfqg != bfqd->root_group) { -+ parent = bfqg_parent(bfqg); -+ if (!parent) -+ parent = bfqd->root_group; -+ BUG_ON(!parent); -+ bfq_group_set_parent(bfqg, parent); -+ } -+ } -+ -+ return bfqg; -+} -+ -+static void bfq_pos_tree_add_move(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq); -+ -+static void bfq_bfqq_expire(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq, -+ bool compensate, -+ enum bfqq_expiration reason); -+ -+/** -+ * bfq_bfqq_move - migrate @bfqq to @bfqg. -+ * @bfqd: queue descriptor. -+ * @bfqq: the queue to move. -+ * @bfqg: the group to move to. -+ * -+ * Move @bfqq to @bfqg, deactivating it from its old group and reactivating -+ * it on the new one. Avoid putting the entity on the old group idle tree. 
-+ * -+#ifdef BFQ_MQ -+ * Must be called under the scheduler lock, to make sure that the blkg -+ * owning @bfqg does not disappear (see comments in -+ * bfq_bic_update_cgroup on guaranteeing the consistency of blkg -+ * objects). -+#else -+ * Must be called under the queue lock; the cgroup owning @bfqg must -+ * not disappear (by now this just means that we are called under -+ * rcu_read_lock()). -+#endif -+ */ -+static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, -+ struct bfq_group *bfqg) -+{ -+ struct bfq_entity *entity = &bfqq->entity; -+ -+ BUG_ON(!bfq_bfqq_busy(bfqq) && !RB_EMPTY_ROOT(&bfqq->sort_list)); -+ BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list) && !entity->on_st); -+ BUG_ON(bfq_bfqq_busy(bfqq) && RB_EMPTY_ROOT(&bfqq->sort_list) -+ && entity->on_st && -+ bfqq != bfqd->in_service_queue); -+ BUG_ON(!bfq_bfqq_busy(bfqq) && bfqq == bfqd->in_service_queue); -+ -+ /* If bfqq is empty, then bfq_bfqq_expire also invokes -+ * bfq_del_bfqq_busy, thereby removing bfqq and its entity -+ * from data structures related to current group. Otherwise we -+ * need to remove bfqq explicitly with bfq_deactivate_bfqq, as -+ * we do below. -+ */ -+ if (bfqq == bfqd->in_service_queue) -+ bfq_bfqq_expire(bfqd, bfqd->in_service_queue, -+ false, BFQ_BFQQ_PREEMPTED); -+ -+ BUG_ON(entity->on_st && !bfq_bfqq_busy(bfqq) -+ && &bfq_entity_service_tree(entity)->idle != -+ entity->tree); -+ -+ BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_bfqq_busy(bfqq)); -+ -+ if (bfq_bfqq_busy(bfqq)) -+ bfq_deactivate_bfqq(bfqd, bfqq, false, false); -+ else if (entity->on_st) { -+ BUG_ON(&bfq_entity_service_tree(entity)->idle != -+ entity->tree); -+ bfq_put_idle_entity(bfq_entity_service_tree(entity), entity); -+ } -+#ifdef BFQ_MQ -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "putting blkg and bfqg %p\n", bfqg); -+ -+ bfqg_and_blkg_put(bfqq_group(bfqq)); -+#else -+ bfqg_put(bfqq_group(bfqq)); -+#endif -+ -+ entity->parent = bfqg->my_entity; -+ entity->sched_data = &bfqg->sched_data; -+#ifdef BFQ_MQ -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "getting blkg and bfqg %p\n", bfqg); -+ -+ /* pin down bfqg and its associated blkg */ -+ bfqg_and_blkg_get(bfqg); -+#else -+ bfqg_get(bfqg); -+#endif -+ -+ BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_bfqq_busy(bfqq)); -+ if (bfq_bfqq_busy(bfqq)) { -+ bfq_pos_tree_add_move(bfqd, bfqq); -+ bfq_activate_bfqq(bfqd, bfqq); -+ } -+ -+ if (!bfqd->in_service_queue && !bfqd->rq_in_driver) -+ bfq_schedule_dispatch(bfqd); -+ BUG_ON(entity->on_st && !bfq_bfqq_busy(bfqq) -+ && &bfq_entity_service_tree(entity)->idle != -+ entity->tree); -+} -+ -+/** -+ * __bfq_bic_change_cgroup - move @bic to @cgroup. -+ * @bfqd: the queue descriptor. -+ * @bic: the bic to move. -+ * @blkcg: the blk-cgroup to move to. -+ * -+#ifdef BFQ_MQ -+ * Move bic to blkcg, assuming that bfqd->lock is held; which makes -+ * sure that the reference to cgroup is valid across the call (see -+ * comments in bfq_bic_update_cgroup on this issue) -+#else -+ * Move bic to blkcg, assuming that bfqd->queue is locked; the caller -+ * has to make sure that the reference to cgroup is valid across the call. -+#endif -+ * -+ * NOTE: an alternative approach might have been to store the current -+ * cgroup in bfqq and getting a reference to it, reducing the lookup -+ * time here, at the price of slightly more complex code. 
-+ */ -+static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd, -+ struct bfq_io_cq *bic, -+ struct blkcg *blkcg) -+{ -+ struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0); -+ struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1); -+ struct bfq_group *bfqg; -+ struct bfq_entity *entity; -+ -+ bfqg = bfq_find_set_group(bfqd, blkcg); -+ -+ if (unlikely(!bfqg)) -+ bfqg = bfqd->root_group; -+ -+ if (async_bfqq) { -+ entity = &async_bfqq->entity; -+ -+ if (entity->sched_data != &bfqg->sched_data) { -+ bic_set_bfqq(bic, NULL, 0); -+ bfq_log_bfqq(bfqd, async_bfqq, -+ "%p %d", -+ async_bfqq, -+ async_bfqq->ref); -+ bfq_put_queue(async_bfqq); -+ } -+ } -+ -+ if (sync_bfqq) { -+ entity = &sync_bfqq->entity; -+ if (entity->sched_data != &bfqg->sched_data) -+ bfq_bfqq_move(bfqd, sync_bfqq, bfqg); -+ } -+ -+ return bfqg; -+} -+ -+static void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) -+{ -+ struct bfq_data *bfqd = bic_to_bfqd(bic); -+ struct bfq_group *bfqg = NULL; -+ uint64_t serial_nr; -+ -+ rcu_read_lock(); -+ serial_nr = bio_blkcg(bio)->css.serial_nr; -+ -+ /* -+ * Check whether blkcg has changed. The condition may trigger -+ * spuriously on a newly created cic but there's no harm. -+ */ -+ if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr)) -+ goto out; -+ -+ bfqg = __bfq_bic_change_cgroup(bfqd, bic, bio_blkcg(bio)); -+#ifdef BFQ_MQ -+ /* -+ * Update blkg_path for bfq_log_* functions. We cache this -+ * path, and update it here, for the following -+ * reasons. Operations on blkg objects in blk-cgroup are -+ * protected with the request_queue lock, and not with the -+ * lock that protects the instances of this scheduler -+ * (bfqd->lock). This exposes BFQ to the following sort of -+ * race. -+ * -+ * The blkg_lookup performed in bfq_get_queue, protected -+ * through rcu, may happen to return the address of a copy of -+ * the original blkg. If this is the case, then the -+ * bfqg_and_blkg_get performed in bfq_get_queue, to pin down -+ * the blkg, is useless: it does not prevent blk-cgroup code -+ * from destroying both the original blkg and all objects -+ * directly or indirectly referred by the copy of the -+ * blkg. -+ * -+ * On the bright side, destroy operations on a blkg invoke, as -+ * a first step, hooks of the scheduler associated with the -+ * blkg. And these hooks are executed with bfqd->lock held for -+ * BFQ. As a consequence, for any blkg associated with the -+ * request queue this instance of the scheduler is attached -+ * to, we are guaranteed that such a blkg is not destroyed, and -+ * that all the pointers it contains are consistent, while we -+ * are holding bfqd->lock. A blkg_lookup performed with -+ * bfqd->lock held then returns a fully consistent blkg, which -+ * remains consistent until this lock is held. -+ * -+ * Thanks to the last fact, and to the fact that: (1) bfqg has -+ * been obtained through a blkg_lookup in the above -+ * assignment, and (2) bfqd->lock is being held, here we can -+ * safely use the policy data for the involved blkg (i.e., the -+ * field bfqg->pd) to get to the blkg associated with bfqg, -+ * and then we can safely use any field of blkg. After we -+ * release bfqd->lock, even just getting blkg through this -+ * bfqg may cause dangling references to be traversed, as -+ * bfqg->pd may not exist any more. -+ * -+ * In view of the above facts, here we cache, in the bfqg, any -+ * blkg data we may need for this bic, and for its associated -+ * bfq_queue. 
As of now, we need to cache only the path of the -+ * blkg, which is used in the bfq_log_* functions. -+ * -+ * Finally, note that bfqg itself needs to be protected from -+ * destruction on the blkg_free of the original blkg (which -+ * invokes bfq_pd_free). We use an additional private -+ * refcounter for bfqg, to let it disappear only after no -+ * bfq_queue refers to it any longer. -+ */ -+ blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path)); -+#endif -+ bic->blkcg_serial_nr = serial_nr; -+out: -+ rcu_read_unlock(); -+} -+ -+/** -+ * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st. -+ * @st: the service tree being flushed. -+ */ -+static void bfq_flush_idle_tree(struct bfq_service_tree *st) -+{ -+ struct bfq_entity *entity = st->first_idle; -+ -+ for (; entity ; entity = st->first_idle) -+ __bfq_deactivate_entity(entity, false); -+} -+ -+/** -+ * bfq_reparent_leaf_entity - move leaf entity to the root_group. -+ * @bfqd: the device data structure with the root group. -+ * @entity: the entity to move. -+ */ -+static void bfq_reparent_leaf_entity(struct bfq_data *bfqd, -+ struct bfq_entity *entity) -+{ -+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); -+ -+ BUG_ON(!bfqq); -+ bfq_bfqq_move(bfqd, bfqq, bfqd->root_group); -+} -+ -+/** -+ * bfq_reparent_active_entities - move to the root group all active -+ * entities. -+ * @bfqd: the device data structure with the root group. -+ * @bfqg: the group to move from. -+ * @st: the service tree with the entities. -+ */ -+static void bfq_reparent_active_entities(struct bfq_data *bfqd, -+ struct bfq_group *bfqg, -+ struct bfq_service_tree *st) -+{ -+ struct rb_root *active = &st->active; -+ struct bfq_entity *entity = NULL; -+ -+ if (!RB_EMPTY_ROOT(&st->active)) -+ entity = bfq_entity_of(rb_first(active)); -+ -+ for (; entity ; entity = bfq_entity_of(rb_first(active))) -+ bfq_reparent_leaf_entity(bfqd, entity); -+ -+ if (bfqg->sched_data.in_service_entity) -+ bfq_reparent_leaf_entity(bfqd, -+ bfqg->sched_data.in_service_entity); -+} -+ -+/** -+ * bfq_pd_offline - deactivate the entity associated with @pd, -+ * and reparent its children entities. -+ * @pd: descriptor of the policy going offline. -+ * -+ * blkio already grabs the queue_lock for us, so no need to use -+ * RCU-based magic -+ */ -+static void bfq_pd_offline(struct blkg_policy_data *pd) -+{ -+ struct bfq_service_tree *st; -+ struct bfq_group *bfqg; -+ struct bfq_data *bfqd; -+ struct bfq_entity *entity; -+#ifdef BFQ_MQ -+ unsigned long flags; -+#endif -+ int i; -+ -+ BUG_ON(!pd); -+ bfqg = pd_to_bfqg(pd); -+ BUG_ON(!bfqg); -+ bfqd = bfqg->bfqd; -+ BUG_ON(bfqd && !bfqd->root_group); -+ -+ entity = bfqg->my_entity; -+ -+#ifdef BFQ_MQ -+ spin_lock_irqsave(&bfqd->lock, flags); -+#endif -+ -+ if (!entity) /* root group */ -+ goto put_async_queues; -+ -+ /* -+ * Empty all service_trees belonging to this group before -+ * deactivating the group itself. -+ */ -+ for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) { -+ BUG_ON(!bfqg->sched_data.service_tree); -+ st = bfqg->sched_data.service_tree + i; -+ /* -+ * The idle tree may still contain bfq_queues belonging -+ * to exited task because they never migrated to a different -+ * cgroup from the one being destroyed now. -+ */ -+ bfq_flush_idle_tree(st); -+ -+ /* -+ * It may happen that some queues are still active -+ * (busy) upon group destruction (if the corresponding -+ * processes have been forced to terminate). We move -+ * all the leaf entities corresponding to these queues -+ * to the root_group. 
-+ * Also, it may happen that the group has an entity -+ * in service, which is disconnected from the active -+ * tree: it must be moved, too. -+ * There is no need to put the sync queues, as the -+ * scheduler has taken no reference. -+ */ -+ bfq_reparent_active_entities(bfqd, bfqg, st); -+ BUG_ON(!RB_EMPTY_ROOT(&st->active)); -+ BUG_ON(!RB_EMPTY_ROOT(&st->idle)); -+ } -+ BUG_ON(bfqg->sched_data.next_in_service); -+ BUG_ON(bfqg->sched_data.in_service_entity); -+ -+ __bfq_deactivate_entity(entity, false); -+ -+put_async_queues: -+ bfq_put_async_queues(bfqd, bfqg); -+ -+#ifdef BFQ_MQ -+ spin_unlock_irqrestore(&bfqd->lock, flags); -+#endif -+ /* -+ * @blkg is going offline and will be ignored by -+ * blkg_[rw]stat_recursive_sum(). Transfer stats to the parent so -+ * that they don't get lost. If IOs complete after this point, the -+ * stats for them will be lost. Oh well... -+ */ -+ bfqg_stats_xfer_dead(bfqg); -+} -+ -+static void bfq_end_wr_async(struct bfq_data *bfqd) -+{ -+ struct blkcg_gq *blkg; -+ -+ list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) { -+ struct bfq_group *bfqg = blkg_to_bfqg(blkg); -+ BUG_ON(!bfqg); -+ -+ bfq_end_wr_async_queues(bfqd, bfqg); -+ } -+ bfq_end_wr_async_queues(bfqd, bfqd->root_group); -+} -+ -+static int bfq_io_show_weight(struct seq_file *sf, void *v) -+{ -+ struct blkcg *blkcg = css_to_blkcg(seq_css(sf)); -+ struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg); -+ unsigned int val = 0; -+ -+ if (bfqgd) -+ val = bfqgd->weight; -+ -+ seq_printf(sf, "%u\n", val); -+ -+ return 0; -+} -+ -+static int bfq_io_set_weight_legacy(struct cgroup_subsys_state *css, -+ struct cftype *cftype, -+ u64 val) -+{ -+ struct blkcg *blkcg = css_to_blkcg(css); -+ struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg); -+ struct blkcg_gq *blkg; -+ int ret = -ERANGE; -+ -+ if (val < BFQ_MIN_WEIGHT || val > BFQ_MAX_WEIGHT) -+ return ret; -+ -+ ret = 0; -+ spin_lock_irq(&blkcg->lock); -+ bfqgd->weight = (unsigned short)val; -+ hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) { -+ struct bfq_group *bfqg = blkg_to_bfqg(blkg); -+ -+ if (!bfqg) -+ continue; -+ /* -+ * Setting the prio_changed flag of the entity -+ * to 1 with new_weight == weight would re-set -+ * the value of the weight to its ioprio mapping. -+ * Set the flag only if necessary. -+ */ -+ if ((unsigned short)val != bfqg->entity.new_weight) { -+ bfqg->entity.new_weight = (unsigned short)val; -+ /* -+ * Make sure that the above new value has been -+ * stored in bfqg->entity.new_weight before -+ * setting the prio_changed flag. In fact, -+ * this flag may be read asynchronously (in -+ * critical sections protected by a different -+ * lock than that held here), and finding this -+ * flag set may cause the execution of the code -+ * for updating parameters whose value may -+ * depend also on bfqg->entity.new_weight (in -+ * __bfq_entity_update_weight_prio). -+ * This barrier makes sure that the new value -+ * of bfqg->entity.new_weight is correctly -+ * seen in that code. 
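The publication ordering described above (store the payload, then set the flag, so that whoever observes the flag also observes the payload) can be illustrated with portable C11 atomics. This is only an analogy: the kernel code relies on smp_wmb() plus the readers' locking, not on the release/acquire pair used in this hypothetical toy_entity example.

#include <stdatomic.h>
#include <stdio.h>

struct toy_entity {
    unsigned short new_weight;  /* payload          */
    atomic_int prio_changed;    /* publication flag */
};

/* Write the payload first, then publish the flag with release semantics. */
static void toy_set_weight(struct toy_entity *e, unsigned short w)
{
    e->new_weight = w;                              /* 1: store payload */
    atomic_store_explicit(&e->prio_changed, 1,      /* 2: publish flag  */
                          memory_order_release);
}

/* A reader that sees the flag (acquire) is guaranteed the new payload. */
static int toy_read_weight(struct toy_entity *e, unsigned short *w)
{
    if (!atomic_load_explicit(&e->prio_changed, memory_order_acquire))
        return 0;               /* nothing published yet */
    *w = e->new_weight;
    return 1;
}

int main(void)
{
    struct toy_entity e = { .new_weight = 100 };
    unsigned short w;

    toy_set_weight(&e, 300);
    if (toy_read_weight(&e, &w))
        printf("weight seen by reader: %u\n", w);
    return 0;
}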
-+ */ -+ smp_wmb(); -+ bfqg->entity.prio_changed = 1; -+ } -+ } -+ spin_unlock_irq(&blkcg->lock); -+ -+ return ret; -+} -+ -+static ssize_t bfq_io_set_weight(struct kernfs_open_file *of, -+ char *buf, size_t nbytes, -+ loff_t off) -+{ -+ u64 weight; -+ /* First unsigned long found in the file is used */ -+ int ret = kstrtoull(strim(buf), 0, &weight); -+ -+ if (ret) -+ return ret; -+ -+ ret = bfq_io_set_weight_legacy(of_css(of), NULL, weight); -+ return ret ?: nbytes; -+} -+ -+#ifdef CONFIG_DEBUG_BLK_CGROUP -+static int bfqg_print_stat(struct seq_file *sf, void *v) -+{ -+ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat, -+ &blkcg_policy_bfq, seq_cft(sf)->private, false); -+ return 0; -+} -+ -+static int bfqg_print_rwstat(struct seq_file *sf, void *v) -+{ -+ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat, -+ &blkcg_policy_bfq, seq_cft(sf)->private, true); -+ return 0; -+} -+ -+static u64 bfqg_prfill_stat_recursive(struct seq_file *sf, -+ struct blkg_policy_data *pd, int off) -+{ -+ u64 sum = blkg_stat_recursive_sum(pd_to_blkg(pd), -+ &blkcg_policy_bfq, off); -+ return __blkg_prfill_u64(sf, pd, sum); -+} -+ -+static u64 bfqg_prfill_rwstat_recursive(struct seq_file *sf, -+ struct blkg_policy_data *pd, int off) -+{ -+ struct blkg_rwstat sum = blkg_rwstat_recursive_sum(pd_to_blkg(pd), -+ &blkcg_policy_bfq, -+ off); -+ return __blkg_prfill_rwstat(sf, pd, &sum); -+} -+ -+static int bfqg_print_stat_recursive(struct seq_file *sf, void *v) -+{ -+ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), -+ bfqg_prfill_stat_recursive, &blkcg_policy_bfq, -+ seq_cft(sf)->private, false); -+ return 0; -+} -+ -+static int bfqg_print_rwstat_recursive(struct seq_file *sf, void *v) -+{ -+ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), -+ bfqg_prfill_rwstat_recursive, &blkcg_policy_bfq, -+ seq_cft(sf)->private, true); -+ return 0; -+} -+ -+static u64 bfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd, -+ int off) -+{ -+ u64 sum = blkg_rwstat_total(&pd->blkg->stat_bytes); -+ -+ return __blkg_prfill_u64(sf, pd, sum >> 9); -+} -+ -+static int bfqg_print_stat_sectors(struct seq_file *sf, void *v) -+{ -+ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), -+ bfqg_prfill_sectors, &blkcg_policy_bfq, 0, false); -+ return 0; -+} -+ -+static u64 bfqg_prfill_sectors_recursive(struct seq_file *sf, -+ struct blkg_policy_data *pd, int off) -+{ -+ struct blkg_rwstat tmp = blkg_rwstat_recursive_sum(pd->blkg, NULL, -+ offsetof(struct blkcg_gq, stat_bytes)); -+ u64 sum = atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) + -+ atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]); -+ -+ return __blkg_prfill_u64(sf, pd, sum >> 9); -+} -+ -+static int bfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v) -+{ -+ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), -+ bfqg_prfill_sectors_recursive, &blkcg_policy_bfq, 0, -+ false); -+ return 0; -+} -+ -+ -+static u64 bfqg_prfill_avg_queue_size(struct seq_file *sf, -+ struct blkg_policy_data *pd, int off) -+{ -+ struct bfq_group *bfqg = pd_to_bfqg(pd); -+ u64 samples = blkg_stat_read(&bfqg->stats.avg_queue_size_samples); -+ u64 v = 0; -+ -+ if (samples) { -+ v = blkg_stat_read(&bfqg->stats.avg_queue_size_sum); -+ v = div64_u64(v, samples); -+ } -+ __blkg_prfill_u64(sf, pd, v); -+ return 0; -+} -+ -+/* print avg_queue_size */ -+static int bfqg_print_avg_queue_size(struct seq_file *sf, void *v) -+{ -+ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), -+ bfqg_prfill_avg_queue_size, &blkcg_policy_bfq, -+ 0, false); -+ return 0; -+} -+#endif /* 
CONFIG_DEBUG_BLK_CGROUP */ -+ -+static struct bfq_group * -+bfq_create_group_hierarchy(struct bfq_data *bfqd, int node) -+{ -+ int ret; -+ -+ ret = blkcg_activate_policy(bfqd->queue, &blkcg_policy_bfq); -+ if (ret) -+ return NULL; -+ -+ return blkg_to_bfqg(bfqd->queue->root_blkg); -+} -+ -+#ifdef BFQ_MQ -+#define BFQ_CGROUP_FNAME(param) "bfq-mq."#param -+#else -+#define BFQ_CGROUP_FNAME(param) "bfq-sq."#param -+#endif -+ -+static struct cftype bfq_blkcg_legacy_files[] = { -+ { -+ .name = BFQ_CGROUP_FNAME(weight), -+ .flags = CFTYPE_NOT_ON_ROOT, -+ .seq_show = bfq_io_show_weight, -+ .write_u64 = bfq_io_set_weight_legacy, -+ }, -+ -+ /* statistics, covers only the tasks in the bfqg */ -+ { -+ .name = BFQ_CGROUP_FNAME(io_service_bytes), -+ .private = (unsigned long)&blkcg_policy_bfq, -+ .seq_show = blkg_print_stat_bytes, -+ }, -+ { -+ .name = BFQ_CGROUP_FNAME(io_serviced), -+ .private = (unsigned long)&blkcg_policy_bfq, -+ .seq_show = blkg_print_stat_ios, -+ }, -+#ifdef CONFIG_DEBUG_BLK_CGROUP -+ { -+ .name = BFQ_CGROUP_FNAME(time), -+ .private = offsetof(struct bfq_group, stats.time), -+ .seq_show = bfqg_print_stat, -+ }, -+ { -+ .name = BFQ_CGROUP_FNAME(sectors), -+ .seq_show = bfqg_print_stat_sectors, -+ }, -+ { -+ .name = BFQ_CGROUP_FNAME(io_service_time), -+ .private = offsetof(struct bfq_group, stats.service_time), -+ .seq_show = bfqg_print_rwstat, -+ }, -+ { -+ .name = BFQ_CGROUP_FNAME(io_wait_time), -+ .private = offsetof(struct bfq_group, stats.wait_time), -+ .seq_show = bfqg_print_rwstat, -+ }, -+ { -+ .name = BFQ_CGROUP_FNAME(io_merged), -+ .private = offsetof(struct bfq_group, stats.merged), -+ .seq_show = bfqg_print_rwstat, -+ }, -+ { -+ .name = BFQ_CGROUP_FNAME(io_queued), -+ .private = offsetof(struct bfq_group, stats.queued), -+ .seq_show = bfqg_print_rwstat, -+ }, -+#endif /* CONFIG_DEBUG_BLK_CGROUP */ -+ -+ /* the same statictics which cover the bfqg and its descendants */ -+ { -+ .name = BFQ_CGROUP_FNAME(io_service_bytes_recursive), -+ .private = (unsigned long)&blkcg_policy_bfq, -+ .seq_show = blkg_print_stat_bytes_recursive, -+ }, -+ { -+ .name = BFQ_CGROUP_FNAME(io_serviced_recursive), -+ .private = (unsigned long)&blkcg_policy_bfq, -+ .seq_show = blkg_print_stat_ios_recursive, -+ }, -+#ifdef CONFIG_DEBUG_BLK_CGROUP -+ { -+ .name = BFQ_CGROUP_FNAME(time_recursive), -+ .private = offsetof(struct bfq_group, stats.time), -+ .seq_show = bfqg_print_stat_recursive, -+ }, -+ { -+ .name = BFQ_CGROUP_FNAME(sectors_recursive), -+ .seq_show = bfqg_print_stat_sectors_recursive, -+ }, -+ { -+ .name = BFQ_CGROUP_FNAME(io_service_time_recursive), -+ .private = offsetof(struct bfq_group, stats.service_time), -+ .seq_show = bfqg_print_rwstat_recursive, -+ }, -+ { -+ .name = BFQ_CGROUP_FNAME(io_wait_time_recursive), -+ .private = offsetof(struct bfq_group, stats.wait_time), -+ .seq_show = bfqg_print_rwstat_recursive, -+ }, -+ { -+ .name = BFQ_CGROUP_FNAME(io_merged_recursive), -+ .private = offsetof(struct bfq_group, stats.merged), -+ .seq_show = bfqg_print_rwstat_recursive, -+ }, -+ { -+ .name = BFQ_CGROUP_FNAME(io_queued_recursive), -+ .private = offsetof(struct bfq_group, stats.queued), -+ .seq_show = bfqg_print_rwstat_recursive, -+ }, -+ { -+ .name = BFQ_CGROUP_FNAME(avg_queue_size), -+ .seq_show = bfqg_print_avg_queue_size, -+ }, -+ { -+ .name = BFQ_CGROUP_FNAME(group_wait_time), -+ .private = offsetof(struct bfq_group, stats.group_wait_time), -+ .seq_show = bfqg_print_stat, -+ }, -+ { -+ .name = BFQ_CGROUP_FNAME(idle_time), -+ .private = offsetof(struct bfq_group, stats.idle_time), 
-+ .seq_show = bfqg_print_stat, -+ }, -+ { -+ .name = BFQ_CGROUP_FNAME(empty_time), -+ .private = offsetof(struct bfq_group, stats.empty_time), -+ .seq_show = bfqg_print_stat, -+ }, -+ { -+ .name = BFQ_CGROUP_FNAME(dequeue), -+ .private = offsetof(struct bfq_group, stats.dequeue), -+ .seq_show = bfqg_print_stat, -+ }, -+#endif /* CONFIG_DEBUG_BLK_CGROUP */ -+ { } /* terminate */ -+}; -+ -+static struct cftype bfq_blkg_files[] = { -+ { -+ .name = BFQ_CGROUP_FNAME(weight), -+ .flags = CFTYPE_NOT_ON_ROOT, -+ .seq_show = bfq_io_show_weight, -+ .write = bfq_io_set_weight, -+ }, -+ {} /* terminate */ -+}; -+ -+#undef BFQ_CGROUP_FNAME -+ -+#else /* BFQ_GROUP_IOSCHED_ENABLED */ -+ -+static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, -+ struct bfq_group *bfqg) {} -+ -+static void bfq_init_entity(struct bfq_entity *entity, -+ struct bfq_group *bfqg) -+{ -+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); -+ -+ entity->weight = entity->new_weight; -+ entity->orig_weight = entity->new_weight; -+ if (bfqq) { -+ bfqq->ioprio = bfqq->new_ioprio; -+ bfqq->ioprio_class = bfqq->new_ioprio_class; -+ } -+ entity->sched_data = &bfqg->sched_data; -+} -+ -+static void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) {} -+ -+static void bfq_end_wr_async(struct bfq_data *bfqd) -+{ -+ bfq_end_wr_async_queues(bfqd, bfqd->root_group); -+} -+ -+static struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd, -+ struct blkcg *blkcg) -+{ -+ return bfqd->root_group; -+} -+ -+static struct bfq_group *bfqq_group(struct bfq_queue *bfqq) -+{ -+ return bfqq->bfqd->root_group; -+} -+ -+static struct bfq_group * -+bfq_create_group_hierarchy(struct bfq_data *bfqd, int node) -+{ -+ struct bfq_group *bfqg; -+ int i; -+ -+ bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node); -+ if (!bfqg) -+ return NULL; -+ -+ for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) -+ bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT; -+ -+ return bfqg; -+} -+#endif -diff --git a/block/bfq-ioc.c b/block/bfq-ioc.c -new file mode 100644 -index 000000000000..fb7bb8f08b75 ---- /dev/null -+++ b/block/bfq-ioc.c -@@ -0,0 +1,36 @@ -+/* -+ * BFQ: I/O context handling. -+ * -+ * Based on ideas and code from CFQ: -+ * Copyright (C) 2003 Jens Axboe -+ * -+ * Copyright (C) 2008 Fabio Checconi -+ * Paolo Valente -+ * -+ * Copyright (C) 2010 Paolo Valente -+ */ -+ -+/** -+ * icq_to_bic - convert iocontext queue structure to bfq_io_cq. -+ * @icq: the iocontext queue. -+ */ -+static struct bfq_io_cq *icq_to_bic(struct io_cq *icq) -+{ -+ /* bic->icq is the first member, %NULL will convert to %NULL */ -+ return container_of(icq, struct bfq_io_cq, icq); -+} -+ -+/** -+ * bfq_bic_lookup - search into @ioc a bic associated to @bfqd. -+ * @bfqd: the lookup key. -+ * @ioc: the io_context of the process doing I/O. -+ * -+ * Queue lock must be held. -+ */ -+static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd, -+ struct io_context *ioc) -+{ -+ if (ioc) -+ return icq_to_bic(ioc_lookup_icq(ioc, bfqd->queue)); -+ return NULL; -+} -diff --git a/block/bfq-mq-iosched.c b/block/bfq-mq-iosched.c -new file mode 100644 -index 000000000000..47a49d9e6512 ---- /dev/null -+++ b/block/bfq-mq-iosched.c -@@ -0,0 +1,6548 @@ -+/* -+ * Budget Fair Queueing (BFQ) I/O scheduler. 
-+ * -+ * Based on ideas and code from CFQ: -+ * Copyright (C) 2003 Jens Axboe -+ * -+ * Copyright (C) 2008 Fabio Checconi -+ * Paolo Valente -+ * -+ * Copyright (C) 2015 Paolo Valente -+ * -+ * Copyright (C) 2017 Paolo Valente -+ * -+ * Licensed under the GPL-2 as detailed in the accompanying COPYING.BFQ -+ * file. -+ * -+ * BFQ is a proportional-share I/O scheduler, with some extra -+ * low-latency capabilities. BFQ also supports full hierarchical -+ * scheduling through cgroups. Next paragraphs provide an introduction -+ * on BFQ inner workings. Details on BFQ benefits and usage can be -+ * found in Documentation/block/bfq-iosched.txt. -+ * -+ * BFQ is a proportional-share storage-I/O scheduling algorithm based -+ * on the slice-by-slice service scheme of CFQ. But BFQ assigns -+ * budgets, measured in number of sectors, to processes instead of -+ * time slices. The device is not granted to the in-service process -+ * for a given time slice, but until it has exhausted its assigned -+ * budget. This change from the time to the service domain enables BFQ -+ * to distribute the device throughput among processes as desired, -+ * without any distortion due to throughput fluctuations, or to device -+ * internal queueing. BFQ uses an ad hoc internal scheduler, called -+ * B-WF2Q+, to schedule processes according to their budgets. More -+ * precisely, BFQ schedules queues associated with processes. Thanks to -+ * the accurate policy of B-WF2Q+, BFQ can afford to assign high -+ * budgets to I/O-bound processes issuing sequential requests (to -+ * boost the throughput), and yet guarantee a low latency to -+ * interactive and soft real-time applications. -+ * -+ * In particular, BFQ schedules I/O so as to achieve the latter goal-- -+ * low latency for interactive and soft real-time applications--if the -+ * low_latency parameter is set (default configuration). To this -+ * purpose, BFQ constantly tries to detect whether the I/O requests in -+ * a bfq_queue come from an interactive or a soft real-time -+ * application. For brevity, in these cases, the queue is said to be -+ * interactive or soft real-time. In both cases, BFQ privileges the -+ * service of the queue, over that of non-interactive and -+ * non-soft-real-time queues. This privileging is performed, mainly, -+ * by raising the weight of the queue. So, for brevity, we call just -+ * weight-raising periods the time periods during which a queue is -+ * privileged, because deemed interactive or soft real-time. -+ * -+ * The detection of soft real-time queues/applications is described in -+ * detail in the comments on the function -+ * bfq_bfqq_softrt_next_start. On the other hand, the detection of an -+ * interactive queue works as follows: a queue is deemed interactive -+ * if it is constantly non empty only for a limited time interval, -+ * after which it does become empty. The queue may be deemed -+ * interactive again (for a limited time), if it restarts being -+ * constantly non empty, provided that this happens only after the -+ * queue has remained empty for a given minimum idle time. -+ * -+ * By default, BFQ computes automatically the above maximum time -+ * interval, i.e., the time interval after which a constantly -+ * non-empty queue stops being deemed interactive. Since a queue is -+ * weight-raised while it is deemed interactive, this maximum time -+ * interval happens to coincide with the (maximum) duration of the -+ * weight-raising for interactive queues. 
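A very loose, hypothetical model of the interactivity rule just stated, with made-up thresholds (TOY_MIN_IDLE_MS, TOY_WR_DURATION_MS) standing in for the values BFQ derives at run time; it only shows when the privilege would be granted and when it would expire, not the bookkeeping the scheduler actually performs.

#include <stdbool.h>
#include <stdio.h>

#define TOY_MIN_IDLE_MS     2000    /* idle time required to (re)qualify */
#define TOY_WR_DURATION_MS  8000    /* how long the privilege may be kept */

struct toy_interactive_queue {
    long long became_busy_ms;   /* when the current busy period began */
    long long became_idle_ms;   /* when the last busy period ended    */
    bool interactive;           /* currently weight-raised?           */
};

/* Queue becomes backlogged: grant the privilege only after a long idle. */
static void toy_on_activation(struct toy_interactive_queue *q, long long now_ms)
{
    q->interactive = (now_ms - q->became_idle_ms) >= TOY_MIN_IDLE_MS;
    q->became_busy_ms = now_ms;
}

/* Periodic check: constantly busy for too long means not interactive. */
static void toy_on_tick(struct toy_interactive_queue *q, long long now_ms)
{
    if (q->interactive && now_ms - q->became_busy_ms > TOY_WR_DURATION_MS)
        q->interactive = false;
}

/* Queue becomes empty: just remember when, for the next activation. */
static void toy_on_idle(struct toy_interactive_queue *q, long long now_ms)
{
    q->became_idle_ms = now_ms;
}

int main(void)
{
    struct toy_interactive_queue q = { .became_idle_ms = -10000 };

    toy_on_activation(&q, 0);
    printf("activated after a long idle period: interactive=%d\n", q.interactive);
    toy_on_tick(&q, 9000);
    printf("still constantly busy after 9 s:    interactive=%d\n", q.interactive);
    toy_on_idle(&q, 9500);
    return 0;
}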
-+ * -+ * NOTE: if the main or only goal, with a given device, is to achieve -+ * the maximum-possible throughput at all times, then do switch off -+ * all low-latency heuristics for that device, by setting low_latency -+ * to 0. -+ * -+ * BFQ is described in [1], where also a reference to the initial, -+ * more theoretical paper on BFQ can be found. The interested reader -+ * can find in the latter paper full details on the main algorithm, as -+ * well as formulas of the guarantees and formal proofs of all the -+ * properties. With respect to the version of BFQ presented in these -+ * papers, this implementation adds a few more heuristics, such as the -+ * one that guarantees a low latency to soft real-time applications, -+ * and a hierarchical extension based on H-WF2Q+. -+ * -+ * B-WF2Q+ is based on WF2Q+, that is described in [2], together with -+ * H-WF2Q+, while the augmented tree used to implement B-WF2Q+ with O(log N) -+ * complexity derives from the one introduced with EEVDF in [3]. -+ * -+ * [1] P. Valente, A. Avanzini, "Evolution of the BFQ Storage I/O -+ * Scheduler", Proceedings of the First Workshop on Mobile System -+ * Technologies (MST-2015), May 2015. -+ * http://algogroup.unimore.it/people/paolo/disk_sched/mst-2015.pdf -+ * -+ * http://algogroup.unimo.it/people/paolo/disk_sched/bf1-v1-suite-results.pdf -+ * -+ * [2] Jon C.R. Bennett and H. Zhang, ``Hierarchical Packet Fair Queueing -+ * Algorithms,'' IEEE/ACM Transactions on Networking, 5(5):675-689, -+ * Oct 1997. -+ * -+ * http://www.cs.cmu.edu/~hzhang/papers/TON-97-Oct.ps.gz -+ * -+ * [3] I. Stoica and H. Abdel-Wahab, ``Earliest Eligible Virtual Deadline -+ * First: A Flexible and Accurate Mechanism for Proportional Share -+ * Resource Allocation,'' technical report. -+ * -+ * http://www.cs.berkeley.edu/~istoica/papers/eevdf-tr-95.pdf -+ */ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "blk.h" -+#include "blk-mq.h" -+#include "blk-mq-tag.h" -+#include "blk-mq-sched.h" -+#include "bfq-mq.h" -+#include "blk-wbt.h" -+ -+/* Expiration time of sync (0) and async (1) requests, in ns. */ -+static const u64 bfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 }; -+ -+/* Maximum backwards seek, in KiB. */ -+static const int bfq_back_max = (16 * 1024); -+ -+/* Penalty of a backwards seek, in number of sectors. */ -+static const int bfq_back_penalty = 2; -+ -+/* Idling period duration, in ns. */ -+static u32 bfq_slice_idle = (NSEC_PER_SEC / 125); -+ -+/* Minimum number of assigned budgets for which stats are safe to compute. */ -+static const int bfq_stats_min_budgets = 194; -+ -+/* Default maximum budget values, in sectors and number of requests. */ -+static const int bfq_default_max_budget = (16 * 1024); -+ -+/* -+ * When a sync request is dispatched, the queue that contains that -+ * request, and all the ancestor entities of that queue, are charged -+ * with the number of sectors of the request. In constrast, if the -+ * request is async, then the queue and its ancestor entities are -+ * charged with the number of sectors of the request, multiplied by -+ * the factor below. This throttles the bandwidth for async I/O, -+ * w.r.t. to sync I/O, and it is done to counter the tendency of async -+ * writes to steal I/O throughput to reads. -+ * -+ * The current value of this parameter is the result of a tuning with -+ * several hardware and software configurations. 
We tried to find the -+ * lowest value for which writes do not cause noticeable problems to -+ * reads. In fact, the lower this parameter, the stabler I/O control, -+ * in the following respect. The lower this parameter is, the less -+ * the bandwidth enjoyed by a group decreases -+ * - when the group does writes, w.r.t. to when it does reads; -+ * - when other groups do reads, w.r.t. to when they do writes. -+ */ -+static const int bfq_async_charge_factor = 3; -+ -+/* Default timeout values, in jiffies, approximating CFQ defaults. */ -+static const int bfq_timeout = (HZ / 8); -+ -+/* -+ * Time limit for merging (see comments in bfq_setup_cooperator). Set -+ * to the slowest value that, in our tests, proved to be effective in -+ * removing false positives, while not causing true positives to miss -+ * queue merging. -+ * -+ * As can be deduced from the low time limit below, queue merging, if -+ * successful, happens at the very beggining of the I/O of the involved -+ * cooperating processes, as a consequence of the arrival of the very -+ * first requests from each cooperator. After that, there is very -+ * little chance to find cooperators. -+ */ -+static const unsigned long bfq_merge_time_limit = HZ/10; -+ -+#define MAX_LENGTH_REASON_NAME 25 -+ -+static const char reason_name[][MAX_LENGTH_REASON_NAME] = {"TOO_IDLE", -+"BUDGET_TIMEOUT", "BUDGET_EXHAUSTED", "NO_MORE_REQUESTS", -+"PREEMPTED"}; -+ -+static struct kmem_cache *bfq_pool; -+ -+/* Below this threshold (in ns), we consider thinktime immediate. */ -+#define BFQ_MIN_TT (2 * NSEC_PER_MSEC) -+ -+/* hw_tag detection: parallel requests threshold and min samples needed. */ -+#define BFQ_HW_QUEUE_THRESHOLD 3 -+#define BFQ_HW_QUEUE_SAMPLES 32 -+ -+#define BFQQ_SEEK_THR (sector_t)(8 * 100) -+#define BFQQ_SECT_THR_NONROT (sector_t)(2 * 32) -+#define BFQ_RQ_SEEKY(bfqd, last_pos, rq) \ -+ (get_sdist(last_pos, rq) > \ -+ BFQQ_SEEK_THR && \ -+ (!blk_queue_nonrot(bfqd->queue) || \ -+ blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT)) -+#define BFQQ_CLOSE_THR (sector_t)(8 * 1024) -+#define BFQQ_SEEKY(bfqq) (hweight32(bfqq->seek_history) > 19) -+ -+/* Min number of samples required to perform peak-rate update */ -+#define BFQ_RATE_MIN_SAMPLES 32 -+/* Min observation time interval required to perform a peak-rate update (ns) */ -+#define BFQ_RATE_MIN_INTERVAL (300*NSEC_PER_MSEC) -+/* Target observation time interval for a peak-rate update (ns) */ -+#define BFQ_RATE_REF_INTERVAL NSEC_PER_SEC -+ -+/* -+ * Shift used for peak-rate fixed precision calculations. -+ * With -+ * - the current shift: 16 positions -+ * - the current type used to store rate: u32 -+ * - the current unit of measure for rate: [sectors/usec], or, more precisely, -+ * [(sectors/usec) / 2^BFQ_RATE_SHIFT] to take into account the shift, -+ * the range of rates that can be stored is -+ * [1 / 2^BFQ_RATE_SHIFT, 2^(32 - BFQ_RATE_SHIFT)] sectors/usec = -+ * [1 / 2^16, 2^16] sectors/usec = [15e-6, 65536] sectors/usec = -+ * [15, 65G] sectors/sec -+ * Which, assuming a sector size of 512B, corresponds to a range of -+ * [7.5K, 33T] B/sec -+ */ -+#define BFQ_RATE_SHIFT 16 -+ -+/* -+ * When configured for computing the duration of the weight-raising -+ * for interactive queues automatically (see the comments at the -+ * beginning of this file), BFQ does it using the following formula: -+ * duration = (ref_rate / r) * ref_wr_duration, -+ * where r is the peak rate of the device, and ref_rate and -+ * ref_wr_duration are two reference parameters. 
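To make the fixed-point representation above concrete, the following standalone sketch converts an assumed throughput to the scaled sectors-per-usec format and back. toy_rate_from_Bps() and toy_rate_to_MiBps() are invented helpers; the only value taken from the patch is the reference rotational rate 14000 (defined a few lines below), which works out to roughly 104 MiB/s.

#include <stdint.h>
#include <stdio.h>

#define TOY_RATE_SHIFT 16   /* same scaling idea as BFQ_RATE_SHIFT */

/* Convert bytes/second to the scaled sectors-per-usec representation. */
static uint32_t toy_rate_from_Bps(uint64_t bytes_per_sec)
{
    uint64_t sectors_per_sec = bytes_per_sec / 512;

    /* scale by 2^16 before dividing by 10^6 to keep precision */
    return (uint32_t)((sectors_per_sec << TOY_RATE_SHIFT) / 1000000ULL);
}

/* Convert the scaled representation back to MiB/s, for display. */
static double toy_rate_to_MiBps(uint32_t rate)
{
    return (double)rate / (1 << TOY_RATE_SHIFT) * 1000000.0 * 512.0
        / (1024.0 * 1024.0);
}

int main(void)
{
    /* A hypothetical ~100 MiB/s rotational disk. */
    uint32_t r = toy_rate_from_Bps(100ULL * 1024 * 1024);

    printf("scaled rate = %u (~%.1f MiB/s)\n", (unsigned)r, toy_rate_to_MiBps(r));
    printf("reference rotational rate 14000 corresponds to ~%.1f MiB/s\n",
           toy_rate_to_MiBps(14000));
    return 0;
}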
In particular, -+ * ref_rate is the peak rate of the reference storage device (see -+ * below), and ref_wr_duration is about the maximum time needed, with -+ * BFQ and while reading two files in parallel, to load typical large -+ * applications on the reference device (see the comments on -+ * max_service_from_wr below, for more details on how ref_wr_duration -+ * is obtained). In practice, the slower/faster the device at hand -+ * is, the more/less it takes to load applications with respect to the -+ * reference device. Accordingly, the longer/shorter BFQ grants -+ * weight raising to interactive applications. -+ * -+ * BFQ uses two different reference pairs (ref_rate, ref_wr_duration), -+ * depending on whether the device is rotational or non-rotational. -+ * -+ * In the following definitions, ref_rate[0] and ref_wr_duration[0] -+ * are the reference values for a rotational device, whereas -+ * ref_rate[1] and ref_wr_duration[1] are the reference values for a -+ * non-rotational device. The reference rates are not the actual peak -+ * rates of the devices used as a reference, but slightly lower -+ * values. The reason for using slightly lower values is that the -+ * peak-rate estimator tends to yield slightly lower values than the -+ * actual peak rate (it can yield the actual peak rate only if there -+ * is only one process doing I/O, and the process does sequential -+ * I/O). -+ * -+ * The reference peak rates are measured in sectors/usec, left-shifted -+ * by BFQ_RATE_SHIFT. -+ */ -+static int ref_rate[2] = {14000, 33000}; -+/* -+ * To improve readability, a conversion function is used to initialize -+ * the following array, which entails that the array can be -+ * initialized only in a function. -+ */ -+static int ref_wr_duration[2]; -+ -+/* -+ * BFQ uses the above-detailed, time-based weight-raising mechanism to -+ * privilege interactive tasks. This mechanism is vulnerable to the -+ * following false positives: I/O-bound applications that will go on -+ * doing I/O for much longer than the duration of weight -+ * raising. These applications have basically no benefit from being -+ * weight-raised at the beginning of their I/O. On the opposite end, -+ * while being weight-raised, these applications -+ * a) unjustly steal throughput to applications that may actually need -+ * low latency; -+ * b) make BFQ uselessly perform device idling; device idling results -+ * in loss of device throughput with most flash-based storage, and may -+ * increase latencies when used purposelessly. -+ * -+ * BFQ tries to reduce these problems, by adopting the following -+ * countermeasure. To introduce this countermeasure, we need first to -+ * finish explaining how the duration of weight-raising for -+ * interactive tasks is computed. -+ * -+ * For a bfq_queue deemed as interactive, the duration of weight -+ * raising is dynamically adjusted, as a function of the estimated -+ * peak rate of the device, so as to be equal to the time needed to -+ * execute the 'largest' interactive task we benchmarked so far. By -+ * largest task, we mean the task for which each involved process has -+ * to do more I/O than for any of the other tasks we benchmarked. This -+ * reference interactive task is the start-up of LibreOffice Writer, -+ * and in this task each process/bfq_queue needs to have at most ~110K -+ * sectors transferred. 
-+ *
-+ * This last piece of information enables BFQ to reduce the actual
-+ * duration of weight-raising for at least one class of I/O-bound
-+ * applications: those doing sequential or quasi-sequential I/O. An
-+ * example is file copy. In fact, once started, the main I/O-bound
-+ * processes of these applications usually consume the above 110K
-+ * sectors in much less time than the processes of an application that
-+ * is starting, because these I/O-bound processes will greedily devote
-+ * almost all their CPU cycles only to their target,
-+ * throughput-friendly I/O operations. This is even more true if BFQ
-+ * happens to be underestimating the device peak rate, and thus
-+ * overestimating the duration of weight raising. But, according to
-+ * our measurements, once transferred 110K sectors, these processes
-+ * have no right to be weight-raised any longer.
-+ *
-+ * Based on the last consideration, BFQ ends weight-raising for a
-+ * bfq_queue if the latter happens to have received an amount of
-+ * service at least equal to the following constant. The constant is
-+ * set to slightly more than 110K, to have a minimum safety margin.
-+ *
-+ * This early ending of weight-raising reduces the amount of time
-+ * during which interactive false positives cause the two problems
-+ * described at the beginning of these comments.
-+ */
-+static const unsigned long max_service_from_wr = 120000;
-+
-+#define BFQ_SERVICE_TREE_INIT ((struct bfq_service_tree) \
-+ { RB_ROOT, RB_ROOT, NULL, NULL, 0, 0 })
-+
-+#define RQ_BIC(rq) icq_to_bic((rq)->elv.priv[0])
-+#define RQ_BFQQ(rq) ((rq)->elv.priv[1])
-+
-+/**
-+ * icq_to_bic - convert iocontext queue structure to bfq_io_cq.
-+ * @icq: the iocontext queue.
-+ */
-+static struct bfq_io_cq *icq_to_bic(struct io_cq *icq)
-+{
-+ /* bic->icq is the first member, %NULL will convert to %NULL */
-+ return container_of(icq, struct bfq_io_cq, icq);
-+}
-+
-+/**
-+ * bfq_bic_lookup - search into @ioc a bic associated to @bfqd.
-+ * @bfqd: the lookup key.
-+ * @ioc: the io_context of the process doing I/O.
-+ * @q: the request queue.
-+ */
-+static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
-+ struct io_context *ioc,
-+ struct request_queue *q)
-+{
-+ if (ioc) {
-+ unsigned long flags;
-+ struct bfq_io_cq *icq;
-+
-+ spin_lock_irqsave(q->queue_lock, flags);
-+ icq = icq_to_bic(ioc_lookup_icq(ioc, q));
-+ spin_unlock_irqrestore(q->queue_lock, flags);
-+
-+ return icq;
-+ }
-+
-+ return NULL;
-+}
-+
-+/*
-+ * Scheduler run of queue, if there are requests pending and no one in the
-+ * driver that will restart queueing.
-+ */
-+static void bfq_schedule_dispatch(struct bfq_data *bfqd)
-+{
-+ if (bfqd->queued != 0) {
-+ bfq_log(bfqd, "");
-+ blk_mq_run_hw_queues(bfqd->queue, true);
-+ }
-+}
-+
-+#define BFQ_MQ
-+#include "bfq-sched.c"
-+#include "bfq-cgroup-included.c"
-+
-+#define bfq_class_idle(bfqq) ((bfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
-+#define bfq_class_rt(bfqq) ((bfqq)->ioprio_class == IOPRIO_CLASS_RT)
-+
-+#define bfq_sample_valid(samples) ((samples) > 80)
-+
-+/*
-+ * Lifted from AS - choose which of rq1 and rq2 is best served now.
-+ * We choose the request that is closest to the head right now. Distance
-+ * behind the head is penalized and only allowed to a certain extent.
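Before the full bfq_choose_req() implementation, which follows in the patch, the distance rule alone can be sketched as below: forward seeks cost their length, backward seeks within the allowed window cost twice their length, and anything further behind counts as a wrap. The defaults (16*1024 KiB of backward seek, 1 KiB = 2 sectors, penalty 2) are the ones declared earlier in this file; toy_distance() itself is an invented helper and omits the sync/REQ_META tie-breaking of the real function.

#include <limits.h>
#include <stdio.h>

typedef unsigned long long sector_t;

/* Defaults declared earlier in this file: 16*1024 KiB of allowed
 * backward seek (1 KiB = 2 sectors) and a 2x backward-seek penalty. */
#define TOY_BACK_MAX_SECTORS    ((sector_t)(16 * 1024) * 2)
#define TOY_BACK_PENALTY        2ULL

/* Effective "distance" of a request at sector s from the head position:
 * forward seeks cost their length, short backward seeks cost twice their
 * length, anything further behind is a wrap and always loses. */
static unsigned long long toy_distance(sector_t s, sector_t head)
{
    if (s >= head)
        return s - head;
    if (s + TOY_BACK_MAX_SECTORS >= head)
        return (head - s) * TOY_BACK_PENALTY;
    return ULLONG_MAX;
}

int main(void)
{
    sector_t head = 1000000;

    printf("1000 sectors ahead  -> %llu\n", toy_distance(head + 1000, head));
    printf("1000 sectors behind -> %llu\n", toy_distance(head - 1000, head));
    printf("far behind (wrap)   -> %llu\n", toy_distance(100, head));
    return 0;
}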
-+ */ -+static struct request *bfq_choose_req(struct bfq_data *bfqd, -+ struct request *rq1, -+ struct request *rq2, -+ sector_t last) -+{ -+ sector_t s1, s2, d1 = 0, d2 = 0; -+ unsigned long back_max; -+#define BFQ_RQ1_WRAP 0x01 /* request 1 wraps */ -+#define BFQ_RQ2_WRAP 0x02 /* request 2 wraps */ -+ unsigned int wrap = 0; /* bit mask: requests behind the disk head? */ -+ -+ if (!rq1 || rq1 == rq2) -+ return rq2; -+ if (!rq2) -+ return rq1; -+ -+ if (rq_is_sync(rq1) && !rq_is_sync(rq2)) -+ return rq1; -+ else if (rq_is_sync(rq2) && !rq_is_sync(rq1)) -+ return rq2; -+ if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META)) -+ return rq1; -+ else if ((rq2->cmd_flags & REQ_META) && !(rq1->cmd_flags & REQ_META)) -+ return rq2; -+ -+ s1 = blk_rq_pos(rq1); -+ s2 = blk_rq_pos(rq2); -+ -+ /* -+ * By definition, 1KiB is 2 sectors. -+ */ -+ back_max = bfqd->bfq_back_max * 2; -+ -+ /* -+ * Strict one way elevator _except_ in the case where we allow -+ * short backward seeks which are biased as twice the cost of a -+ * similar forward seek. -+ */ -+ if (s1 >= last) -+ d1 = s1 - last; -+ else if (s1 + back_max >= last) -+ d1 = (last - s1) * bfqd->bfq_back_penalty; -+ else -+ wrap |= BFQ_RQ1_WRAP; -+ -+ if (s2 >= last) -+ d2 = s2 - last; -+ else if (s2 + back_max >= last) -+ d2 = (last - s2) * bfqd->bfq_back_penalty; -+ else -+ wrap |= BFQ_RQ2_WRAP; -+ -+ /* Found required data */ -+ -+ /* -+ * By doing switch() on the bit mask "wrap" we avoid having to -+ * check two variables for all permutations: --> faster! -+ */ -+ switch (wrap) { -+ case 0: /* common case for CFQ: rq1 and rq2 not wrapped */ -+ if (d1 < d2) -+ return rq1; -+ else if (d2 < d1) -+ return rq2; -+ -+ if (s1 >= s2) -+ return rq1; -+ else -+ return rq2; -+ -+ case BFQ_RQ2_WRAP: -+ return rq1; -+ case BFQ_RQ1_WRAP: -+ return rq2; -+ case (BFQ_RQ1_WRAP|BFQ_RQ2_WRAP): /* both rqs wrapped */ -+ default: -+ /* -+ * Since both rqs are wrapped, -+ * start with the one that's further behind head -+ * (--> only *one* back seek required), -+ * since back seek takes more time than forward. -+ */ -+ if (s1 <= s2) -+ return rq1; -+ else -+ return rq2; -+ } -+} -+ -+/* -+ * Async I/O can easily starve sync I/O (both sync reads and sync -+ * writes), by consuming all tags. Similarly, storms of sync writes, -+ * such as those that sync(2) may trigger, can starve sync reads. -+ * Limit depths of async I/O and sync writes so as to counter both -+ * problems. -+ */ -+static void bfq_limit_depth(unsigned int op, struct blk_mq_alloc_data *data) -+{ -+ struct bfq_data *bfqd = data->q->elevator->elevator_data; -+ -+ if (op_is_sync(op) && !op_is_write(op)) -+ return; -+ -+ data->shallow_depth = -+ bfqd->word_depths[!!bfqd->wr_busy_queues][op_is_sync(op)]; -+ -+ bfq_log(bfqd, "wr_busy %d sync %d depth %u", -+ bfqd->wr_busy_queues, op_is_sync(op), -+ data->shallow_depth); -+} -+ -+static struct bfq_queue * -+bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root, -+ sector_t sector, struct rb_node **ret_parent, -+ struct rb_node ***rb_link) -+{ -+ struct rb_node **p, *parent; -+ struct bfq_queue *bfqq = NULL; -+ -+ parent = NULL; -+ p = &root->rb_node; -+ while (*p) { -+ struct rb_node **n; -+ -+ parent = *p; -+ bfqq = rb_entry(parent, struct bfq_queue, pos_node); -+ -+ /* -+ * Sort strictly based on sector. Smallest to the left, -+ * largest to the right. 
-+ */ -+ if (sector > blk_rq_pos(bfqq->next_rq)) -+ n = &(*p)->rb_right; -+ else if (sector < blk_rq_pos(bfqq->next_rq)) -+ n = &(*p)->rb_left; -+ else -+ break; -+ p = n; -+ bfqq = NULL; -+ } -+ -+ *ret_parent = parent; -+ if (rb_link) -+ *rb_link = p; -+ -+ bfq_log(bfqd, "%llu: returning %d", -+ (unsigned long long) sector, -+ bfqq ? bfqq->pid : 0); -+ -+ return bfqq; -+} -+ -+static bool bfq_too_late_for_merging(struct bfq_queue *bfqq) -+{ -+ return bfqq->service_from_backlogged > 0 && -+ time_is_before_jiffies(bfqq->first_IO_time + -+ bfq_merge_time_limit); -+} -+ -+static void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq) -+{ -+ struct rb_node **p, *parent; -+ struct bfq_queue *__bfqq; -+ -+ if (bfqq->pos_root) { -+ rb_erase(&bfqq->pos_node, bfqq->pos_root); -+ bfqq->pos_root = NULL; -+ } -+ -+ /* -+ * bfqq cannot be merged any longer (see comments in -+ * bfq_setup_cooperator): no point in adding bfqq into the -+ * position tree. -+ */ -+ if (bfq_too_late_for_merging(bfqq)) -+ return; -+ -+ if (bfq_class_idle(bfqq)) -+ return; -+ if (!bfqq->next_rq) -+ return; -+ -+ bfqq->pos_root = &bfq_bfqq_to_bfqg(bfqq)->rq_pos_tree; -+ __bfqq = bfq_rq_pos_tree_lookup(bfqd, bfqq->pos_root, -+ blk_rq_pos(bfqq->next_rq), &parent, &p); -+ if (!__bfqq) { -+ rb_link_node(&bfqq->pos_node, parent, p); -+ rb_insert_color(&bfqq->pos_node, bfqq->pos_root); -+ } else -+ bfqq->pos_root = NULL; -+} -+ -+/* -+ * The following function returns true if every queue must receive the -+ * same share of the throughput (this condition is used when deciding -+ * whether idling may be disabled, see the comments in the function -+ * bfq_better_to_idle()). -+ * -+ * Such a scenario occurs when: -+ * 1) all active queues have the same weight, -+ * 2) all active queues belong to the same I/O-priority class, -+ * 3) all active groups at the same level in the groups tree have the same -+ * weight, -+ * 4) all active groups at the same level in the groups tree have the same -+ * number of children. -+ * -+ * Unfortunately, keeping the necessary state for evaluating exactly -+ * the last two symmetry sub-conditions above would be quite complex -+ * and time consuming. Therefore this function evaluates, instead, -+ * only the following stronger three sub-conditions, for which it is -+ * much easier to maintain the needed state: -+ * 1) all active queues have the same weight, -+ * 2) all active queues belong to the same I/O-priority class, -+ * 3) there are no active groups. -+ * In particular, the last condition is always true if hierarchical -+ * support or the cgroups interface are not enabled, thus no state -+ * needs to be maintained in this case. -+ */ -+static bool bfq_symmetric_scenario(struct bfq_data *bfqd) -+{ -+ /* -+ * For queue weights to differ, queue_weights_tree must contain -+ * at least two nodes. 
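The simplified symmetry check described above boils down to three counts, as in this hypothetical predicate (toy_symmetric() is not part of the patch); the real bfq_symmetric_scenario(), whose body continues just below, derives the same information from the weights tree and the per-class busy-queue counters.

#include <stdbool.h>
#include <stdio.h>

/* Symmetric iff: at most one distinct queue weight, at most one busy
 * I/O-priority class, and no group with pending requests. */
static bool toy_symmetric(int distinct_weights, const int busy_per_class[3],
                          int groups_with_pending_reqs)
{
    int classes_busy = 0, i;

    for (i = 0; i < 3; i++)     /* RT, BE, IDLE */
        if (busy_per_class[i] > 0)
            classes_busy++;

    return distinct_weights <= 1 && classes_busy <= 1 &&
           groups_with_pending_reqs == 0;
}

int main(void)
{
    int only_be[3] = { 0, 4, 0 };   /* four busy best-effort queues */
    int rt_and_be[3] = { 1, 3, 0 }; /* RT and BE both busy          */

    printf("equal weights, one class, no groups -> %d\n",
           toy_symmetric(1, only_be, 0));
    printf("two classes busy                    -> %d\n",
           toy_symmetric(1, rt_and_be, 0));
    return 0;
}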
-+ */ -+ bool varied_queue_weights = !RB_EMPTY_ROOT(&bfqd->queue_weights_tree) && -+ (bfqd->queue_weights_tree.rb_node->rb_left || -+ bfqd->queue_weights_tree.rb_node->rb_right); -+ -+ bool multiple_classes_busy = -+ (bfqd->busy_queues[0] && bfqd->busy_queues[1]) || -+ (bfqd->busy_queues[0] && bfqd->busy_queues[2]) || -+ (bfqd->busy_queues[1] && bfqd->busy_queues[2]); -+ -+ bfq_log(bfqd, "varied_queue_weights %d mul_classes %d", -+ varied_queue_weights, multiple_classes_busy); -+ -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ bfq_log(bfqd, "num_groups_with_pending_reqs %u", -+ bfqd->num_groups_with_pending_reqs); -+#endif -+ -+ return !(varied_queue_weights || multiple_classes_busy -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ || bfqd->num_groups_with_pending_reqs > 0 -+#endif -+ ); -+} -+ -+/* -+ * If the weight-counter tree passed as input contains no counter for -+ * the weight of the input queue, then add that counter; otherwise just -+ * increment the existing counter. -+ * -+ * Note that weight-counter trees contain few nodes in mostly symmetric -+ * scenarios. For example, if all queues have the same weight, then the -+ * weight-counter tree for the queues may contain at most one node. -+ * This holds even if low_latency is on, because weight-raised queues -+ * are not inserted in the tree. -+ * In most scenarios, the rate at which nodes are created/destroyed -+ * should be low too. -+ */ -+static void bfq_weights_tree_add(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq, -+ struct rb_root *root) -+{ -+ struct bfq_entity *entity = &bfqq->entity; -+ struct rb_node **new = &(root->rb_node), *parent = NULL; -+ -+ /* -+ * Do not insert if the queue is already associated with a -+ * counter, which happens if: -+ * 1) a request arrival has caused the queue to become both -+ * non-weight-raised, and hence change its weight, and -+ * backlogged; in this respect, each of the two events -+ * causes an invocation of this function, -+ * 2) this is the invocation of this function caused by the -+ * second event. This second invocation is actually useless, -+ * and we handle this fact by exiting immediately. More -+ * efficient or clearer solutions might possibly be adopted. -+ */ -+ if (bfqq->weight_counter) -+ return; -+ -+ while (*new) { -+ struct bfq_weight_counter *__counter = container_of(*new, -+ struct bfq_weight_counter, -+ weights_node); -+ parent = *new; -+ -+ if (entity->weight == __counter->weight) { -+ bfqq->weight_counter = __counter; -+ goto inc_counter; -+ } -+ if (entity->weight < __counter->weight) -+ new = &((*new)->rb_left); -+ else -+ new = &((*new)->rb_right); -+ } -+ -+ bfqq->weight_counter = kzalloc(sizeof(struct bfq_weight_counter), -+ GFP_ATOMIC); -+ -+ /* -+ * In the unlucky event of an allocation failure, we just -+ * exit. This will cause the weight of queue to not be -+ * considered in bfq_symmetric_scenario, which, in its turn, -+ * causes the scenario to be deemed wrongly symmetric in case -+ * bfqq's weight would have been the only weight making the -+ * scenario asymmetric. On the bright side, no unbalance will -+ * however occur when bfqq becomes inactive again (the -+ * invocation of this function is triggered by an activation -+ * of queue). In fact, bfq_weights_tree_remove does nothing -+ * if !bfqq->weight_counter. 
-+ */ -+ if (unlikely(!bfqq->weight_counter)) -+ return; -+ -+ bfqq->weight_counter->weight = entity->weight; -+ rb_link_node(&bfqq->weight_counter->weights_node, parent, new); -+ rb_insert_color(&bfqq->weight_counter->weights_node, root); -+ -+inc_counter: -+ bfqq->weight_counter->num_active++; -+ bfqq->ref++; -+ -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "refs %d weight %d symmetric %d", -+ bfqq->ref, -+ entity->weight, -+ bfq_symmetric_scenario(bfqd)); -+} -+ -+/* -+ * Decrement the weight counter associated with the queue, and, if the -+ * counter reaches 0, remove the counter from the tree. -+ * See the comments to the function bfq_weights_tree_add() for considerations -+ * about overhead. -+ */ -+static void __bfq_weights_tree_remove(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq, -+ struct rb_root *root) -+{ -+ struct bfq_entity *entity = &bfqq->entity; -+ -+ if (!bfqq->weight_counter) -+ return; -+ -+ BUG_ON(RB_EMPTY_ROOT(root)); -+ BUG_ON(bfqq->weight_counter->weight != entity->weight); -+ -+ BUG_ON(!bfqq->weight_counter->num_active); -+ bfqq->weight_counter->num_active--; -+ -+ if (bfqq->weight_counter->num_active > 0) -+ goto reset_entity_pointer; -+ -+ rb_erase(&bfqq->weight_counter->weights_node, root); -+ kfree(bfqq->weight_counter); -+ -+reset_entity_pointer: -+ bfqq->weight_counter = NULL; -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "refs %d weight %d symmetric %d", -+ bfqq->ref, -+ entity->weight, -+ bfq_symmetric_scenario(bfqd)); -+ bfq_put_queue(bfqq); -+} -+ -+/* -+ * Invoke __bfq_weights_tree_remove on bfqq and decrement the number -+ * of active groups for each queue's inactive parent entity. -+ */ -+static void bfq_weights_tree_remove(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq) -+{ -+ struct bfq_entity *entity = bfqq->entity.parent; -+ -+ for_each_entity(entity) { -+ struct bfq_sched_data *sd = entity->my_sched_data; -+ -+ BUG_ON(entity->sched_data == NULL); /* -+ * It would mean -+ * that this is -+ * the root group. -+ */ -+ -+ if (sd->next_in_service || sd->in_service_entity) { -+ BUG_ON(!entity->in_groups_with_pending_reqs); -+ /* -+ * entity is still active, because either -+ * next_in_service or in_service_entity is not -+ * NULL (see the comments on the definition of -+ * next_in_service for details on why -+ * in_service_entity must be checked too). -+ * -+ * As a consequence, its parent entities are -+ * active as well, and thus this loop must -+ * stop here. -+ */ -+ break; -+ } -+ -+ BUG_ON(!bfqd->num_groups_with_pending_reqs && -+ entity->in_groups_with_pending_reqs); -+ /* -+ * The decrement of num_groups_with_pending_reqs is -+ * not performed immediately upon the deactivation of -+ * entity, but it is delayed to when it also happens -+ * that the first leaf descendant bfqq of entity gets -+ * all its pending requests completed. The following -+ * instructions perform this delayed decrement, if -+ * needed. See the comments on -+ * num_groups_with_pending_reqs for details. -+ */ -+ if (entity->in_groups_with_pending_reqs) { -+ entity->in_groups_with_pending_reqs = false; -+ bfqd->num_groups_with_pending_reqs--; -+ } -+ bfq_log_bfqq(bfqd, bfqq, "num_groups_with_pending_reqs %u", -+ bfqd->num_groups_with_pending_reqs); -+ } -+ -+ /* -+ * Next function is invoked last, because it causes bfqq to be -+ * freed if the following holds: bfqq is not in service and -+ * has no dispatched request. DO NOT use bfqq after the next -+ * function invocation. 
-+ */ -+ __bfq_weights_tree_remove(bfqd, bfqq, -+ &bfqd->queue_weights_tree); -+} -+ -+/* -+ * Return expired entry, or NULL to just start from scratch in rbtree. -+ */ -+static struct request *bfq_check_fifo(struct bfq_queue *bfqq, -+ struct request *last) -+{ -+ struct request *rq; -+ -+ if (bfq_bfqq_fifo_expire(bfqq)) -+ return NULL; -+ -+ bfq_mark_bfqq_fifo_expire(bfqq); -+ -+ rq = rq_entry_fifo(bfqq->fifo.next); -+ -+ if (rq == last || ktime_get_ns() < rq->fifo_time) -+ return NULL; -+ -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "returned %p", rq); -+ BUG_ON(RB_EMPTY_NODE(&rq->rb_node)); -+ return rq; -+} -+ -+static struct request *bfq_find_next_rq(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq, -+ struct request *last) -+{ -+ struct rb_node *rbnext = rb_next(&last->rb_node); -+ struct rb_node *rbprev = rb_prev(&last->rb_node); -+ struct request *next, *prev = NULL; -+ -+ BUG_ON(list_empty(&bfqq->fifo)); -+ -+ /* Follow expired path, else get first next available. */ -+ next = bfq_check_fifo(bfqq, last); -+ if (next) { -+ BUG_ON(next == last); -+ return next; -+ } -+ -+ BUG_ON(RB_EMPTY_NODE(&last->rb_node)); -+ -+ if (rbprev) -+ prev = rb_entry_rq(rbprev); -+ -+ if (rbnext) -+ next = rb_entry_rq(rbnext); -+ else { -+ rbnext = rb_first(&bfqq->sort_list); -+ if (rbnext && rbnext != &last->rb_node) -+ next = rb_entry_rq(rbnext); -+ } -+ -+ return bfq_choose_req(bfqd, next, prev, blk_rq_pos(last)); -+} -+ -+/* see the definition of bfq_async_charge_factor for details */ -+static unsigned long bfq_serv_to_charge(struct request *rq, -+ struct bfq_queue *bfqq) -+{ -+ if (bfq_bfqq_sync(bfqq) || bfqq->wr_coeff > 1 || -+ !bfq_symmetric_scenario(bfqq->bfqd)) -+ return blk_rq_sectors(rq); -+ -+ return blk_rq_sectors(rq) * bfq_async_charge_factor; -+} -+ -+/** -+ * bfq_updated_next_req - update the queue after a new next_rq selection. -+ * @bfqd: the device data the queue belongs to. -+ * @bfqq: the queue to update. -+ * -+ * If the first request of a queue changes we make sure that the queue -+ * has enough budget to serve at least its first request (if the -+ * request has grown). We do this because if the queue has not enough -+ * budget for its first request, it has to go through two dispatch -+ * rounds to actually get it dispatched. -+ */ -+static void bfq_updated_next_req(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq) -+{ -+ struct bfq_entity *entity = &bfqq->entity; -+ struct bfq_service_tree *st = bfq_entity_service_tree(entity); -+ struct request *next_rq = bfqq->next_rq; -+ unsigned long new_budget; -+ -+ if (!next_rq) -+ return; -+ -+ if (bfqq == bfqd->in_service_queue) -+ /* -+ * In order not to break guarantees, budgets cannot be -+ * changed after an entity has been selected. -+ */ -+ return; -+ -+ BUG_ON(entity->tree != &st->active); -+ BUG_ON(entity == entity->sched_data->in_service_entity); -+ -+ new_budget = max_t(unsigned long, -+ max_t(unsigned long, bfqq->max_budget, -+ bfq_serv_to_charge(next_rq, bfqq)), -+ entity->service); -+ if (entity->budget != new_budget) { -+ entity->budget = new_budget; -+ bfq_log_bfqq(bfqd, bfqq, "new budget %lu", -+ new_budget); -+ bfq_requeue_bfqq(bfqd, bfqq, false); -+ } -+} -+ -+static unsigned int bfq_wr_duration(struct bfq_data *bfqd) -+{ -+ u64 dur; -+ -+ if (bfqd->bfq_wr_max_time > 0) -+ return bfqd->bfq_wr_max_time; -+ -+ dur = bfqd->rate_dur_prod; -+ do_div(dur, bfqd->peak_rate); -+ -+ /* -+ * Limit duration between 3 and 25 seconds. 
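A rough userspace rendition of the computation performed by bfq_wr_duration() above: duration = (ref_rate / r) * ref_wr_duration, clamped to [3 s, 25 s]. The 8000 ms reference duration used here is a stand-in (the real ref_wr_duration values are initialized elsewhere in the patch), and in the kernel the product ref_rate * ref_wr_duration is precomputed as rate_dur_prod.

#include <stdio.h>

/* duration = (ref_rate / r) * ref_wr_duration, clamped to [3 s, 25 s]. */
static unsigned long toy_wr_duration_ms(unsigned long long peak_rate,
                                        unsigned long long ref_rate,
                                        unsigned long ref_wr_duration_ms)
{
    unsigned long long dur = ref_rate * ref_wr_duration_ms / peak_rate;

    if (dur < 3000)
        return 3000;
    if (dur > 25000)
        return 25000;
    return (unsigned long)dur;
}

int main(void)
{
    /* Assumed reference: rotational rate 14000, 8000 ms reference duration. */
    printf("device as fast as the reference:       %lu ms\n",
           toy_wr_duration_ms(14000, 14000, 8000));
    printf("device 4x faster (3 s minimum applies): %lu ms\n",
           toy_wr_duration_ms(56000, 14000, 8000));
    printf("much slower device (25 s cap applies):  %lu ms\n",
           toy_wr_duration_ms(1000, 14000, 8000));
    return 0;
}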
The upper limit -+ * has been conservatively set after the following worst case: -+ * on a QEMU/KVM virtual machine -+ * - running in a slow PC -+ * - with a virtual disk stacked on a slow low-end 5400rpm HDD -+ * - serving a heavy I/O workload, such as the sequential reading -+ * of several files -+ * mplayer took 23 seconds to start, if constantly weight-raised. -+ * -+ * As for higher values than that accomodating the above bad -+ * scenario, tests show that higher values would often yield -+ * the opposite of the desired result, i.e., would worsen -+ * responsiveness by allowing non-interactive applications to -+ * preserve weight raising for too long. -+ * -+ * On the other end, lower values than 3 seconds make it -+ * difficult for most interactive tasks to complete their jobs -+ * before weight-raising finishes. -+ */ -+ return clamp_val(dur, msecs_to_jiffies(3000), msecs_to_jiffies(25000)); -+} -+ -+/* switch back from soft real-time to interactive weight raising */ -+static void switch_back_to_interactive_wr(struct bfq_queue *bfqq, -+ struct bfq_data *bfqd) -+{ -+ bfqq->wr_coeff = bfqd->bfq_wr_coeff; -+ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); -+ bfqq->last_wr_start_finish = bfqq->wr_start_at_switch_to_srt; -+} -+ -+static void -+bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd, -+ struct bfq_io_cq *bic, bool bfq_already_existing) -+{ -+ unsigned int old_wr_coeff; -+ bool busy = bfq_already_existing && bfq_bfqq_busy(bfqq); -+ -+ if (bic->saved_has_short_ttime) -+ bfq_mark_bfqq_has_short_ttime(bfqq); -+ else -+ bfq_clear_bfqq_has_short_ttime(bfqq); -+ -+ if (bic->saved_IO_bound) -+ bfq_mark_bfqq_IO_bound(bfqq); -+ else -+ bfq_clear_bfqq_IO_bound(bfqq); -+ -+ if (unlikely(busy)) -+ old_wr_coeff = bfqq->wr_coeff; -+ -+ bfqq->ttime = bic->saved_ttime; -+ bfqq->wr_coeff = bic->saved_wr_coeff; -+ bfqq->wr_start_at_switch_to_srt = bic->saved_wr_start_at_switch_to_srt; -+ BUG_ON(time_is_after_jiffies(bfqq->wr_start_at_switch_to_srt)); -+ bfqq->last_wr_start_finish = bic->saved_last_wr_start_finish; -+ bfqq->wr_cur_max_time = bic->saved_wr_cur_max_time; -+ BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish)); -+ -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "bic %p wr_coeff %d start_finish %lu max_time %lu", -+ bic, bfqq->wr_coeff, bfqq->last_wr_start_finish, -+ bfqq->wr_cur_max_time); -+ -+ if (bfqq->wr_coeff > 1 && (bfq_bfqq_in_large_burst(bfqq) || -+ time_is_before_jiffies(bfqq->last_wr_start_finish + -+ bfqq->wr_cur_max_time))) { -+ if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time && -+ !bfq_bfqq_in_large_burst(bfqq) && -+ time_is_after_eq_jiffies(bfqq->wr_start_at_switch_to_srt + -+ bfq_wr_duration(bfqd))) { -+ switch_back_to_interactive_wr(bfqq, bfqd); -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "switching back to interactive"); -+ } else { -+ bfqq->wr_coeff = 1; -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "switching off wr (%lu + %lu < %lu)", -+ bfqq->last_wr_start_finish, bfqq->wr_cur_max_time, -+ jiffies); -+ } -+ } -+ -+ /* make sure weight will be updated, however we got here */ -+ bfqq->entity.prio_changed = 1; -+ -+ if (likely(!busy)) -+ return; -+ -+ if (old_wr_coeff == 1 && bfqq->wr_coeff > 1) { -+ bfqd->wr_busy_queues++; -+ BUG_ON(bfqd->wr_busy_queues > bfq_tot_busy_queues(bfqd)); -+ } else if (old_wr_coeff > 1 && bfqq->wr_coeff == 1) { -+ bfqd->wr_busy_queues--; -+ BUG_ON(bfqd->wr_busy_queues < 0); -+ } -+} -+ -+static int bfqq_process_refs(struct bfq_queue *bfqq) -+{ -+ int process_refs, io_refs; -+ -+ lockdep_assert_held(&bfqq->bfqd->lock); -+ -+ io_refs = 
bfqq->allocated; -+ process_refs = bfqq->ref - io_refs - bfqq->entity.on_st - -+ (bfqq->weight_counter != NULL); -+ BUG_ON(process_refs < 0); -+ return process_refs; -+} -+ -+/* Empty burst list and add just bfqq (see comments to bfq_handle_burst) */ -+static void bfq_reset_burst_list(struct bfq_data *bfqd, struct bfq_queue *bfqq) -+{ -+ struct bfq_queue *item; -+ struct hlist_node *n; -+ -+ hlist_for_each_entry_safe(item, n, &bfqd->burst_list, burst_list_node) -+ hlist_del_init(&item->burst_list_node); -+ hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list); -+ bfqd->burst_size = 1; -+ bfqd->burst_parent_entity = bfqq->entity.parent; -+} -+ -+/* Add bfqq to the list of queues in current burst (see bfq_handle_burst) */ -+static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq) -+{ -+ /* Increment burst size to take into account also bfqq */ -+ bfqd->burst_size++; -+ -+ bfq_log_bfqq(bfqd, bfqq, "%d", bfqd->burst_size); -+ -+ BUG_ON(bfqd->burst_size > bfqd->bfq_large_burst_thresh); -+ -+ if (bfqd->burst_size == bfqd->bfq_large_burst_thresh) { -+ struct bfq_queue *pos, *bfqq_item; -+ struct hlist_node *n; -+ -+ /* -+ * Enough queues have been activated shortly after each -+ * other to consider this burst as large. -+ */ -+ bfqd->large_burst = true; -+ bfq_log_bfqq(bfqd, bfqq, "large burst started"); -+ -+ /* -+ * We can now mark all queues in the burst list as -+ * belonging to a large burst. -+ */ -+ hlist_for_each_entry(bfqq_item, &bfqd->burst_list, -+ burst_list_node) { -+ bfq_mark_bfqq_in_large_burst(bfqq_item); -+ bfq_log_bfqq(bfqd, bfqq_item, "marked in large burst"); -+ } -+ bfq_mark_bfqq_in_large_burst(bfqq); -+ bfq_log_bfqq(bfqd, bfqq, "marked in large burst"); -+ -+ /* -+ * From now on, and until the current burst finishes, any -+ * new queue being activated shortly after the last queue -+ * was inserted in the burst can be immediately marked as -+ * belonging to a large burst. So the burst list is not -+ * needed any more. Remove it. -+ */ -+ hlist_for_each_entry_safe(pos, n, &bfqd->burst_list, -+ burst_list_node) -+ hlist_del_init(&pos->burst_list_node); -+ } else /* -+ * Burst not yet large: add bfqq to the burst list. Do -+ * not increment the ref counter for bfqq, because bfqq -+ * is removed from the burst list before freeing bfqq -+ * in put_queue. -+ */ -+ hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list); -+} -+ -+/* -+ * If many queues belonging to the same group happen to be created -+ * shortly after each other, then the processes associated with these -+ * queues have typically a common goal. In particular, bursts of queue -+ * creations are usually caused by services or applications that spawn -+ * many parallel threads/processes. Examples are systemd during boot, -+ * or git grep. To help these processes get their job done as soon as -+ * possible, it is usually better to not grant either weight-raising -+ * or device idling to their queues. -+ * -+ * In this comment we describe, firstly, the reasons why this fact -+ * holds, and, secondly, the next function, which implements the main -+ * steps needed to properly mark these queues so that they can then be -+ * treated in a different way. -+ * -+ * The above services or applications benefit mostly from a high -+ * throughput: the quicker the requests of the activated queues are -+ * cumulatively served, the sooner the target job of these queues gets -+ * completed. 
As a consequence, weight-raising any of these queues, -+ * which also implies idling the device for it, is almost always -+ * counterproductive. In most cases it just lowers throughput. -+ * -+ * On the other hand, a burst of queue creations may be caused also by -+ * the start of an application that does not consist of a lot of -+ * parallel I/O-bound threads. In fact, with a complex application, -+ * several short processes may need to be executed to start-up the -+ * application. In this respect, to start an application as quickly as -+ * possible, the best thing to do is in any case to privilege the I/O -+ * related to the application with respect to all other -+ * I/O. Therefore, the best strategy to start as quickly as possible -+ * an application that causes a burst of queue creations is to -+ * weight-raise all the queues created during the burst. This is the -+ * exact opposite of the best strategy for the other type of bursts. -+ * -+ * In the end, to take the best action for each of the two cases, the -+ * two types of bursts need to be distinguished. Fortunately, this -+ * seems relatively easy, by looking at the sizes of the bursts. In -+ * particular, we found a threshold such that only bursts with a -+ * larger size than that threshold are apparently caused by -+ * services or commands such as systemd or git grep. For brevity, -+ * hereafter we call just 'large' these bursts. BFQ *does not* -+ * weight-raise queues whose creation occurs in a large burst. In -+ * addition, for each of these queues BFQ performs or does not perform -+ * idling depending on which choice boosts the throughput more. The -+ * exact choice depends on the device and request pattern at -+ * hand. -+ * -+ * Unfortunately, false positives may occur while an interactive task -+ * is starting (e.g., an application is being started). The -+ * consequence is that the queues associated with the task do not -+ * enjoy weight raising as expected. Fortunately these false positives -+ * are very rare. They typically occur if some service happens to -+ * start doing I/O exactly when the interactive task starts. -+ * -+ * Turning back to the next function, it implements all the steps -+ * needed to detect the occurrence of a large burst and to properly -+ * mark all the queues belonging to it (so that they can then be -+ * treated in a different way). This goal is achieved by maintaining a -+ * "burst list" that holds, temporarily, the queues that belong to the -+ * burst in progress. The list is then used to mark these queues as -+ * belonging to a large burst if the burst does become large. The main -+ * steps are the following. -+ * -+ * . when the very first queue is created, the queue is inserted into the -+ * list (as it could be the first queue in a possible burst) -+ * -+ * . if the current burst has not yet become large, and a queue Q that does -+ * not yet belong to the burst is activated shortly after the last time -+ * at which a new queue entered the burst list, then the function appends -+ * Q to the burst list -+ * -+ * . if, as a consequence of the previous step, the burst size reaches -+ * the large-burst threshold, then -+ * -+ * . all the queues in the burst list are marked as belonging to a -+ * large burst -+ * -+ * . the burst list is deleted; in fact, the burst list already served -+ * its purpose (keeping temporarily track of the queues in a burst, -+ * so as to be able to mark them as belonging to a large burst in the -+ * previous sub-step), and now is not needed any more -+ * -+ * . 
the device enters a large-burst mode -+ * -+ * . if a queue Q that does not belong to the burst is created while -+ * the device is in large-burst mode and shortly after the last time -+ * at which a queue either entered the burst list or was marked as -+ * belonging to the current large burst, then Q is immediately marked -+ * as belonging to a large burst. -+ * -+ * . if a queue Q that does not belong to the burst is created a while -+ * later, i.e., not shortly after, than the last time at which a queue -+ * either entered the burst list or was marked as belonging to the -+ * current large burst, then the current burst is deemed as finished and: -+ * -+ * . the large-burst mode is reset if set -+ * -+ * . the burst list is emptied -+ * -+ * . Q is inserted in the burst list, as Q may be the first queue -+ * in a possible new burst (then the burst list contains just Q -+ * after this step). -+ */ -+static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq) -+{ -+ /* -+ * If bfqq is already in the burst list or is part of a large -+ * burst, or finally has just been split, then there is -+ * nothing else to do. -+ */ -+ if (!hlist_unhashed(&bfqq->burst_list_node) || -+ bfq_bfqq_in_large_burst(bfqq) || -+ time_is_after_eq_jiffies(bfqq->split_time + -+ msecs_to_jiffies(10))) -+ return; -+ -+ /* -+ * If bfqq's creation happens late enough, or bfqq belongs to -+ * a different group than the burst group, then the current -+ * burst is finished, and related data structures must be -+ * reset. -+ * -+ * In this respect, consider the special case where bfqq is -+ * the very first queue created after BFQ is selected for this -+ * device. In this case, last_ins_in_burst and -+ * burst_parent_entity are not yet significant when we get -+ * here. But it is easy to verify that, whether or not the -+ * following condition is true, bfqq will end up being -+ * inserted into the burst list. In particular the list will -+ * happen to contain only bfqq. And this is exactly what has -+ * to happen, as bfqq may be the first queue of the first -+ * burst. -+ */ -+ if (time_is_before_jiffies(bfqd->last_ins_in_burst + -+ bfqd->bfq_burst_interval) || -+ bfqq->entity.parent != bfqd->burst_parent_entity) { -+ bfqd->large_burst = false; -+ bfq_reset_burst_list(bfqd, bfqq); -+ bfq_log_bfqq(bfqd, bfqq, -+ "late activation or different group"); -+ goto end; -+ } -+ -+ /* -+ * If we get here, then bfqq is being activated shortly after the -+ * last queue. So, if the current burst is also large, we can mark -+ * bfqq as belonging to this large burst immediately. -+ */ -+ if (bfqd->large_burst) { -+ bfq_log_bfqq(bfqd, bfqq, "marked in burst"); -+ bfq_mark_bfqq_in_large_burst(bfqq); -+ goto end; -+ } -+ -+ /* -+ * If we get here, then a large-burst state has not yet been -+ * reached, but bfqq is being activated shortly after the last -+ * queue. Then we add bfqq to the burst. -+ */ -+ bfq_add_to_burst(bfqd, bfqq); -+end: -+ /* -+ * At this point, bfqq either has been added to the current -+ * burst or has caused the current burst to terminate and a -+ * possible new burst to start. In particular, in the second -+ * case, bfqq has become the first queue in the possible new -+ * burst. In both cases last_ins_in_burst needs to be moved -+ * forward. 
-+ */ -+ bfqd->last_ins_in_burst = jiffies; -+ -+} -+ -+static int bfq_bfqq_budget_left(struct bfq_queue *bfqq) -+{ -+ struct bfq_entity *entity = &bfqq->entity; -+ -+ if (entity->budget < entity->service) { -+ pr_crit("budget %d service %d\n", -+ entity->budget, entity->service); -+ BUG(); -+ } -+ return entity->budget - entity->service; -+} -+ -+/* -+ * If enough samples have been computed, return the current max budget -+ * stored in bfqd, which is dynamically updated according to the -+ * estimated disk peak rate; otherwise return the default max budget -+ */ -+static int bfq_max_budget(struct bfq_data *bfqd) -+{ -+ if (bfqd->budgets_assigned < bfq_stats_min_budgets) -+ return bfq_default_max_budget; -+ else -+ return bfqd->bfq_max_budget; -+} -+ -+/* -+ * Return min budget, which is a fraction of the current or default -+ * max budget (trying with 1/32) -+ */ -+static int bfq_min_budget(struct bfq_data *bfqd) -+{ -+ if (bfqd->budgets_assigned < bfq_stats_min_budgets) -+ return bfq_default_max_budget / 32; -+ else -+ return bfqd->bfq_max_budget / 32; -+} -+ -+static void bfq_bfqq_expire(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq, -+ bool compensate, -+ enum bfqq_expiration reason); -+ -+/* -+ * The next function, invoked after the input queue bfqq switches from -+ * idle to busy, updates the budget of bfqq. The function also tells -+ * whether the in-service queue should be expired, by returning -+ * true. The purpose of expiring the in-service queue is to give bfqq -+ * the chance to possibly preempt the in-service queue, and the reason -+ * for preempting the in-service queue is to achieve one of the two -+ * goals below. -+ * -+ * 1. Guarantee to bfqq its reserved bandwidth even if bfqq has -+ * expired because it has remained idle. In particular, bfqq may have -+ * expired for one of the following two reasons: -+ * -+ * - BFQ_BFQQ_NO_MORE_REQUEST bfqq did not enjoy any device idling and -+ * did not make it to issue a new request before its last request -+ * was served; -+ * -+ * - BFQ_BFQQ_TOO_IDLE bfqq did enjoy device idling, but did not issue -+ * a new request before the expiration of the idling-time. -+ * -+ * Even if bfqq has expired for one of the above reasons, the process -+ * associated with the queue may be however issuing requests greedily, -+ * and thus be sensitive to the bandwidth it receives (bfqq may have -+ * remained idle for other reasons: CPU high load, bfqq not enjoying -+ * idling, I/O throttling somewhere in the path from the process to -+ * the I/O scheduler, ...). But if, after every expiration for one of -+ * the above two reasons, bfqq has to wait for the service of at least -+ * one full budget of another queue before being served again, then -+ * bfqq is likely to get a much lower bandwidth or resource time than -+ * its reserved ones. To address this issue, two countermeasures need -+ * to be taken. -+ * -+ * First, the budget and the timestamps of bfqq need to be updated in -+ * a special way on bfqq reactivation: they need to be updated as if -+ * bfqq did not remain idle and did not expire. In fact, if they are -+ * computed as if bfqq expired and remained idle until reactivation, -+ * then the process associated with bfqq is treated as if, instead of -+ * being greedy, it stopped issuing requests when bfqq remained idle, -+ * and restarts issuing requests only on this reactivation. In other -+ * words, the scheduler does not help the process recover the "service -+ * hole" between bfqq expiration and reactivation. 
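/*
 * Editorial sketch, not part of the original patch: the arithmetic
 * behind bfq_bfqq_budget_left(), bfq_max_budget() and bfq_min_budget()
 * above.  Until enough budgets have been assigned, a default maximum
 * is used; afterwards the autotuned one is used, and the minimum
 * budget is 1/32 of whichever maximum applies.  The example numbers in
 * the comments are only an illustration.
 */
static int budget_demo_max(int budgets_assigned, int min_samples,
			   int default_max, int autotuned_max)
{
	return budgets_assigned < min_samples ? default_max : autotuned_max;
}

static int budget_demo_min(int max_budget)
{
	return max_budget / 32;		/* e.g. 16384 -> 512 sectors */
}

static int budget_demo_left(int budget, int service)
{
	return budget - service;	/* e.g. 8192 - 6000 = 2192 sectors */
}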
As a consequence, -+ * the process receives a lower bandwidth than its reserved one. In -+ * contrast, to recover this hole, the budget must be updated as if -+ * bfqq was not expired at all before this reactivation, i.e., it must -+ * be set to the value of the remaining budget when bfqq was -+ * expired. Along the same line, timestamps need to be assigned the -+ * value they had the last time bfqq was selected for service, i.e., -+ * before last expiration. Thus timestamps need to be back-shifted -+ * with respect to their normal computation (see [1] for more details -+ * on this tricky aspect). -+ * -+ * Secondly, to allow the process to recover the hole, the in-service -+ * queue must be expired too, to give bfqq the chance to preempt it -+ * immediately. In fact, if bfqq has to wait for a full budget of the -+ * in-service queue to be completed, then it may become impossible to -+ * let the process recover the hole, even if the back-shifted -+ * timestamps of bfqq are lower than those of the in-service queue. If -+ * this happens for most or all of the holes, then the process may not -+ * receive its reserved bandwidth. In this respect, it is worth noting -+ * that, being the service of outstanding requests unpreemptible, a -+ * little fraction of the holes may however be unrecoverable, thereby -+ * causing a little loss of bandwidth. -+ * -+ * The last important point is detecting whether bfqq does need this -+ * bandwidth recovery. In this respect, the next function deems the -+ * process associated with bfqq greedy, and thus allows it to recover -+ * the hole, if: 1) the process is waiting for the arrival of a new -+ * request (which implies that bfqq expired for one of the above two -+ * reasons), and 2) such a request has arrived soon. The first -+ * condition is controlled through the flag non_blocking_wait_rq, -+ * while the second through the flag arrived_in_time. If both -+ * conditions hold, then the function computes the budget in the -+ * above-described special way, and signals that the in-service queue -+ * should be expired. Timestamp back-shifting is done later in -+ * __bfq_activate_entity. -+ * -+ * 2. Reduce latency. Even if timestamps are not backshifted to let -+ * the process associated with bfqq recover a service hole, bfqq may -+ * however happen to have, after being (re)activated, a lower finish -+ * timestamp than the in-service queue. That is, the next budget of -+ * bfqq may have to be completed before the one of the in-service -+ * queue. If this is the case, then preempting the in-service queue -+ * allows this goal to be achieved, apart from the unpreemptible, -+ * outstanding requests mentioned above. -+ * -+ * Unfortunately, regardless of which of the above two goals one wants -+ * to achieve, service trees need first to be updated to know whether -+ * the in-service queue must be preempted. To have service trees -+ * correctly updated, the in-service queue must be expired and -+ * rescheduled, and bfqq must be scheduled too. This is one of the -+ * most costly operations (in future versions, the scheduling -+ * mechanism may be re-designed in such a way to make it possible to -+ * know whether preemption is needed without needing to update service -+ * trees). In addition, queue preemptions almost always cause random -+ * I/O, and thus loss of throughput. 
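/*
 * Editorial sketch, not part of the original patch: the recovery rule
 * for goal (1) above, reduced to plain C.  A queue that was waiting
 * for a request which then arrived in time resumes with the budget it
 * had left at expiration (capped to its maximum budget) instead of a
 * fresh one, and asks for the in-service queue to be expired so that
 * it can possibly preempt it.  The else branch is simplified with
 * respect to the real function.
 */
#include <stdbool.h>

static bool demo_budget_on_reactivation(int *budget, int *service,
					int max_budget,
					bool waited_and_arrived_in_time)
{
	int left = *budget - *service;

	if (waited_and_arrived_in_time && left > 0) {
		*budget = left < max_budget ? left : max_budget;
		*service = 0;	/* consumed service already folded into left */
		return true;	/* expire the in-service queue */
	}

	*service = 0;
	*budget = max_budget;	/* fresh budget */
	return false;
}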
Because of these facts, the next -+ * function adopts the following simple scheme to avoid both costly -+ * operations and too frequent preemptions: it requests the expiration -+ * of the in-service queue (unconditionally) only for queues that need -+ * to recover a hole, or that either are weight-raised or deserve to -+ * be weight-raised. -+ */ -+static bool bfq_bfqq_update_budg_for_activation(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq, -+ bool arrived_in_time, -+ bool wr_or_deserves_wr) -+{ -+ struct bfq_entity *entity = &bfqq->entity; -+ -+ /* -+ * In the next compound condition, we check also whether there -+ * is some budget left, because otherwise there is no point in -+ * trying to go on serving bfqq with this same budget: bfqq -+ * would be expired immediately after being selected for -+ * service. This would only cause useless overhead. -+ */ -+ if (bfq_bfqq_non_blocking_wait_rq(bfqq) && arrived_in_time && -+ bfq_bfqq_budget_left(bfqq) > 0) { -+ /* -+ * We do not clear the flag non_blocking_wait_rq here, as -+ * the latter is used in bfq_activate_bfqq to signal -+ * that timestamps need to be back-shifted (and is -+ * cleared right after). -+ */ -+ -+ /* -+ * In next assignment we rely on that either -+ * entity->service or entity->budget are not updated -+ * on expiration if bfqq is empty (see -+ * __bfq_bfqq_recalc_budget). Thus both quantities -+ * remain unchanged after such an expiration, and the -+ * following statement therefore assigns to -+ * entity->budget the remaining budget on such an -+ * expiration. -+ */ -+ BUG_ON(bfqq->max_budget < 0); -+ entity->budget = min_t(unsigned long, -+ bfq_bfqq_budget_left(bfqq), -+ bfqq->max_budget); -+ -+ BUG_ON(entity->budget < 0); -+ -+ /* -+ * At this point, we have used entity->service to get -+ * the budget left (needed for updating -+ * entity->budget). Thus we finally can, and have to, -+ * reset entity->service. The latter must be reset -+ * because bfqq would otherwise be charged again for -+ * the service it has received during its previous -+ * service slot(s). -+ */ -+ entity->service = 0; -+ -+ return true; -+ } -+ -+ /* -+ * We can finally complete expiration, by setting service to 0. -+ */ -+ entity->service = 0; -+ BUG_ON(bfqq->max_budget < 0); -+ entity->budget = max_t(unsigned long, bfqq->max_budget, -+ bfq_serv_to_charge(bfqq->next_rq, bfqq)); -+ BUG_ON(entity->budget < 0); -+ -+ bfq_clear_bfqq_non_blocking_wait_rq(bfqq); -+ return wr_or_deserves_wr; -+} -+ -+/* -+ * Return the farthest past time instant according to jiffies -+ * macros. -+ */ -+static unsigned long bfq_smallest_from_now(void) -+{ -+ return jiffies - MAX_JIFFY_OFFSET; -+} -+ -+static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq, -+ unsigned int old_wr_coeff, -+ bool wr_or_deserves_wr, -+ bool interactive, -+ bool in_burst, -+ bool soft_rt) -+{ -+ if (old_wr_coeff == 1 && wr_or_deserves_wr) { -+ /* start a weight-raising period */ -+ if (interactive) { -+ bfqq->service_from_wr = 0; -+ bfqq->wr_coeff = bfqd->bfq_wr_coeff; -+ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); -+ } else { -+ /* -+ * No interactive weight raising in progress -+ * here: assign minus infinity to -+ * wr_start_at_switch_to_srt, to make sure -+ * that, at the end of the soft-real-time -+ * weight raising periods that is starting -+ * now, no interactive weight-raising period -+ * may be wrongly considered as still in -+ * progress (and thus actually started by -+ * mistake). 
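/*
 * Editorial sketch, not part of the original patch: the "minus
 * infinity" trick the comment above refers to, modelled with an
 * unsigned tick counter standing in for jiffies.  Recording
 * now - LARGE_OFFSET as the start of the interactive period makes any
 * later "is that period still in progress?" test fail, so a soft
 * real-time period starting now cannot be mistaken for an ongoing
 * interactive one.  The offset value is only illustrative.
 */
#include <stdbool.h>

#define DEMO_LARGE_TICK_OFFSET (1UL << 30)	/* assumed "minus infinity" */

static unsigned long demo_smallest_from_now(unsigned long now)
{
	return now - DEMO_LARGE_TICK_OFFSET;
}

/* wrap-safe check: is 'start + duration' still in the future? */
static bool demo_period_in_progress(unsigned long now, unsigned long start,
				    unsigned long duration)
{
	return (long)(start + duration - now) > 0;
}

/*
 * demo_period_in_progress(now, demo_smallest_from_now(now), dur) is
 * false for any duration smaller than the offset.
 */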
-+ */ -+ bfqq->wr_start_at_switch_to_srt = -+ bfq_smallest_from_now(); -+ bfqq->wr_coeff = bfqd->bfq_wr_coeff * -+ BFQ_SOFTRT_WEIGHT_FACTOR; -+ bfqq->wr_cur_max_time = -+ bfqd->bfq_wr_rt_max_time; -+ } -+ /* -+ * If needed, further reduce budget to make sure it is -+ * close to bfqq's backlog, so as to reduce the -+ * scheduling-error component due to a too large -+ * budget. Do not care about throughput consequences, -+ * but only about latency. Finally, do not assign a -+ * too small budget either, to avoid increasing -+ * latency by causing too frequent expirations. -+ */ -+ bfqq->entity.budget = min_t(unsigned long, -+ bfqq->entity.budget, -+ 2 * bfq_min_budget(bfqd)); -+ -+ bfq_log_bfqq(bfqd, bfqq, -+ "wrais starting at %lu, rais_max_time %u", -+ jiffies, -+ jiffies_to_msecs(bfqq->wr_cur_max_time)); -+ } else if (old_wr_coeff > 1) { -+ if (interactive) { /* update wr coeff and duration */ -+ bfqq->wr_coeff = bfqd->bfq_wr_coeff; -+ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); -+ } else if (in_burst) { -+ bfqq->wr_coeff = 1; -+ bfq_log_bfqq(bfqd, bfqq, -+ "wrais ending at %lu, rais_max_time %u", -+ jiffies, -+ jiffies_to_msecs(bfqq-> -+ wr_cur_max_time)); -+ } else if (soft_rt) { -+ /* -+ * The application is now or still meeting the -+ * requirements for being deemed soft rt. We -+ * can then correctly and safely (re)charge -+ * the weight-raising duration for the -+ * application with the weight-raising -+ * duration for soft rt applications. -+ * -+ * In particular, doing this recharge now, i.e., -+ * before the weight-raising period for the -+ * application finishes, reduces the probability -+ * of the following negative scenario: -+ * 1) the weight of a soft rt application is -+ * raised at startup (as for any newly -+ * created application), -+ * 2) since the application is not interactive, -+ * at a certain time weight-raising is -+ * stopped for the application, -+ * 3) at that time the application happens to -+ * still have pending requests, and hence -+ * is destined to not have a chance to be -+ * deemed soft rt before these requests are -+ * completed (see the comments to the -+ * function bfq_bfqq_softrt_next_start() -+ * for details on soft rt detection), -+ * 4) these pending requests experience a high -+ * latency because the application is not -+ * weight-raised while they are pending. -+ */ -+ if (bfqq->wr_cur_max_time != -+ bfqd->bfq_wr_rt_max_time) { -+ bfqq->wr_start_at_switch_to_srt = -+ bfqq->last_wr_start_finish; -+ BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish)); -+ -+ bfqq->wr_cur_max_time = -+ bfqd->bfq_wr_rt_max_time; -+ bfqq->wr_coeff = bfqd->bfq_wr_coeff * -+ BFQ_SOFTRT_WEIGHT_FACTOR; -+ bfq_log_bfqq(bfqd, bfqq, -+ "switching to soft_rt wr"); -+ } else -+ bfq_log_bfqq(bfqd, bfqq, -+ "moving forward soft_rt wr duration"); -+ bfqq->last_wr_start_finish = jiffies; -+ } -+ } -+} -+ -+static bool bfq_bfqq_idle_for_long_time(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq) -+{ -+ return bfqq->dispatched == 0 && -+ time_is_before_jiffies( -+ bfqq->budget_timeout + -+ bfqd->bfq_wr_min_idle_time); -+} -+ -+static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq, -+ int old_wr_coeff, -+ struct request *rq, -+ bool *interactive) -+{ -+ bool soft_rt, in_burst, wr_or_deserves_wr, -+ bfqq_wants_to_preempt, -+ idle_for_long_time = bfq_bfqq_idle_for_long_time(bfqd, bfqq), -+ /* -+ * See the comments on -+ * bfq_bfqq_update_budg_for_activation for -+ * details on the usage of the next variable. 
-+ */ -+ arrived_in_time = ktime_get_ns() <= -+ bfqq->ttime.last_end_request + -+ bfqd->bfq_slice_idle * 3; -+ -+ bfq_log_bfqq(bfqd, bfqq, -+ "bfq_add_request non-busy: " -+ "jiffies %lu, in_time %d, idle_long %d busyw %d " -+ "wr_coeff %u", -+ jiffies, arrived_in_time, -+ idle_for_long_time, -+ bfq_bfqq_non_blocking_wait_rq(bfqq), -+ old_wr_coeff); -+ -+ BUG_ON(bfqq->entity.budget < bfqq->entity.service); -+ -+ BUG_ON(bfqq == bfqd->in_service_queue); -+ -+ /* -+ * bfqq deserves to be weight-raised if: -+ * - it is sync, -+ * - it does not belong to a large burst, -+ * - it has been idle for enough time or is soft real-time, -+ * - is linked to a bfq_io_cq (it is not shared in any sense) -+ */ -+ in_burst = bfq_bfqq_in_large_burst(bfqq); -+ soft_rt = bfqd->bfq_wr_max_softrt_rate > 0 && -+ !in_burst && -+ time_is_before_jiffies(bfqq->soft_rt_next_start) && -+ bfqq->dispatched == 0; -+ *interactive = -+ !in_burst && -+ idle_for_long_time; -+ wr_or_deserves_wr = bfqd->low_latency && -+ (bfqq->wr_coeff > 1 || -+ (bfq_bfqq_sync(bfqq) && -+ bfqq->bic && (*interactive || soft_rt))); -+ -+ bfq_log_bfqq(bfqd, bfqq, -+ "bfq_add_request: " -+ "in_burst %d, " -+ "soft_rt %d (next %lu), inter %d, bic %p", -+ bfq_bfqq_in_large_burst(bfqq), soft_rt, -+ bfqq->soft_rt_next_start, -+ *interactive, -+ bfqq->bic); -+ -+ /* -+ * Using the last flag, update budget and check whether bfqq -+ * may want to preempt the in-service queue. -+ */ -+ bfqq_wants_to_preempt = -+ bfq_bfqq_update_budg_for_activation(bfqd, bfqq, -+ arrived_in_time, -+ wr_or_deserves_wr); -+ -+ /* -+ * If bfqq happened to be activated in a burst, but has been -+ * idle for much more than an interactive queue, then we -+ * assume that, in the overall I/O initiated in the burst, the -+ * I/O associated with bfqq is finished. So bfqq does not need -+ * to be treated as a queue belonging to a burst -+ * anymore. Accordingly, we reset bfqq's in_large_burst flag -+ * if set, and remove bfqq from the burst list if it's -+ * there. We do not decrement burst_size, because the fact -+ * that bfqq does not need to belong to the burst list any -+ * more does not invalidate the fact that bfqq was created in -+ * a burst. -+ */ -+ if (likely(!bfq_bfqq_just_created(bfqq)) && -+ idle_for_long_time && -+ time_is_before_jiffies( -+ bfqq->budget_timeout + -+ msecs_to_jiffies(10000))) { -+ hlist_del_init(&bfqq->burst_list_node); -+ bfq_clear_bfqq_in_large_burst(bfqq); -+ } -+ -+ bfq_clear_bfqq_just_created(bfqq); -+ -+ if (!bfq_bfqq_IO_bound(bfqq)) { -+ if (arrived_in_time) { -+ bfqq->requests_within_timer++; -+ if (bfqq->requests_within_timer >= -+ bfqd->bfq_requests_within_timer) -+ bfq_mark_bfqq_IO_bound(bfqq); -+ } else -+ bfqq->requests_within_timer = 0; -+ bfq_log_bfqq(bfqd, bfqq, "requests in time %d", -+ bfqq->requests_within_timer); -+ } -+ -+ if (bfqd->low_latency) { -+ if (unlikely(time_is_after_jiffies(bfqq->split_time))) -+ /* wraparound */ -+ bfqq->split_time = -+ jiffies - bfqd->bfq_wr_min_idle_time - 1; -+ -+ if (time_is_before_jiffies(bfqq->split_time + -+ bfqd->bfq_wr_min_idle_time)) { -+ bfq_update_bfqq_wr_on_rq_arrival(bfqd, bfqq, -+ old_wr_coeff, -+ wr_or_deserves_wr, -+ *interactive, -+ in_burst, -+ soft_rt); -+ -+ if (old_wr_coeff != bfqq->wr_coeff) -+ bfqq->entity.prio_changed = 1; -+ } -+ } -+ -+ bfqq->last_idle_bklogged = jiffies; -+ bfqq->service_from_backlogged = 0; -+ bfq_clear_bfqq_softrt_update(bfqq); -+ -+ bfq_add_bfqq_busy(bfqd, bfqq); -+ -+ /* -+ * Expire in-service queue only if preemption may be needed -+ * for guarantees. 
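/*
 * Editorial sketch, not part of the original patch: the eligibility
 * test assembled above, condensed into a single predicate.  On an
 * idle-to-busy switch a queue deserves weight raising only if low
 * latency mode is on and the queue is sync, is backed by a process
 * (has an io context), does not belong to a large burst, and is either
 * interactive (idle for long enough) or currently soft real-time.  The
 * "already weight-raised" case of the real code is left out.
 */
#include <stdbool.h>

static bool demo_deserves_wr(bool low_latency, bool is_sync, bool has_ioc,
			     bool in_large_burst, bool idle_for_long_time,
			     bool soft_rt)
{
	bool interactive = !in_large_burst && idle_for_long_time;

	return low_latency && is_sync && has_ioc && !in_large_burst &&
		(interactive || soft_rt);
}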
In this respect, the function -+ * next_queue_may_preempt just checks a simple, necessary -+ * condition, and not a sufficient condition based on -+ * timestamps. In fact, for the latter condition to be -+ * evaluated, timestamps would need first to be updated, and -+ * this operation is quite costly (see the comments on the -+ * function bfq_bfqq_update_budg_for_activation). -+ */ -+ if (bfqd->in_service_queue && bfqq_wants_to_preempt && -+ bfqd->in_service_queue->wr_coeff < bfqq->wr_coeff && -+ next_queue_may_preempt(bfqd)) { -+ struct bfq_queue *in_serv = -+ bfqd->in_service_queue; -+ BUG_ON(in_serv == bfqq); -+ -+ bfq_bfqq_expire(bfqd, bfqd->in_service_queue, -+ false, BFQ_BFQQ_PREEMPTED); -+ } -+} -+ -+static void bfq_add_request(struct request *rq) -+{ -+ struct bfq_queue *bfqq = RQ_BFQQ(rq); -+ struct bfq_data *bfqd = bfqq->bfqd; -+ struct request *next_rq, *prev; -+ unsigned int old_wr_coeff = bfqq->wr_coeff; -+ bool interactive = false; -+ -+ bfq_log_bfqq(bfqd, bfqq, "size %u %s", -+ blk_rq_sectors(rq), rq_is_sync(rq) ? "S" : "A"); -+ -+ if (bfqq->wr_coeff > 1) /* queue is being weight-raised */ -+ bfq_log_bfqq(bfqd, bfqq, -+ "raising period dur %u/%u msec, old coeff %u, w %d(%d)", -+ jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish), -+ jiffies_to_msecs(bfqq->wr_cur_max_time), -+ bfqq->wr_coeff, -+ bfqq->entity.weight, bfqq->entity.orig_weight); -+ -+ bfqq->queued[rq_is_sync(rq)]++; -+ bfqd->queued++; -+ -+ BUG_ON(!RQ_BFQQ(rq)); -+ BUG_ON(RQ_BFQQ(rq) != bfqq); -+ WARN_ON(blk_rq_sectors(rq) == 0); -+ -+ elv_rb_add(&bfqq->sort_list, rq); -+ -+ /* -+ * Check if this request is a better next-to-serve candidate. -+ */ -+ prev = bfqq->next_rq; -+ next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position); -+ BUG_ON(!next_rq); -+ BUG_ON(!RQ_BFQQ(next_rq)); -+ BUG_ON(RQ_BFQQ(next_rq) != bfqq); -+ bfqq->next_rq = next_rq; -+ -+ /* -+ * Adjust priority tree position, if next_rq changes. -+ */ -+ if (prev != bfqq->next_rq) -+ bfq_pos_tree_add_move(bfqd, bfqq); -+ -+ if (!bfq_bfqq_busy(bfqq)) /* switching to busy ... */ -+ bfq_bfqq_handle_idle_busy_switch(bfqd, bfqq, old_wr_coeff, -+ rq, &interactive); -+ else { -+ if (bfqd->low_latency && old_wr_coeff == 1 && !rq_is_sync(rq) && -+ time_is_before_jiffies( -+ bfqq->last_wr_start_finish + -+ bfqd->bfq_wr_min_inter_arr_async)) { -+ bfqq->wr_coeff = bfqd->bfq_wr_coeff; -+ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); -+ -+ bfqd->wr_busy_queues++; -+ BUG_ON(bfqd->wr_busy_queues > bfq_tot_busy_queues(bfqd)); -+ bfqq->entity.prio_changed = 1; -+ bfq_log_bfqq(bfqd, bfqq, -+ "non-idle wrais starting, " -+ "wr_max_time %u wr_busy %d", -+ jiffies_to_msecs(bfqq->wr_cur_max_time), -+ bfqd->wr_busy_queues); -+ } -+ if (prev != bfqq->next_rq) -+ bfq_updated_next_req(bfqd, bfqq); -+ } -+ -+ /* -+ * Assign jiffies to last_wr_start_finish in the following -+ * cases: -+ * -+ * . if bfqq is not going to be weight-raised, because, for -+ * non weight-raised queues, last_wr_start_finish stores the -+ * arrival time of the last request; as of now, this piece -+ * of information is used only for deciding whether to -+ * weight-raise async queues -+ * -+ * . if bfqq is not weight-raised, because, if bfqq is now -+ * switching to weight-raised, then last_wr_start_finish -+ * stores the time when weight-raising starts -+ * -+ * . 
if bfqq is interactive, because, regardless of whether -+ * bfqq is currently weight-raised, the weight-raising -+ * period must start or restart (this case is considered -+ * separately because it is not detected by the above -+ * conditions, if bfqq is already weight-raised) -+ * -+ * last_wr_start_finish has to be updated also if bfqq is soft -+ * real-time, because the weight-raising period is constantly -+ * restarted on idle-to-busy transitions for these queues, but -+ * this is already done in bfq_bfqq_handle_idle_busy_switch if -+ * needed. -+ */ -+ if (bfqd->low_latency && -+ (old_wr_coeff == 1 || bfqq->wr_coeff == 1 || interactive)) -+ bfqq->last_wr_start_finish = jiffies; -+} -+ -+static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd, -+ struct bio *bio, -+ struct request_queue *q) -+{ -+ struct bfq_queue *bfqq = bfqd->bio_bfqq; -+ -+ BUG_ON(!bfqd->bio_bfqq_set); -+ -+ if (bfqq) -+ return elv_rb_find(&bfqq->sort_list, bio_end_sector(bio)); -+ -+ return NULL; -+} -+ -+static sector_t get_sdist(sector_t last_pos, struct request *rq) -+{ -+ sector_t sdist = 0; -+ -+ if (last_pos) { -+ if (last_pos < blk_rq_pos(rq)) -+ sdist = blk_rq_pos(rq) - last_pos; -+ else -+ sdist = last_pos - blk_rq_pos(rq); -+ } -+ -+ return sdist; -+} -+ -+#if 0 /* Still not clear if we can do without next two functions */ -+static void bfq_activate_request(struct request_queue *q, struct request *rq) -+{ -+ struct bfq_data *bfqd = q->elevator->elevator_data; -+ bfqd->rq_in_driver++; -+} -+ -+static void bfq_deactivate_request(struct request_queue *q, struct request *rq) -+{ -+ struct bfq_data *bfqd = q->elevator->elevator_data; -+ -+ BUG_ON(bfqd->rq_in_driver == 0); -+ bfqd->rq_in_driver--; -+} -+#endif -+ -+static void bfq_remove_request(struct request_queue *q, -+ struct request *rq) -+{ -+ struct bfq_queue *bfqq = RQ_BFQQ(rq); -+ struct bfq_data *bfqd = bfqq->bfqd; -+ const int sync = rq_is_sync(rq); -+ -+ BUG_ON(bfqq->entity.service > bfqq->entity.budget); -+ -+ if (bfqq->next_rq == rq) { -+ bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq); -+ if (bfqq->next_rq && !RQ_BFQQ(bfqq->next_rq)) { -+ pr_crit("no bfqq! for next rq %p bfqq %p\n", -+ bfqq->next_rq, bfqq); -+ } -+ -+ BUG_ON(bfqq->next_rq && !RQ_BFQQ(bfqq->next_rq)); -+ if (bfqq->next_rq && RQ_BFQQ(bfqq->next_rq) != bfqq) { -+ pr_crit( -+ "wrong bfqq! for next rq %p, rq_bfqq %p bfqq %p\n", -+ bfqq->next_rq, RQ_BFQQ(bfqq->next_rq), bfqq); -+ } -+ BUG_ON(bfqq->next_rq && RQ_BFQQ(bfqq->next_rq) != bfqq); -+ -+ bfq_updated_next_req(bfqd, bfqq); -+ } -+ -+ if (rq->queuelist.prev != &rq->queuelist) -+ list_del_init(&rq->queuelist); -+ BUG_ON(bfqq->queued[sync] == 0); -+ bfqq->queued[sync]--; -+ bfqd->queued--; -+ elv_rb_del(&bfqq->sort_list, rq); -+ -+ elv_rqhash_del(q, rq); -+ if (q->last_merge == rq) -+ q->last_merge = NULL; -+ -+ if (RB_EMPTY_ROOT(&bfqq->sort_list)) { -+ bfqq->next_rq = NULL; -+ -+ BUG_ON(bfqq->entity.budget < 0); -+ -+ if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->in_service_queue) { -+ BUG_ON(bfqq->ref < 2); /* referred by rq and on tree */ -+ bfq_del_bfqq_busy(bfqd, bfqq, false); -+ /* -+ * bfqq emptied. In normal operation, when -+ * bfqq is empty, bfqq->entity.service and -+ * bfqq->entity.budget must contain, -+ * respectively, the service received and the -+ * budget used last time bfqq emptied. These -+ * facts do not hold in this case, as at least -+ * this last removal occurred while bfqq is -+ * not in service. 
To avoid inconsistencies, -+ * reset both bfqq->entity.service and -+ * bfqq->entity.budget, if bfqq has still a -+ * process that may issue I/O requests to it. -+ */ -+ bfqq->entity.budget = bfqq->entity.service = 0; -+ } -+ -+ /* -+ * Remove queue from request-position tree as it is empty. -+ */ -+ if (bfqq->pos_root) { -+ rb_erase(&bfqq->pos_node, bfqq->pos_root); -+ bfqq->pos_root = NULL; -+ } -+ } else { -+ BUG_ON(!bfqq->next_rq); -+ bfq_pos_tree_add_move(bfqd, bfqq); -+ } -+ -+ if (rq->cmd_flags & REQ_META) { -+ BUG_ON(bfqq->meta_pending == 0); -+ bfqq->meta_pending--; -+ } -+} -+ -+static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio) -+{ -+ struct request_queue *q = hctx->queue; -+ struct bfq_data *bfqd = q->elevator->elevator_data; -+ struct request *free = NULL; -+ /* -+ * bfq_bic_lookup grabs the queue_lock: invoke it now and -+ * store its return value for later use, to avoid nesting -+ * queue_lock inside the bfqd->lock. We assume that the bic -+ * returned by bfq_bic_lookup does not go away before -+ * bfqd->lock is taken. -+ */ -+ struct bfq_io_cq *bic = bfq_bic_lookup(bfqd, current->io_context, q); -+ bool ret; -+ -+ spin_lock_irq(&bfqd->lock); -+ -+ if (bic) -+ bfqd->bio_bfqq = bic_to_bfqq(bic, op_is_sync(bio->bi_opf)); -+ else -+ bfqd->bio_bfqq = NULL; -+ bfqd->bio_bic = bic; -+ /* Set next flag just for testing purposes */ -+ bfqd->bio_bfqq_set = true; -+ -+ ret = blk_mq_sched_try_merge(q, bio, &free); -+ -+ /* -+ * XXX Not yet freeing without lock held, to avoid an -+ * inconsistency with respect to the lock-protected invocation -+ * of blk_mq_sched_try_insert_merge in bfq_bio_merge. Waiting -+ * for clarifications from Jens. -+ */ -+ if (free) -+ blk_mq_free_request(free); -+ bfqd->bio_bfqq_set = false; -+ spin_unlock_irq(&bfqd->lock); -+ -+ return ret; -+} -+ -+static int bfq_request_merge(struct request_queue *q, struct request **req, -+ struct bio *bio) -+{ -+ struct bfq_data *bfqd = q->elevator->elevator_data; -+ struct request *__rq; -+ -+ __rq = bfq_find_rq_fmerge(bfqd, bio, q); -+ if (__rq && elv_bio_merge_ok(__rq, bio)) { -+ *req = __rq; -+ bfq_log(bfqd, "req %p", __rq); -+ -+ return ELEVATOR_FRONT_MERGE; -+ } -+ -+ return ELEVATOR_NO_MERGE; -+} -+ -+static struct bfq_queue *bfq_init_rq(struct request *rq); -+ -+static void bfq_request_merged(struct request_queue *q, struct request *req, -+ enum elv_merge type) -+{ -+ BUG_ON(req->rq_flags & RQF_DISP_LIST); -+ -+ if (type == ELEVATOR_FRONT_MERGE && -+ rb_prev(&req->rb_node) && -+ blk_rq_pos(req) < -+ blk_rq_pos(container_of(rb_prev(&req->rb_node), -+ struct request, rb_node))) { -+ struct bfq_queue *bfqq = bfq_init_rq(req); -+ struct bfq_data *bfqd = bfqq->bfqd; -+ struct request *prev, *next_rq; -+ -+ /* Reposition request in its sort_list */ -+ elv_rb_del(&bfqq->sort_list, req); -+ BUG_ON(!RQ_BFQQ(req)); -+ BUG_ON(RQ_BFQQ(req) != bfqq); -+ elv_rb_add(&bfqq->sort_list, req); -+ -+ /* Choose next request to be served for bfqq */ -+ prev = bfqq->next_rq; -+ next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req, -+ bfqd->last_position); -+ BUG_ON(!next_rq); -+ -+ bfqq->next_rq = next_rq; -+ -+ bfq_log_bfqq(bfqd, bfqq, -+ "req %p prev %p next_rq %p bfqq %p", -+ req, prev, next_rq, bfqq); -+ -+ /* -+ * If next_rq changes, update both the queue's budget to -+ * fit the new request and the queue's position in its -+ * rq_pos_tree. 
-+ */ -+ if (prev != bfqq->next_rq) { -+ bfq_updated_next_req(bfqd, bfqq); -+ bfq_pos_tree_add_move(bfqd, bfqq); -+ } -+ } -+} -+ -+/* -+ * This function is called to notify the scheduler that the requests -+ * rq and 'next' have been merged, with 'next' going away. BFQ -+ * exploits this hook to address the following issue: if 'next' has a -+ * fifo_time lower that rq, then the fifo_time of rq must be set to -+ * the value of 'next', to not forget the greater age of 'next'. -+ * -+ * NOTE: in this function we assume that rq is in a bfq_queue, basing -+ * on that rq is picked from the hash table q->elevator->hash, which, -+ * in its turn, is filled only with I/O requests present in -+ * bfq_queues, while BFQ is in use for the request queue q. In fact, -+ * the function that fills this hash table (elv_rqhash_add) is called -+ * only by bfq_insert_request. -+ */ -+static void bfq_requests_merged(struct request_queue *q, struct request *rq, -+ struct request *next) -+{ -+ struct bfq_queue *bfqq = bfq_init_rq(rq), -+ *next_bfqq = bfq_init_rq(next); -+ -+ BUG_ON(!RQ_BFQQ(rq)); -+ BUG_ON(!RQ_BFQQ(next)); /* this does not imply next is in a bfqq */ -+ BUG_ON(rq->rq_flags & RQF_DISP_LIST); -+ BUG_ON(next->rq_flags & RQF_DISP_LIST); -+ -+ lockdep_assert_held(&bfqq->bfqd->lock); -+ -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "rq %p next %p bfqq %p next_bfqq %p", -+ rq, next, bfqq, next_bfqq); -+ -+ /* -+ * If next and rq belong to the same bfq_queue and next is older -+ * than rq, then reposition rq in the fifo (by substituting next -+ * with rq). Otherwise, if next and rq belong to different -+ * bfq_queues, never reposition rq: in fact, we would have to -+ * reposition it with respect to next's position in its own fifo, -+ * which would most certainly be too expensive with respect to -+ * the benefits. -+ */ -+ if (bfqq == next_bfqq && -+ !list_empty(&rq->queuelist) && !list_empty(&next->queuelist) && -+ next->fifo_time < rq->fifo_time) { -+ list_del_init(&rq->queuelist); -+ list_replace_init(&next->queuelist, &rq->queuelist); -+ rq->fifo_time = next->fifo_time; -+ } -+ -+ if (bfqq->next_rq == next) -+ bfqq->next_rq = rq; -+ -+ bfqg_stats_update_io_merged(bfqq_group(bfqq), next->cmd_flags); -+} -+ -+/* Must be called with bfqq != NULL */ -+static void bfq_bfqq_end_wr(struct bfq_queue *bfqq) -+{ -+ BUG_ON(!bfqq); -+ -+ if (bfq_bfqq_busy(bfqq)) { -+ bfqq->bfqd->wr_busy_queues--; -+ BUG_ON(bfqq->bfqd->wr_busy_queues < 0); -+ } -+ bfqq->wr_coeff = 1; -+ bfqq->wr_cur_max_time = 0; -+ bfqq->last_wr_start_finish = jiffies; -+ /* -+ * Trigger a weight change on the next invocation of -+ * __bfq_entity_update_weight_prio. 
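/*
 * Editorial sketch, not part of the original patch: the fifo_time rule
 * applied by bfq_requests_merged() above.  When 'next' is folded into
 * 'rq' and both belong to the same queue, rq takes over the smaller
 * (older) fifo_time, so the merged request is not treated as younger
 * than the request it absorbed.  Types are illustrative.
 */
#include <stdbool.h>
#include <stdint.h>

struct fifo_demo_req {
	uint64_t fifo_time;
};

static void fifo_demo_merged(struct fifo_demo_req *rq,
			     struct fifo_demo_req *next, bool same_queue)
{
	if (same_queue && next->fifo_time < rq->fifo_time)
		rq->fifo_time = next->fifo_time;	/* keep the older age */
}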
-+ */ -+ bfqq->entity.prio_changed = 1; -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "wrais ending at %lu, rais_max_time %u", -+ bfqq->last_wr_start_finish, -+ jiffies_to_msecs(bfqq->wr_cur_max_time)); -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "wr_busy %d", -+ bfqq->bfqd->wr_busy_queues); -+} -+ -+static void bfq_end_wr_async_queues(struct bfq_data *bfqd, -+ struct bfq_group *bfqg) -+{ -+ int i, j; -+ -+ for (i = 0; i < 2; i++) -+ for (j = 0; j < IOPRIO_BE_NR; j++) -+ if (bfqg->async_bfqq[i][j]) -+ bfq_bfqq_end_wr(bfqg->async_bfqq[i][j]); -+ if (bfqg->async_idle_bfqq) -+ bfq_bfqq_end_wr(bfqg->async_idle_bfqq); -+} -+ -+static void bfq_end_wr(struct bfq_data *bfqd) -+{ -+ struct bfq_queue *bfqq; -+ -+ spin_lock_irq(&bfqd->lock); -+ -+ list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list) -+ bfq_bfqq_end_wr(bfqq); -+ list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list) -+ bfq_bfqq_end_wr(bfqq); -+ bfq_end_wr_async(bfqd); -+ -+ spin_unlock_irq(&bfqd->lock); -+} -+ -+static sector_t bfq_io_struct_pos(void *io_struct, bool request) -+{ -+ if (request) -+ return blk_rq_pos(io_struct); -+ else -+ return ((struct bio *)io_struct)->bi_iter.bi_sector; -+} -+ -+static int bfq_rq_close_to_sector(void *io_struct, bool request, -+ sector_t sector) -+{ -+ return abs(bfq_io_struct_pos(io_struct, request) - sector) <= -+ BFQQ_CLOSE_THR; -+} -+ -+static struct bfq_queue *bfqq_find_close(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq, -+ sector_t sector) -+{ -+ struct rb_root *root = &bfq_bfqq_to_bfqg(bfqq)->rq_pos_tree; -+ struct rb_node *parent, *node; -+ struct bfq_queue *__bfqq; -+ -+ if (RB_EMPTY_ROOT(root)) -+ return NULL; -+ -+ /* -+ * First, if we find a request starting at the end of the last -+ * request, choose it. -+ */ -+ __bfqq = bfq_rq_pos_tree_lookup(bfqd, root, sector, &parent, NULL); -+ if (__bfqq) -+ return __bfqq; -+ -+ /* -+ * If the exact sector wasn't found, the parent of the NULL leaf -+ * will contain the closest sector (rq_pos_tree sorted by -+ * next_request position). -+ */ -+ __bfqq = rb_entry(parent, struct bfq_queue, pos_node); -+ if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector)) -+ return __bfqq; -+ -+ if (blk_rq_pos(__bfqq->next_rq) < sector) -+ node = rb_next(&__bfqq->pos_node); -+ else -+ node = rb_prev(&__bfqq->pos_node); -+ if (!node) -+ return NULL; -+ -+ __bfqq = rb_entry(node, struct bfq_queue, pos_node); -+ if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector)) -+ return __bfqq; -+ -+ return NULL; -+} -+ -+static struct bfq_queue *bfq_find_close_cooperator(struct bfq_data *bfqd, -+ struct bfq_queue *cur_bfqq, -+ sector_t sector) -+{ -+ struct bfq_queue *bfqq; -+ -+ /* -+ * We shall notice if some of the queues are cooperating, -+ * e.g., working closely on the same area of the device. In -+ * that case, we can group them together and: 1) don't waste -+ * time idling, and 2) serve the union of their requests in -+ * the best possible order for throughput. -+ */ -+ bfqq = bfqq_find_close(bfqd, cur_bfqq, sector); -+ if (!bfqq || bfqq == cur_bfqq) -+ return NULL; -+ -+ return bfqq; -+} -+ -+static struct bfq_queue * -+bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq) -+{ -+ int process_refs, new_process_refs; -+ struct bfq_queue *__bfqq; -+ -+ /* -+ * If there are no process references on the new_bfqq, then it is -+ * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain -+ * may have dropped their last reference (not just their last process -+ * reference). 
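/*
 * Editorial sketch, not part of the original patch: the reference
 * arithmetic that the comment above relies on.  Process references are
 * what remains of a queue's reference count once pending I/O and
 * scheduler bookkeeping references are subtracted; if either queue has
 * none, no process would follow the merge, so merging is refused.
 * Field names are simplified.
 */
#include <stdbool.h>

static int demo_process_refs(int total_refs, int io_refs, int sched_refs)
{
	return total_refs - io_refs - sched_refs;
}

static bool demo_merge_allowed(int refs_a, int io_a, int sched_a,
			       int refs_b, int io_b, int sched_b)
{
	return demo_process_refs(refs_a, io_a, sched_a) > 0 &&
	       demo_process_refs(refs_b, io_b, sched_b) > 0;
}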
-+ */ -+ if (!bfqq_process_refs(new_bfqq)) -+ return NULL; -+ -+ /* Avoid a circular list and skip interim queue merges. */ -+ while ((__bfqq = new_bfqq->new_bfqq)) { -+ if (__bfqq == bfqq) -+ return NULL; -+ new_bfqq = __bfqq; -+ } -+ -+ process_refs = bfqq_process_refs(bfqq); -+ new_process_refs = bfqq_process_refs(new_bfqq); -+ /* -+ * If the process for the bfqq has gone away, there is no -+ * sense in merging the queues. -+ */ -+ if (process_refs == 0 || new_process_refs == 0) -+ return NULL; -+ -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d", -+ new_bfqq->pid); -+ -+ /* -+ * Merging is just a redirection: the requests of the process -+ * owning one of the two queues are redirected to the other queue. -+ * The latter queue, in its turn, is set as shared if this is the -+ * first time that the requests of some process are redirected to -+ * it. -+ * -+ * We redirect bfqq to new_bfqq and not the opposite, because -+ * we are in the context of the process owning bfqq, thus we -+ * have the io_cq of this process. So we can immediately -+ * configure this io_cq to redirect the requests of the -+ * process to new_bfqq. In contrast, the io_cq of new_bfqq is -+ * not available any more (new_bfqq->bic == NULL). -+ * -+ * Anyway, even in case new_bfqq coincides with the in-service -+ * queue, redirecting requests the in-service queue is the -+ * best option, as we feed the in-service queue with new -+ * requests close to the last request served and, by doing so, -+ * are likely to increase the throughput. -+ */ -+ bfqq->new_bfqq = new_bfqq; -+ new_bfqq->ref += process_refs; -+ return new_bfqq; -+} -+ -+static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq, -+ struct bfq_queue *new_bfqq) -+{ -+ if (bfq_too_late_for_merging(new_bfqq)) { -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "too late for bfq%d to be merged", -+ new_bfqq->pid); -+ return false; -+ } -+ -+ if (bfq_class_idle(bfqq) || bfq_class_idle(new_bfqq) || -+ (bfqq->ioprio_class != new_bfqq->ioprio_class)) -+ return false; -+ -+ /* -+ * If either of the queues has already been detected as seeky, -+ * then merging it with the other queue is unlikely to lead to -+ * sequential I/O. -+ */ -+ if (BFQQ_SEEKY(bfqq) || BFQQ_SEEKY(new_bfqq)) -+ return false; -+ -+ /* -+ * Interleaved I/O is known to be done by (some) applications -+ * only for reads, so it does not make sense to merge async -+ * queues. -+ */ -+ if (!bfq_bfqq_sync(bfqq) || !bfq_bfqq_sync(new_bfqq)) -+ return false; -+ -+ return true; -+} -+ -+/* -+ * Attempt to schedule a merge of bfqq with the currently in-service -+ * queue or with a close queue among the scheduled queues. Return -+ * NULL if no merge was scheduled, a pointer to the shared bfq_queue -+ * structure otherwise. -+ * -+ * The OOM queue is not allowed to participate to cooperation: in fact, since -+ * the requests temporarily redirected to the OOM queue could be redirected -+ * again to dedicated queues at any time, the state needed to correctly -+ * handle merging with the OOM queue would be quite complex and expensive -+ * to maintain. Besides, in such a critical condition as an out of memory, -+ * the benefits of queue merging may be little relevant, or even negligible. 
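/*
 * Editorial sketch, not part of the original patch: the merge-chain
 * walk performed by bfq_setup_merge() above.  A queue already
 * scheduled for merging points at its target; the walk follows the
 * chain to its tail and gives up if it would loop back to the starting
 * queue, which would create a circular chain.  Names are illustrative.
 */
#include <stddef.h>

struct merge_demo_queue {
	struct merge_demo_queue *new_queue;	/* merge target, or NULL */
};

static struct merge_demo_queue *
merge_demo_target(struct merge_demo_queue *q, struct merge_demo_queue *cand)
{
	struct merge_demo_queue *next;

	while ((next = cand->new_queue) != NULL) {
		if (next == q)
			return NULL;	/* circular chain: do not merge */
		cand = next;
	}
	return cand;
}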
-+ * -+ * WARNING: queue merging may impair fairness among non-weight raised -+ * queues, for at least two reasons: 1) the original weight of a -+ * merged queue may change during the merged state, 2) even being the -+ * weight the same, a merged queue may be bloated with many more -+ * requests than the ones produced by its originally-associated -+ * process. -+ */ -+static struct bfq_queue * -+bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq, -+ void *io_struct, bool request) -+{ -+ struct bfq_queue *in_service_bfqq, *new_bfqq; -+ -+ /* -+ * Prevent bfqq from being merged if it has been created too -+ * long ago. The idea is that true cooperating processes, and -+ * thus their associated bfq_queues, are supposed to be -+ * created shortly after each other. This is the case, e.g., -+ * for KVM/QEMU and dump I/O threads. Basing on this -+ * assumption, the following filtering greatly reduces the -+ * probability that two non-cooperating processes, which just -+ * happen to do close I/O for some short time interval, have -+ * their queues merged by mistake. -+ */ -+ if (bfq_too_late_for_merging(bfqq)) { -+ bfq_log_bfqq(bfqd, bfqq, -+ "would have looked for coop, but too late"); -+ return NULL; -+ } -+ -+ if (bfqq->new_bfqq) -+ return bfqq->new_bfqq; -+ -+ if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq)) -+ return NULL; -+ -+ /* If there is only one backlogged queue, don't search. */ -+ if (bfq_tot_busy_queues(bfqd) == 1) -+ return NULL; -+ -+ in_service_bfqq = bfqd->in_service_queue; -+ -+ if (in_service_bfqq && in_service_bfqq != bfqq && -+ likely(in_service_bfqq != &bfqd->oom_bfqq) && -+ bfq_rq_close_to_sector(io_struct, request, bfqd->in_serv_last_pos) && -+ bfqq->entity.parent == in_service_bfqq->entity.parent && -+ bfq_may_be_close_cooperator(bfqq, in_service_bfqq)) { -+ new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq); -+ if (new_bfqq) -+ return new_bfqq; -+ } -+ /* -+ * Check whether there is a cooperator among currently scheduled -+ * queues. The only thing we need is that the bio/request is not -+ * NULL, as we need it to establish whether a cooperator exists. -+ */ -+ new_bfqq = bfq_find_close_cooperator(bfqd, bfqq, -+ bfq_io_struct_pos(io_struct, request)); -+ -+ BUG_ON(new_bfqq && bfqq->entity.parent != new_bfqq->entity.parent); -+ -+ if (new_bfqq && likely(new_bfqq != &bfqd->oom_bfqq) && -+ bfq_may_be_close_cooperator(bfqq, new_bfqq)) -+ return bfq_setup_merge(bfqq, new_bfqq); -+ -+ return NULL; -+} -+ -+static void bfq_bfqq_save_state(struct bfq_queue *bfqq) -+{ -+ struct bfq_io_cq *bic = bfqq->bic; -+ -+ /* -+ * If !bfqq->bic, the queue is already shared or its requests -+ * have already been redirected to a shared queue; both idle window -+ * and weight raising state have already been saved. Do nothing. -+ */ -+ if (!bic) -+ return; -+ -+ bic->saved_ttime = bfqq->ttime; -+ bic->saved_has_short_ttime = bfq_bfqq_has_short_ttime(bfqq); -+ bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq); -+ bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq); -+ bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node); -+ if (unlikely(bfq_bfqq_just_created(bfqq) && -+ !bfq_bfqq_in_large_burst(bfqq) && -+ bfqq->bfqd->low_latency)) { -+ /* -+ * bfqq being merged ritgh after being created: bfqq -+ * would have deserved interactive weight raising, but -+ * did not make it to be set in a weight-raised state, -+ * because of this early merge. 
Store directly the -+ * weight-raising state that would have been assigned -+ * to bfqq, so that to avoid that bfqq unjustly fails -+ * to enjoy weight raising if split soon. -+ */ -+ bic->saved_wr_coeff = bfqq->bfqd->bfq_wr_coeff; -+ bic->saved_wr_cur_max_time = bfq_wr_duration(bfqq->bfqd); -+ bic->saved_last_wr_start_finish = jiffies; -+ } else { -+ bic->saved_wr_coeff = bfqq->wr_coeff; -+ bic->saved_wr_start_at_switch_to_srt = -+ bfqq->wr_start_at_switch_to_srt; -+ bic->saved_last_wr_start_finish = bfqq->last_wr_start_finish; -+ bic->saved_wr_cur_max_time = bfqq->wr_cur_max_time; -+ } -+ BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish)); -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "bic %p wr_coeff %d start_finish %lu max_time %lu", -+ bic, bfqq->wr_coeff, bfqq->last_wr_start_finish, -+ bfqq->wr_cur_max_time); -+} -+ -+static void -+bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic, -+ struct bfq_queue *bfqq, struct bfq_queue *new_bfqq) -+{ -+ bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu", -+ (unsigned long) new_bfqq->pid); -+ BUG_ON(bfqq->bic && bfqq->bic == new_bfqq->bic); -+ /* Save weight raising and idle window of the merged queues */ -+ bfq_bfqq_save_state(bfqq); -+ bfq_bfqq_save_state(new_bfqq); -+ -+ if (bfq_bfqq_IO_bound(bfqq)) -+ bfq_mark_bfqq_IO_bound(new_bfqq); -+ bfq_clear_bfqq_IO_bound(bfqq); -+ -+ /* -+ * If bfqq is weight-raised, then let new_bfqq inherit -+ * weight-raising. To reduce false positives, neglect the case -+ * where bfqq has just been created, but has not yet made it -+ * to be weight-raised (which may happen because EQM may merge -+ * bfqq even before bfq_add_request is executed for the first -+ * time for bfqq). Handling this case would however be very -+ * easy, thanks to the flag just_created. -+ */ -+ if (new_bfqq->wr_coeff == 1 && bfqq->wr_coeff > 1) { -+ new_bfqq->wr_coeff = bfqq->wr_coeff; -+ new_bfqq->wr_cur_max_time = bfqq->wr_cur_max_time; -+ new_bfqq->last_wr_start_finish = bfqq->last_wr_start_finish; -+ new_bfqq->wr_start_at_switch_to_srt = -+ bfqq->wr_start_at_switch_to_srt; -+ if (bfq_bfqq_busy(new_bfqq)) { -+ bfqd->wr_busy_queues++; -+ BUG_ON(bfqd->wr_busy_queues > -+ bfq_tot_busy_queues(bfqd)); -+ } -+ -+ new_bfqq->entity.prio_changed = 1; -+ bfq_log_bfqq(bfqd, new_bfqq, -+ "wr start after merge with %d, rais_max_time %u", -+ bfqq->pid, -+ jiffies_to_msecs(bfqq->wr_cur_max_time)); -+ } -+ -+ if (bfqq->wr_coeff > 1) { /* bfqq has given its wr to new_bfqq */ -+ bfqq->wr_coeff = 1; -+ bfqq->entity.prio_changed = 1; -+ if (bfq_bfqq_busy(bfqq)) { -+ bfqd->wr_busy_queues--; -+ BUG_ON(bfqd->wr_busy_queues < 0); -+ } -+ -+ } -+ -+ bfq_log_bfqq(bfqd, new_bfqq, "wr_busy %d", -+ bfqd->wr_busy_queues); -+ -+ /* -+ * Merge queues (that is, let bic redirect its requests to new_bfqq) -+ */ -+ bic_set_bfqq(bic, new_bfqq, 1); -+ bfq_mark_bfqq_coop(new_bfqq); -+ /* -+ * new_bfqq now belongs to at least two bics (it is a shared queue): -+ * set new_bfqq->bic to NULL. bfqq either: -+ * - does not belong to any bic any more, and hence bfqq->bic must -+ * be set to NULL, or -+ * - is a queue whose owning bics have already been redirected to a -+ * different queue, hence the queue is destined to not belong to -+ * any bic soon and bfqq->bic is already NULL (therefore the next -+ * assignment causes no harm). 
-+ */ -+ new_bfqq->bic = NULL; -+ bfqq->bic = NULL; -+ /* release process reference to bfqq */ -+ bfq_put_queue(bfqq); -+} -+ -+static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq, -+ struct bio *bio) -+{ -+ struct bfq_data *bfqd = q->elevator->elevator_data; -+ bool is_sync = op_is_sync(bio->bi_opf); -+ struct bfq_queue *bfqq = bfqd->bio_bfqq, *new_bfqq; -+ -+ assert_spin_locked(&bfqd->lock); -+ /* -+ * Disallow merge of a sync bio into an async request. -+ */ -+ if (is_sync && !rq_is_sync(rq)) -+ return false; -+ -+ /* -+ * Lookup the bfqq that this bio will be queued with. Allow -+ * merge only if rq is queued there. -+ */ -+ BUG_ON(!bfqd->bio_bfqq_set); -+ if (!bfqq) -+ return false; -+ -+ /* -+ * We take advantage of this function to perform an early merge -+ * of the queues of possible cooperating processes. -+ */ -+ new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false); -+ BUG_ON(new_bfqq == bfqq); -+ if (new_bfqq) { -+ /* -+ * bic still points to bfqq, then it has not yet been -+ * redirected to some other bfq_queue, and a queue -+ * merge beween bfqq and new_bfqq can be safely -+ * fulfillled, i.e., bic can be redirected to new_bfqq -+ * and bfqq can be put. -+ */ -+ bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq, -+ new_bfqq); -+ /* -+ * If we get here, bio will be queued into new_queue, -+ * so use new_bfqq to decide whether bio and rq can be -+ * merged. -+ */ -+ bfqq = new_bfqq; -+ -+ /* -+ * Change also bqfd->bio_bfqq, as -+ * bfqd->bio_bic now points to new_bfqq, and -+ * this function may be invoked again (and then may -+ * use again bqfd->bio_bfqq). -+ */ -+ bfqd->bio_bfqq = bfqq; -+ } -+ return bfqq == RQ_BFQQ(rq); -+} -+ -+/* -+ * Set the maximum time for the in-service queue to consume its -+ * budget. This prevents seeky processes from lowering the throughput. -+ * In practice, a time-slice service scheme is used with seeky -+ * processes. -+ */ -+static void bfq_set_budget_timeout(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq) -+{ -+ unsigned int timeout_coeff; -+ -+ if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time) -+ timeout_coeff = 1; -+ else -+ timeout_coeff = bfqq->entity.weight / bfqq->entity.orig_weight; -+ -+ bfqd->last_budget_start = ktime_get(); -+ -+ bfqq->budget_timeout = jiffies + -+ bfqd->bfq_timeout * timeout_coeff; -+ -+ bfq_log_bfqq(bfqd, bfqq, "%u", -+ jiffies_to_msecs(bfqd->bfq_timeout * timeout_coeff)); -+} -+ -+static void __bfq_set_in_service_queue(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq) -+{ -+ if (bfqq) { -+ bfq_clear_bfqq_fifo_expire(bfqq); -+ -+ bfqd->budgets_assigned = (bfqd->budgets_assigned*7 + 256) / 8; -+ -+ BUG_ON(bfqq == bfqd->in_service_queue); -+ BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list)); -+ -+ if (time_is_before_jiffies(bfqq->last_wr_start_finish) && -+ bfqq->wr_coeff > 1 && -+ bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time && -+ time_is_before_jiffies(bfqq->budget_timeout)) { -+ /* -+ * For soft real-time queues, move the start -+ * of the weight-raising period forward by the -+ * time the queue has not received any -+ * service. Otherwise, a relatively long -+ * service delay is likely to cause the -+ * weight-raising period of the queue to end, -+ * because of the short duration of the -+ * weight-raising period of a soft real-time -+ * queue. It is worth noting that this move -+ * is not so dangerous for the other queues, -+ * because soft real-time queues are not -+ * greedy. 
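/*
 * Editorial sketch, not part of the original patch: the time-slice
 * scaling applied by bfq_set_budget_timeout() above.  An interactive
 * weight-raised queue, whose weight is a multiple of its original
 * weight, is allowed a proportionally longer budget timeout, while a
 * queue in soft real-time weight raising keeps the base timeout.  The
 * 125 ms base value and the 30x coefficient in the comment below are
 * assumptions for the sake of the example.
 */
static unsigned int demo_budget_timeout_ms(unsigned int base_timeout_ms,
					   unsigned int weight,
					   unsigned int orig_weight,
					   int soft_rt_raised)
{
	unsigned int coeff = soft_rt_raised ? 1 : weight / orig_weight;

	/* e.g. base 125 ms, weight raised by 30x -> 3750 ms */
	return base_timeout_ms * coeff;
}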
-+ * -+ * To not add a further variable, we use the -+ * overloaded field budget_timeout to -+ * determine for how long the queue has not -+ * received service, i.e., how much time has -+ * elapsed since the queue expired. However, -+ * this is a little imprecise, because -+ * budget_timeout is set to jiffies if bfqq -+ * not only expires, but also remains with no -+ * request. -+ */ -+ if (time_after(bfqq->budget_timeout, -+ bfqq->last_wr_start_finish)) -+ bfqq->last_wr_start_finish += -+ jiffies - bfqq->budget_timeout; -+ else -+ bfqq->last_wr_start_finish = jiffies; -+ -+ if (time_is_after_jiffies(bfqq->last_wr_start_finish)) { -+ pr_crit( -+ "BFQ WARNING:last %lu budget %lu jiffies %lu", -+ bfqq->last_wr_start_finish, -+ bfqq->budget_timeout, -+ jiffies); -+ pr_crit("diff %lu", jiffies - -+ max_t(unsigned long, -+ bfqq->last_wr_start_finish, -+ bfqq->budget_timeout)); -+ bfqq->last_wr_start_finish = jiffies; -+ } -+ } -+ -+ bfq_set_budget_timeout(bfqd, bfqq); -+ bfq_log_bfqq(bfqd, bfqq, -+ "cur-budget = %d prio_class %d", -+ bfqq->entity.budget, bfqq->ioprio_class); -+ } else -+ bfq_log(bfqd, "NULL"); -+ -+ bfqd->in_service_queue = bfqq; -+} -+ -+/* -+ * Get and set a new queue for service. -+ */ -+static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd) -+{ -+ struct bfq_queue *bfqq = bfq_get_next_queue(bfqd); -+ -+ __bfq_set_in_service_queue(bfqd, bfqq); -+ return bfqq; -+} -+ -+static void bfq_arm_slice_timer(struct bfq_data *bfqd) -+{ -+ struct bfq_queue *bfqq = bfqd->in_service_queue; -+ u32 sl; -+ -+ BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list)); -+ -+ bfq_mark_bfqq_wait_request(bfqq); -+ -+ /* -+ * We don't want to idle for seeks, but we do want to allow -+ * fair distribution of slice time for a process doing back-to-back -+ * seeks. So allow a little bit of time for him to submit a new rq. -+ * -+ * To prevent processes with (partly) seeky workloads from -+ * being too ill-treated, grant them a small fraction of the -+ * assigned budget before reducing the waiting time to -+ * BFQ_MIN_TT. This happened to help reduce latency. -+ */ -+ sl = bfqd->bfq_slice_idle; -+ /* -+ * Unless the queue is being weight-raised or the scenario is -+ * asymmetric, grant only minimum idle time if the queue -+ * is seeky. A long idling is preserved for a weight-raised -+ * queue, or, more in general, in an asymemtric scenario, -+ * because a long idling is needed for guaranteeing to a queue -+ * its reserved share of the throughput (in particular, it is -+ * needed if the queue has a higher weight than some other -+ * queue). -+ */ -+ if (BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 && -+ bfq_symmetric_scenario(bfqd)) -+ sl = min_t(u32, sl, BFQ_MIN_TT); -+ -+ bfqd->last_idling_start = ktime_get(); -+ hrtimer_start(&bfqd->idle_slice_timer, ns_to_ktime(sl), -+ HRTIMER_MODE_REL); -+ bfqg_stats_set_start_idle_time(bfqq_group(bfqq)); -+ bfq_log(bfqd, "arm idle: %ld/%ld ms", -+ sl / NSEC_PER_MSEC, bfqd->bfq_slice_idle / NSEC_PER_MSEC); -+} -+ -+/* -+ * In autotuning mode, max_budget is dynamically recomputed as the -+ * amount of sectors transferred in timeout at the estimated peak -+ * rate. This enables BFQ to utilize a full timeslice with a full -+ * budget, even if the in-service queue is served at peak rate. And -+ * this maximises throughput with sequential workloads. 
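/*
 * Editorial sketch, not part of the original patch: the autotuning
 * rule described above as plain arithmetic, ignoring the fixed-point
 * shifts of the real code.  The maximum budget is the number of
 * sectors the device is expected to transfer at the estimated peak
 * rate during one budget timeout.  The figures are assumptions: with a
 * peak rate of 200000 sectors/s (about 100 MB/s with 512-byte sectors)
 * and a 125 ms timeout, the budget is 25000 sectors, about 12.8 MB.
 */
static unsigned long demo_calc_max_budget(unsigned long peak_rate_sects_per_s,
					  unsigned int timeout_ms)
{
	return peak_rate_sects_per_s * timeout_ms / 1000;
}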
-+ */ -+static unsigned long bfq_calc_max_budget(struct bfq_data *bfqd) -+{ -+ return (u64)bfqd->peak_rate * USEC_PER_MSEC * -+ jiffies_to_msecs(bfqd->bfq_timeout)>>BFQ_RATE_SHIFT; -+} -+ -+/* -+ * Update parameters related to throughput and responsiveness, as a -+ * function of the estimated peak rate. See comments on -+ * bfq_calc_max_budget(), and on the ref_wr_duration array. -+ */ -+static void update_thr_responsiveness_params(struct bfq_data *bfqd) -+{ -+ if (bfqd->bfq_user_max_budget == 0) { -+ bfqd->bfq_max_budget = -+ bfq_calc_max_budget(bfqd); -+ BUG_ON(bfqd->bfq_max_budget < 0); -+ bfq_log(bfqd, "new max_budget = %d", -+ bfqd->bfq_max_budget); -+ } -+} -+ -+static void bfq_reset_rate_computation(struct bfq_data *bfqd, struct request *rq) -+{ -+ if (rq != NULL) { /* new rq dispatch now, reset accordingly */ -+ bfqd->last_dispatch = bfqd->first_dispatch = ktime_get_ns() ; -+ bfqd->peak_rate_samples = 1; -+ bfqd->sequential_samples = 0; -+ bfqd->tot_sectors_dispatched = bfqd->last_rq_max_size = -+ blk_rq_sectors(rq); -+ } else /* no new rq dispatched, just reset the number of samples */ -+ bfqd->peak_rate_samples = 0; /* full re-init on next disp. */ -+ -+ bfq_log(bfqd, -+ "at end, sample %u/%u tot_sects %llu", -+ bfqd->peak_rate_samples, bfqd->sequential_samples, -+ bfqd->tot_sectors_dispatched); -+} -+ -+static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq) -+{ -+ u32 rate, weight, divisor; -+ -+ /* -+ * For the convergence property to hold (see comments on -+ * bfq_update_peak_rate()) and for the assessment to be -+ * reliable, a minimum number of samples must be present, and -+ * a minimum amount of time must have elapsed. If not so, do -+ * not compute new rate. Just reset parameters, to get ready -+ * for a new evaluation attempt. -+ */ -+ if (bfqd->peak_rate_samples < BFQ_RATE_MIN_SAMPLES || -+ bfqd->delta_from_first < BFQ_RATE_MIN_INTERVAL) { -+ bfq_log(bfqd, -+ "only resetting, delta_first %lluus samples %d", -+ bfqd->delta_from_first>>10, bfqd->peak_rate_samples); -+ goto reset_computation; -+ } -+ -+ /* -+ * If a new request completion has occurred after last -+ * dispatch, then, to approximate the rate at which requests -+ * have been served by the device, it is more precise to -+ * extend the observation interval to the last completion. -+ */ -+ bfqd->delta_from_first = -+ max_t(u64, bfqd->delta_from_first, -+ bfqd->last_completion - bfqd->first_dispatch); -+ -+ BUG_ON(bfqd->delta_from_first == 0); -+ /* -+ * Rate computed in sects/usec, and not sects/nsec, for -+ * precision issues. -+ */ -+ rate = div64_ul(bfqd->tot_sectors_dispatched<delta_from_first, NSEC_PER_USEC)); -+ -+ bfq_log(bfqd, -+"tot_sects %llu delta_first %lluus rate %llu sects/s (%d)", -+ bfqd->tot_sectors_dispatched, bfqd->delta_from_first>>10, -+ ((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT), -+ rate > 20< 20M sectors/sec) -+ */ -+ if ((bfqd->sequential_samples < (3 * bfqd->peak_rate_samples)>>2 && -+ rate <= bfqd->peak_rate) || -+ rate > 20<peak_rate_samples, bfqd->sequential_samples, -+ ((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT), -+ ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT)); -+ goto reset_computation; -+ } else { -+ bfq_log(bfqd, -+ "do update, samples %u/%u rate/peak %llu/%llu", -+ bfqd->peak_rate_samples, bfqd->sequential_samples, -+ ((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT), -+ ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT)); -+ } -+ -+ /* -+ * We have to update the peak rate, at last! To this purpose, -+ * we use a low-pass filter. 
We compute the smoothing constant -+ * of the filter as a function of the 'weight' of the new -+ * measured rate. -+ * -+ * As can be seen in next formulas, we define this weight as a -+ * quantity proportional to how sequential the workload is, -+ * and to how long the observation time interval is. -+ * -+ * The weight runs from 0 to 8. The maximum value of the -+ * weight, 8, yields the minimum value for the smoothing -+ * constant. At this minimum value for the smoothing constant, -+ * the measured rate contributes for half of the next value of -+ * the estimated peak rate. -+ * -+ * So, the first step is to compute the weight as a function -+ * of how sequential the workload is. Note that the weight -+ * cannot reach 9, because bfqd->sequential_samples cannot -+ * become equal to bfqd->peak_rate_samples, which, in its -+ * turn, holds true because bfqd->sequential_samples is not -+ * incremented for the first sample. -+ */ -+ weight = (9 * bfqd->sequential_samples) / bfqd->peak_rate_samples; -+ -+ /* -+ * Second step: further refine the weight as a function of the -+ * duration of the observation interval. -+ */ -+ weight = min_t(u32, 8, -+ div_u64(weight * bfqd->delta_from_first, -+ BFQ_RATE_REF_INTERVAL)); -+ -+ /* -+ * Divisor ranging from 10, for minimum weight, to 2, for -+ * maximum weight. -+ */ -+ divisor = 10 - weight; -+ BUG_ON(divisor == 0); -+ -+ /* -+ * Finally, update peak rate: -+ * -+ * peak_rate = peak_rate * (divisor-1) / divisor + rate / divisor -+ */ -+ bfqd->peak_rate *= divisor-1; -+ bfqd->peak_rate /= divisor; -+ rate /= divisor; /* smoothing constant alpha = 1/divisor */ -+ -+ bfq_log(bfqd, -+ "divisor %d tmp_peak_rate %llu tmp_rate %u", -+ divisor, -+ ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT), -+ (u32)((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT)); -+ -+ BUG_ON(bfqd->peak_rate == 0); -+ BUG_ON(bfqd->peak_rate > 20<peak_rate += rate; -+ -+ /* -+ * For a very slow device, bfqd->peak_rate can reach 0 (see -+ * the minimum representable values reported in the comments -+ * on BFQ_RATE_SHIFT). Push to 1 if this happens, to avoid -+ * divisions by zero where bfqd->peak_rate is used as a -+ * divisor. -+ */ -+ bfqd->peak_rate = max_t(u32, 1, bfqd->peak_rate); -+ -+ update_thr_responsiveness_params(bfqd); -+ BUG_ON(bfqd->peak_rate > 20<peak_rate_samples == 0) { /* first dispatch */ -+ bfq_log(bfqd, -+ "goto reset, samples %d", -+ bfqd->peak_rate_samples) ; -+ bfq_reset_rate_computation(bfqd, rq); -+ goto update_last_values; /* will add one sample */ -+ } -+ -+ /* -+ * Device idle for very long: the observation interval lasting -+ * up to this dispatch cannot be a valid observation interval -+ * for computing a new peak rate (similarly to the late- -+ * completion event in bfq_completed_request()). 
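The smoothing step of bfq_update_rate_reset() above can also be read in isolation. Below is a minimal userspace C sketch of the same arithmetic, with the BFQ_RATE_SHIFT fixed-point scaling dropped and every name and sample value invented for illustration (nothing here is taken verbatim from the patch):

#include <stdio.h>

/*
 * weight grows from 0 to 8 with the fraction of sequential samples and
 * with the length of the observation interval; divisor = 10 - weight, so
 * the smoothing constant alpha = 1/divisor runs from 1/10 (sample barely
 * trusted) to 1/2 (long, mostly sequential observation interval).
 */
static unsigned long lowpass_update(unsigned long peak_rate,
				    unsigned long rate,
				    unsigned int seq_samples,
				    unsigned int tot_samples,
				    unsigned long long interval_ns,
				    unsigned long long ref_interval_ns)
{
	unsigned int weight, divisor;

	weight = (9 * seq_samples) / tot_samples; /* < 9, as seq < tot */
	weight = weight * interval_ns / ref_interval_ns;
	if (weight > 8)
		weight = 8;
	divisor = 10 - weight;

	/* peak_rate = peak_rate * (divisor-1) / divisor + rate / divisor */
	return peak_rate * (divisor - 1) / divisor + rate / divisor;
}

int main(void)
{
	/* hypothetical sample: 30 of 40 samples sequential, observation
	 * interval as long as the reference interval */
	printf("%lu\n", lowpass_update(1000, 2000, 30, 40, 300, 300));
	return 0;
}

With those hypothetical numbers, weight = 6 and divisor = 4, so the new estimate is three quarters of the old one plus one quarter of the new sample (the program prints 1250).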
Go to -+ * update_rate_and_reset to have the following three steps -+ * taken: -+ * - close the observation interval at the last (previous) -+ * request dispatch or completion -+ * - compute rate, if possible, for that observation interval -+ * - start a new observation interval with this dispatch -+ */ -+ if (now_ns - bfqd->last_dispatch > 100*NSEC_PER_MSEC && -+ bfqd->rq_in_driver == 0) { -+ bfq_log(bfqd, -+"jumping to updating&resetting delta_last %lluus samples %d", -+ (now_ns - bfqd->last_dispatch)>>10, -+ bfqd->peak_rate_samples) ; -+ goto update_rate_and_reset; -+ } -+ -+ /* Update sampling information */ -+ bfqd->peak_rate_samples++; -+ -+ if ((bfqd->rq_in_driver > 0 || -+ now_ns - bfqd->last_completion < BFQ_MIN_TT) -+ && !BFQ_RQ_SEEKY(bfqd, bfqd->last_position, rq)) -+ bfqd->sequential_samples++; -+ -+ bfqd->tot_sectors_dispatched += blk_rq_sectors(rq); -+ -+ /* Reset max observed rq size every 32 dispatches */ -+ if (likely(bfqd->peak_rate_samples % 32)) -+ bfqd->last_rq_max_size = -+ max_t(u32, blk_rq_sectors(rq), bfqd->last_rq_max_size); -+ else -+ bfqd->last_rq_max_size = blk_rq_sectors(rq); -+ -+ bfqd->delta_from_first = now_ns - bfqd->first_dispatch; -+ -+ bfq_log(bfqd, -+ "added samples %u/%u tot_sects %llu delta_first %lluus", -+ bfqd->peak_rate_samples, bfqd->sequential_samples, -+ bfqd->tot_sectors_dispatched, -+ bfqd->delta_from_first>>10); -+ -+ /* Target observation interval not yet reached, go on sampling */ -+ if (bfqd->delta_from_first < BFQ_RATE_REF_INTERVAL) -+ goto update_last_values; -+ -+update_rate_and_reset: -+ bfq_update_rate_reset(bfqd, rq); -+update_last_values: -+ bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq); -+ if (RQ_BFQQ(rq) == bfqd->in_service_queue) -+ bfqd->in_serv_last_pos = bfqd->last_position; -+ bfqd->last_dispatch = now_ns; -+ -+ bfq_log(bfqd, -+ "delta_first %lluus last_pos %llu peak_rate %llu", -+ (now_ns - bfqd->first_dispatch)>>10, -+ (unsigned long long) bfqd->last_position, -+ ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT)); -+ bfq_log(bfqd, -+ "samples at end %d", bfqd->peak_rate_samples); -+} -+ -+/* -+ * Remove request from internal lists. -+ */ -+static void bfq_dispatch_remove(struct request_queue *q, struct request *rq) -+{ -+ struct bfq_queue *bfqq = RQ_BFQQ(rq); -+ -+ /* -+ * For consistency, the next instruction should have been -+ * executed after removing the request from the queue and -+ * dispatching it. We execute instead this instruction before -+ * bfq_remove_request() (and hence introduce a temporary -+ * inconsistency), for efficiency. In fact, should this -+ * dispatch occur for a non in-service bfqq, this anticipated -+ * increment prevents two counters related to bfqq->dispatched -+ * from risking to be, first, uselessly decremented, and then -+ * incremented again when the (new) value of bfqq->dispatched -+ * happens to be taken into account. -+ */ -+ bfqq->dispatched++; -+ bfq_update_peak_rate(q->elevator->elevator_data, rq); -+ -+ bfq_remove_request(q, rq); -+} -+ -+static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq) -+{ -+ BUG_ON(bfqq != bfqd->in_service_queue); -+ -+ /* -+ * If this bfqq is shared between multiple processes, check -+ * to make sure that those processes are still issuing I/Os -+ * within the mean seek distance. If not, it may be time to -+ * break the queues apart again. 
-+ */ -+ if (bfq_bfqq_coop(bfqq) && BFQQ_SEEKY(bfqq)) -+ bfq_mark_bfqq_split_coop(bfqq); -+ -+ if (RB_EMPTY_ROOT(&bfqq->sort_list)) { -+ if (bfqq->dispatched == 0) -+ /* -+ * Overloading budget_timeout field to store -+ * the time at which the queue remains with no -+ * backlog and no outstanding request; used by -+ * the weight-raising mechanism. -+ */ -+ bfqq->budget_timeout = jiffies; -+ -+ bfq_del_bfqq_busy(bfqd, bfqq, true); -+ } else { -+ bfq_requeue_bfqq(bfqd, bfqq, true); -+ /* -+ * Resort priority tree of potential close cooperators. -+ */ -+ bfq_pos_tree_add_move(bfqd, bfqq); -+ } -+ -+ /* -+ * All in-service entities must have been properly deactivated -+ * or requeued before executing the next function, which -+ * resets all in-service entites as no more in service. -+ */ -+ __bfq_bfqd_reset_in_service(bfqd); -+} -+ -+/** -+ * __bfq_bfqq_recalc_budget - try to adapt the budget to the @bfqq behavior. -+ * @bfqd: device data. -+ * @bfqq: queue to update. -+ * @reason: reason for expiration. -+ * -+ * Handle the feedback on @bfqq budget at queue expiration. -+ * See the body for detailed comments. -+ */ -+static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq, -+ enum bfqq_expiration reason) -+{ -+ struct request *next_rq; -+ int budget, min_budget; -+ -+ BUG_ON(bfqq != bfqd->in_service_queue); -+ -+ min_budget = bfq_min_budget(bfqd); -+ -+ if (bfqq->wr_coeff == 1) -+ budget = bfqq->max_budget; -+ else /* -+ * Use a constant, low budget for weight-raised queues, -+ * to help achieve a low latency. Keep it slightly higher -+ * than the minimum possible budget, to cause a little -+ * bit fewer expirations. -+ */ -+ budget = 2 * min_budget; -+ -+ bfq_log_bfqq(bfqd, bfqq, "last budg %d, budg left %d", -+ bfqq->entity.budget, bfq_bfqq_budget_left(bfqq)); -+ bfq_log_bfqq(bfqd, bfqq, "last max_budg %d, min budg %d", -+ budget, bfq_min_budget(bfqd)); -+ bfq_log_bfqq(bfqd, bfqq, "sync %d, seeky %d", -+ bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue)); -+ -+ if (bfq_bfqq_sync(bfqq) && bfqq->wr_coeff == 1) { -+ switch (reason) { -+ /* -+ * Caveat: in all the following cases we trade latency -+ * for throughput. -+ */ -+ case BFQ_BFQQ_TOO_IDLE: -+ /* -+ * This is the only case where we may reduce -+ * the budget: if there is no request of the -+ * process still waiting for completion, then -+ * we assume (tentatively) that the timer has -+ * expired because the batch of requests of -+ * the process could have been served with a -+ * smaller budget. Hence, betting that -+ * process will behave in the same way when it -+ * becomes backlogged again, we reduce its -+ * next budget. As long as we guess right, -+ * this budget cut reduces the latency -+ * experienced by the process. -+ * -+ * However, if there are still outstanding -+ * requests, then the process may have not yet -+ * issued its next request just because it is -+ * still waiting for the completion of some of -+ * the still outstanding ones. So in this -+ * subcase we do not reduce its budget, on the -+ * contrary we increase it to possibly boost -+ * the throughput, as discussed in the -+ * comments to the BUDGET_TIMEOUT case. 
-+ */ -+ if (bfqq->dispatched > 0) /* still outstanding reqs */ -+ budget = min(budget * 2, bfqd->bfq_max_budget); -+ else { -+ if (budget > 5 * min_budget) -+ budget -= 4 * min_budget; -+ else -+ budget = min_budget; -+ } -+ break; -+ case BFQ_BFQQ_BUDGET_TIMEOUT: -+ /* -+ * We double the budget here because it gives -+ * the chance to boost the throughput if this -+ * is not a seeky process (and has bumped into -+ * this timeout because of, e.g., ZBR). -+ */ -+ budget = min(budget * 2, bfqd->bfq_max_budget); -+ break; -+ case BFQ_BFQQ_BUDGET_EXHAUSTED: -+ /* -+ * The process still has backlog, and did not -+ * let either the budget timeout or the disk -+ * idling timeout expire. Hence it is not -+ * seeky, has a short thinktime and may be -+ * happy with a higher budget too. So -+ * definitely increase the budget of this good -+ * candidate to boost the disk throughput. -+ */ -+ budget = min(budget * 4, bfqd->bfq_max_budget); -+ break; -+ case BFQ_BFQQ_NO_MORE_REQUESTS: -+ /* -+ * For queues that expire for this reason, it -+ * is particularly important to keep the -+ * budget close to the actual service they -+ * need. Doing so reduces the timestamp -+ * misalignment problem described in the -+ * comments in the body of -+ * __bfq_activate_entity. In fact, suppose -+ * that a queue systematically expires for -+ * BFQ_BFQQ_NO_MORE_REQUESTS and presents a -+ * new request in time to enjoy timestamp -+ * back-shifting. The larger the budget of the -+ * queue is with respect to the service the -+ * queue actually requests in each service -+ * slot, the more times the queue can be -+ * reactivated with the same virtual finish -+ * time. It follows that, even if this finish -+ * time is pushed to the system virtual time -+ * to reduce the consequent timestamp -+ * misalignment, the queue unjustly enjoys for -+ * many re-activations a lower finish time -+ * than all newly activated queues. -+ * -+ * The service needed by bfqq is measured -+ * quite precisely by bfqq->entity.service. -+ * Since bfqq does not enjoy device idling, -+ * bfqq->entity.service is equal to the number -+ * of sectors that the process associated with -+ * bfqq requested to read/write before waiting -+ * for request completions, or blocking for -+ * other reasons. -+ */ -+ budget = max_t(int, bfqq->entity.service, min_budget); -+ break; -+ default: -+ return; -+ } -+ } else if (!bfq_bfqq_sync(bfqq)) -+ /* -+ * Async queues get always the maximum possible -+ * budget, as for them we do not care about latency -+ * (in addition, their ability to dispatch is limited -+ * by the charging factor). -+ */ -+ budget = bfqd->bfq_max_budget; -+ -+ bfqq->max_budget = budget; -+ -+ if (bfqd->budgets_assigned >= bfq_stats_min_budgets && -+ !bfqd->bfq_user_max_budget) -+ bfqq->max_budget = min(bfqq->max_budget, bfqd->bfq_max_budget); -+ -+ /* -+ * If there is still backlog, then assign a new budget, making -+ * sure that it is large enough for the next request. Since -+ * the finish time of bfqq must be kept in sync with the -+ * budget, be sure to call __bfq_bfqq_expire() *after* this -+ * update. -+ * -+ * If there is no backlog, then no need to update the budget; -+ * it will be updated on the arrival of a new request. 
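For reference while reading the backlog handling that follows, the switch above can be condensed into a single pure function. The sketch below only illustrates those feedback rules for a sync, non-weight-raised queue (as coded above, weight-raised queues keep the small 2 * min_budget budget and async queues always get the device-wide maximum); all names and the MIN/MAX macros are local to the sketch:

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

enum expiration {
	TOO_IDLE,	  /* queue stopped issuing I/O before its timeout */
	BUDGET_TIMEOUT,	  /* budget not consumed within the timeslice */
	BUDGET_EXHAUSTED, /* budget fully consumed before the timeout */
	NO_MORE_REQUESTS, /* queue emptied */
};

/* Budgets are in sectors, as in the code above. */
static int next_budget(enum expiration reason, int budget, int min_budget,
		       int max_budget, int outstanding_reqs, int service)
{
	switch (reason) {
	case TOO_IDLE:
		if (outstanding_reqs)	/* may still be greedy: grow */
			return MIN(budget * 2, max_budget);
		/* whole batch fit in a smaller budget: shrink */
		return budget > 5 * min_budget ?
			budget - 4 * min_budget : min_budget;
	case BUDGET_TIMEOUT:		/* possibly just a slow disk zone */
		return MIN(budget * 2, max_budget);
	case BUDGET_EXHAUSTED:		/* sequential and greedy: grow fast */
		return MIN(budget * 4, max_budget);
	case NO_MORE_REQUESTS:		/* track the service actually needed */
		return MAX(service, min_budget);
	}
	return budget;
}

int main(void)
{
	/* hypothetical values: 32768-sector budget, 65536-sector cap */
	printf("%d\n", next_budget(BUDGET_EXHAUSTED, 32768, 2048, 65536, 0, 0));
	return 0;
}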
-+ */ -+ next_rq = bfqq->next_rq; -+ if (next_rq) { -+ BUG_ON(reason == BFQ_BFQQ_TOO_IDLE || -+ reason == BFQ_BFQQ_NO_MORE_REQUESTS); -+ bfqq->entity.budget = max_t(unsigned long, bfqq->max_budget, -+ bfq_serv_to_charge(next_rq, bfqq)); -+ BUG_ON(!bfq_bfqq_busy(bfqq)); -+ BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list)); -+ } -+ -+ bfq_log_bfqq(bfqd, bfqq, "head sect: %u, new budget %d", -+ next_rq ? blk_rq_sectors(next_rq) : 0, -+ bfqq->entity.budget); -+} -+ -+/* -+ * Return true if the process associated with bfqq is "slow". The slow -+ * flag is used, in addition to the budget timeout, to reduce the -+ * amount of service provided to seeky processes, and thus reduce -+ * their chances to lower the throughput. More details in the comments -+ * on the function bfq_bfqq_expire(). -+ * -+ * An important observation is in order: as discussed in the comments -+ * on the function bfq_update_peak_rate(), with devices with internal -+ * queues, it is hard if ever possible to know when and for how long -+ * an I/O request is processed by the device (apart from the trivial -+ * I/O pattern where a new request is dispatched only after the -+ * previous one has been completed). This makes it hard to evaluate -+ * the real rate at which the I/O requests of each bfq_queue are -+ * served. In fact, for an I/O scheduler like BFQ, serving a -+ * bfq_queue means just dispatching its requests during its service -+ * slot (i.e., until the budget of the queue is exhausted, or the -+ * queue remains idle, or, finally, a timeout fires). But, during the -+ * service slot of a bfq_queue, around 100 ms at most, the device may -+ * be even still processing requests of bfq_queues served in previous -+ * service slots. On the opposite end, the requests of the in-service -+ * bfq_queue may be completed after the service slot of the queue -+ * finishes. -+ * -+ * Anyway, unless more sophisticated solutions are used -+ * (where possible), the sum of the sizes of the requests dispatched -+ * during the service slot of a bfq_queue is probably the only -+ * approximation available for the service received by the bfq_queue -+ * during its service slot. And this sum is the quantity used in this -+ * function to evaluate the I/O speed of a process. -+ */ -+static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq, -+ bool compensate, enum bfqq_expiration reason, -+ unsigned long *delta_ms) -+{ -+ ktime_t delta_ktime; -+ u32 delta_usecs; -+ bool slow = BFQQ_SEEKY(bfqq); /* if delta too short, use seekyness */ -+ -+ if (!bfq_bfqq_sync(bfqq)) -+ return false; -+ -+ if (compensate) -+ delta_ktime = bfqd->last_idling_start; -+ else -+ delta_ktime = ktime_get(); -+ delta_ktime = ktime_sub(delta_ktime, bfqd->last_budget_start); -+ delta_usecs = ktime_to_us(delta_ktime); -+ -+ /* don't use too short time intervals */ -+ if (delta_usecs < 1000) { -+ if (blk_queue_nonrot(bfqd->queue)) -+ /* -+ * give same worst-case guarantees as idling -+ * for seeky -+ */ -+ *delta_ms = BFQ_MIN_TT / NSEC_PER_MSEC; -+ else /* charge at least one seek */ -+ *delta_ms = bfq_slice_idle / NSEC_PER_MSEC; -+ -+ bfq_log(bfqd, "too short %u", delta_usecs); -+ -+ return slow; -+ } -+ -+ *delta_ms = delta_usecs / USEC_PER_MSEC; -+ -+ /* -+ * Use only long (> 20ms) intervals to filter out excessive -+ * spikes in service rate estimation. -+ */ -+ if (delta_usecs > 20000) { -+ /* -+ * Caveat for rotational devices: processes doing I/O -+ * in the slower disk zones tend to be slow(er) even -+ * if not seeky. 
In this respect, the estimated peak -+ * rate is likely to be an average over the disk -+ * surface. Accordingly, to not be too harsh with -+ * unlucky processes, a process is deemed slow only if -+ * its rate has been lower than half of the estimated -+ * peak rate. -+ */ -+ slow = bfqq->entity.service < bfqd->bfq_max_budget / 2; -+ bfq_log(bfqd, "relative rate %d/%d", -+ bfqq->entity.service, bfqd->bfq_max_budget); -+ } -+ -+ bfq_log_bfqq(bfqd, bfqq, "slow %d", slow); -+ -+ return slow; -+} -+ -+/* -+ * To be deemed as soft real-time, an application must meet two -+ * requirements. First, the application must not require an average -+ * bandwidth higher than the approximate bandwidth required to playback or -+ * record a compressed high-definition video. -+ * The next function is invoked on the completion of the last request of a -+ * batch, to compute the next-start time instant, soft_rt_next_start, such -+ * that, if the next request of the application does not arrive before -+ * soft_rt_next_start, then the above requirement on the bandwidth is met. -+ * -+ * The second requirement is that the request pattern of the application is -+ * isochronous, i.e., that, after issuing a request or a batch of requests, -+ * the application stops issuing new requests until all its pending requests -+ * have been completed. After that, the application may issue a new batch, -+ * and so on. -+ * For this reason the next function is invoked to compute -+ * soft_rt_next_start only for applications that meet this requirement, -+ * whereas soft_rt_next_start is set to infinity for applications that do -+ * not. -+ * -+ * Unfortunately, even a greedy (i.e., I/O-bound) application may -+ * happen to meet, occasionally or systematically, both the above -+ * bandwidth and isochrony requirements. This may happen at least in -+ * the following circumstances. First, if the CPU load is high. The -+ * application may stop issuing requests while the CPUs are busy -+ * serving other processes, then restart, then stop again for a while, -+ * and so on. The other circumstances are related to the storage -+ * device: the storage device is highly loaded or reaches a low-enough -+ * throughput with the I/O of the application (e.g., because the I/O -+ * is random and/or the device is slow). In all these cases, the -+ * I/O of the application may be simply slowed down enough to meet -+ * the bandwidth and isochrony requirements. To reduce the probability -+ * that greedy applications are deemed as soft real-time in these -+ * corner cases, a further rule is used in the computation of -+ * soft_rt_next_start: the return value of this function is forced to -+ * be higher than the maximum between the following two quantities. -+ * -+ * (a) Current time plus: (1) the maximum time for which the arrival -+ * of a request is waited for when a sync queue becomes idle, -+ * namely bfqd->bfq_slice_idle, and (2) a few extra jiffies. We -+ * postpone for a moment the reason for adding a few extra -+ * jiffies; we get back to it after next item (b). Lower-bounding -+ * the return value of this function with the current time plus -+ * bfqd->bfq_slice_idle tends to filter out greedy applications, -+ * because the latter issue their next request as soon as possible -+ * after the last one has been completed. In contrast, a soft -+ * real-time application spends some time processing data, after a -+ * batch of its requests has been completed. -+ * -+ * (b) Current value of bfqq->soft_rt_next_start. 
As pointed out -+ * above, greedy applications may happen to meet both the -+ * bandwidth and isochrony requirements under heavy CPU or -+ * storage-device load. In more detail, in these scenarios, these -+ * applications happen, only for limited time periods, to do I/O -+ * slowly enough to meet all the requirements described so far, -+ * including the filtering in above item (a). These slow-speed -+ * time intervals are usually interspersed between other time -+ * intervals during which these applications do I/O at a very high -+ * speed. Fortunately, exactly because of the high speed of the -+ * I/O in the high-speed intervals, the values returned by this -+ * function happen to be so high, near the end of any such -+ * high-speed interval, to be likely to fall *after* the end of -+ * the low-speed time interval that follows. These high values are -+ * stored in bfqq->soft_rt_next_start after each invocation of -+ * this function. As a consequence, if the last value of -+ * bfqq->soft_rt_next_start is constantly used to lower-bound the -+ * next value that this function may return, then, from the very -+ * beginning of a low-speed interval, bfqq->soft_rt_next_start is -+ * likely to be constantly kept so high that any I/O request -+ * issued during the low-speed interval is considered as arriving -+ * to soon for the application to be deemed as soft -+ * real-time. Then, in the high-speed interval that follows, the -+ * application will not be deemed as soft real-time, just because -+ * it will do I/O at a high speed. And so on. -+ * -+ * Getting back to the filtering in item (a), in the following two -+ * cases this filtering might be easily passed by a greedy -+ * application, if the reference quantity was just -+ * bfqd->bfq_slice_idle: -+ * 1) HZ is so low that the duration of a jiffy is comparable to or -+ * higher than bfqd->bfq_slice_idle. This happens, e.g., on slow -+ * devices with HZ=100. The time granularity may be so coarse -+ * that the approximation, in jiffies, of bfqd->bfq_slice_idle -+ * is rather lower than the exact value. -+ * 2) jiffies, instead of increasing at a constant rate, may stop increasing -+ * for a while, then suddenly 'jump' by several units to recover the lost -+ * increments. This seems to happen, e.g., inside virtual machines. -+ * To address this issue, in the filtering in (a) we do not use as a -+ * reference time interval just bfqd->bfq_slice_idle, but -+ * bfqd->bfq_slice_idle plus a few jiffies. In particular, we add the -+ * minimum number of jiffies for which the filter seems to be quite -+ * precise also in embedded systems and KVM/QEMU virtual machines. -+ */ -+static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq) -+{ -+ bfq_log_bfqq(bfqd, bfqq, -+"service_blkg %lu soft_rate %u sects/sec interval %u", -+ bfqq->service_from_backlogged, -+ bfqd->bfq_wr_max_softrt_rate, -+ jiffies_to_msecs(HZ * bfqq->service_from_backlogged / -+ bfqd->bfq_wr_max_softrt_rate)); -+ -+ return max3(bfqq->soft_rt_next_start, -+ bfqq->last_idle_bklogged + -+ HZ * bfqq->service_from_backlogged / -+ bfqd->bfq_wr_max_softrt_rate, -+ jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4); -+} -+ -+static bool bfq_bfqq_injectable(struct bfq_queue *bfqq) -+{ -+ return BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 && -+ blk_queue_nonrot(bfqq->bfqd->queue) && -+ bfqq->bfqd->hw_tag; -+} -+ -+/** -+ * bfq_bfqq_expire - expire a queue. -+ * @bfqd: device owning the queue. -+ * @bfqq: the queue to expire. 
-+ * @compensate: if true, compensate for the time spent idling. -+ * @reason: the reason causing the expiration. -+ * -+ * If the process associated with bfqq does slow I/O (e.g., because it -+ * issues random requests), we charge bfqq with the time it has been -+ * in service instead of the service it has received (see -+ * bfq_bfqq_charge_time for details on how this goal is achieved). As -+ * a consequence, bfqq will typically get higher timestamps upon -+ * reactivation, and hence it will be rescheduled as if it had -+ * received more service than what it has actually received. In the -+ * end, bfqq receives less service in proportion to how slowly its -+ * associated process consumes its budgets (and hence how seriously it -+ * tends to lower the throughput). In addition, this time-charging -+ * strategy guarantees time fairness among slow processes. In -+ * contrast, if the process associated with bfqq is not slow, we -+ * charge bfqq exactly with the service it has received. -+ * -+ * Charging time to the first type of queues and the exact service to -+ * the other has the effect of using the WF2Q+ policy to schedule the -+ * former on a timeslice basis, without violating service domain -+ * guarantees among the latter. -+ */ -+static void bfq_bfqq_expire(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq, -+ bool compensate, -+ enum bfqq_expiration reason) -+{ -+ bool slow; -+ unsigned long delta = 0; -+ struct bfq_entity *entity = &bfqq->entity; -+ int ref; -+ -+ BUG_ON(bfqq != bfqd->in_service_queue); -+ -+ /* -+ * Check whether the process is slow (see bfq_bfqq_is_slow). -+ */ -+ slow = bfq_bfqq_is_slow(bfqd, bfqq, compensate, reason, &delta); -+ -+ /* -+ * As above explained, charge slow (typically seeky) and -+ * timed-out queues with the time and not the service -+ * received, to favor sequential workloads. -+ * -+ * Processes doing I/O in the slower disk zones will tend to -+ * be slow(er) even if not seeky. Therefore, since the -+ * estimated peak rate is actually an average over the disk -+ * surface, these processes may timeout just for bad luck. To -+ * avoid punishing them, do not charge time to processes that -+ * succeeded in consuming at least 2/3 of their budget. This -+ * allows BFQ to preserve enough elasticity to still perform -+ * bandwidth, and not time, distribution with little unlucky -+ * or quasi-sequential processes. -+ */ -+ if (bfqq->wr_coeff == 1 && -+ (slow || -+ (reason == BFQ_BFQQ_BUDGET_TIMEOUT && -+ bfq_bfqq_budget_left(bfqq) >= entity->budget / 3))) -+ bfq_bfqq_charge_time(bfqd, bfqq, delta); -+ -+ BUG_ON(bfqq->entity.budget < bfqq->entity.service); -+ -+ if (reason == BFQ_BFQQ_TOO_IDLE && -+ entity->service <= 2 * entity->budget / 10) -+ bfq_clear_bfqq_IO_bound(bfqq); -+ -+ if (bfqd->low_latency && bfqq->wr_coeff == 1) -+ bfqq->last_wr_start_finish = jiffies; -+ -+ if (bfqd->low_latency && bfqd->bfq_wr_max_softrt_rate > 0 && -+ RB_EMPTY_ROOT(&bfqq->sort_list)) { -+ /* -+ * If we get here, and there are no outstanding -+ * requests, then the request pattern is isochronous -+ * (see the comments on the function -+ * bfq_bfqq_softrt_next_start()). Thus we can compute -+ * soft_rt_next_start. And we do it, unless bfqq is in -+ * interactive weight raising. We do not do it in the -+ * latter subcase, for the following reason. bfqq may -+ * be conveying the I/O needed to load a soft -+ * real-time application. Such an application will -+ * actually exhibit a soft real-time I/O pattern after -+ * it finally starts doing its job. 
But, if -+ * soft_rt_next_start is computed here for an -+ * interactive bfqq, and bfqq had received a lot of -+ * service before remaining with no outstanding -+ * request (likely to happen on a fast device), then -+ * soft_rt_next_start would be assigned such a high -+ * value that, for a very long time, bfqq would be -+ * prevented from being possibly considered as soft -+ * real time. -+ * -+ * If, instead, the queue still has outstanding -+ * requests, then we have to wait for the completion -+ * of all the outstanding requests to discover whether -+ * the request pattern is actually isochronous. -+ */ -+ BUG_ON(bfq_tot_busy_queues(bfqd) < 1); -+ if (bfqq->dispatched == 0 && -+ bfqq->wr_coeff != bfqd->bfq_wr_coeff) { -+ bfqq->soft_rt_next_start = -+ bfq_bfqq_softrt_next_start(bfqd, bfqq); -+ bfq_log_bfqq(bfqd, bfqq, "new soft_rt_next %lu", -+ bfqq->soft_rt_next_start); -+ } else if (bfqq->dispatched > 0) { -+ /* -+ * Schedule an update of soft_rt_next_start to when -+ * the task may be discovered to be isochronous. -+ */ -+ bfq_mark_bfqq_softrt_update(bfqq); -+ } -+ } -+ -+ bfq_log_bfqq(bfqd, bfqq, -+ "expire (%s, slow %d, num_disp %d, short %d, weight %d, serv %d/%d)", -+ reason_name[reason], slow, bfqq->dispatched, -+ bfq_bfqq_has_short_ttime(bfqq), entity->weight, -+ entity->service, entity->budget); -+ -+ /* -+ * Increase, decrease or leave budget unchanged according to -+ * reason. -+ */ -+ BUG_ON(bfqq->entity.budget < bfqq->entity.service); -+ __bfq_bfqq_recalc_budget(bfqd, bfqq, reason); -+ BUG_ON(bfqq->next_rq == NULL && -+ bfqq->entity.budget < bfqq->entity.service); -+ ref = bfqq->ref; -+ __bfq_bfqq_expire(bfqd, bfqq); -+ -+ if (ref == 1) /* bfqq is gone, no more actions on it */ -+ return; -+ -+ BUG_ON(ref > 1 && -+ !bfq_bfqq_busy(bfqq) && reason == BFQ_BFQQ_BUDGET_EXHAUSTED && -+ !bfq_class_idle(bfqq)); -+ -+ bfqq->injected_service = 0; -+ -+ /* mark bfqq as waiting a request only if a bic still points to it */ -+ if (!bfq_bfqq_busy(bfqq) && -+ reason != BFQ_BFQQ_BUDGET_TIMEOUT && -+ reason != BFQ_BFQQ_BUDGET_EXHAUSTED) { -+ BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list)); -+ BUG_ON(bfqq->next_rq); -+ bfq_mark_bfqq_non_blocking_wait_rq(bfqq); -+ /* -+ * Not setting service to 0, because, if the next rq -+ * arrives in time, the queue will go on receiving -+ * service with this same budget (as if it never expired) -+ */ -+ } else { -+ entity->service = 0; -+ bfq_log_bfqq(bfqd, bfqq, "resetting service"); -+ } -+ -+ /* -+ * Reset the received-service counter for every parent entity. -+ * Differently from what happens with bfqq->entity.service, -+ * the resetting of this counter never needs to be postponed -+ * for parent entities. In fact, in case bfqq may have a -+ * chance to go on being served using the last, partially -+ * consumed budget, bfqq->entity.service needs to be kept, -+ * because if bfqq then actually goes on being served using -+ * the same budget, the last value of bfqq->entity.service is -+ * needed to properly decrement bfqq->entity.budget by the -+ * portion already consumed. In contrast, it is not necessary -+ * to keep entity->service for parent entities too, because -+ * the bubble up of the new value of bfqq->entity.budget will -+ * make sure that the budgets of parent entities are correct, -+ * even in case bfqq and thus parent entities go on receiving -+ * service with the same budget. 
-+ */ -+ entity = entity->parent; -+ for_each_entity(entity) -+ entity->service = 0; -+} -+ -+/* -+ * Budget timeout is not implemented through a dedicated timer, but -+ * just checked on request arrivals and completions, as well as on -+ * idle timer expirations. -+ */ -+static bool bfq_bfqq_budget_timeout(struct bfq_queue *bfqq) -+{ -+ return time_is_before_eq_jiffies(bfqq->budget_timeout); -+} -+ -+/* -+ * If we expire a queue that is actively waiting (i.e., with the -+ * device idled) for the arrival of a new request, then we may incur -+ * the timestamp misalignment problem described in the body of the -+ * function __bfq_activate_entity. Hence we return true only if this -+ * condition does not hold, or if the queue is slow enough to deserve -+ * only to be kicked off for preserving a high throughput. -+ */ -+static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq) -+{ -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "wait_request %d left %d timeout %d", -+ bfq_bfqq_wait_request(bfqq), -+ bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3, -+ bfq_bfqq_budget_timeout(bfqq)); -+ -+ return (!bfq_bfqq_wait_request(bfqq) || -+ bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3) -+ && -+ bfq_bfqq_budget_timeout(bfqq); -+} -+ -+static bool idling_boosts_thr_without_issues(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq) -+{ -+ bool rot_without_queueing = -+ !blk_queue_nonrot(bfqd->queue) && !bfqd->hw_tag, -+ bfqq_sequential_and_IO_bound, -+ idling_boosts_thr; -+ -+ bfqq_sequential_and_IO_bound = !BFQQ_SEEKY(bfqq) && -+ bfq_bfqq_IO_bound(bfqq) && bfq_bfqq_has_short_ttime(bfqq); -+ /* -+ * The next variable takes into account the cases where idling -+ * boosts the throughput. -+ * -+ * The value of the variable is computed considering, first, that -+ * idling is virtually always beneficial for the throughput if: -+ * (a) the device is not NCQ-capable and rotational, or -+ * (b) regardless of the presence of NCQ, the device is rotational and -+ * the request pattern for bfqq is I/O-bound and sequential, or -+ * (c) regardless of whether it is rotational, the device is -+ * not NCQ-capable and the request pattern for bfqq is -+ * I/O-bound and sequential. -+ * -+ * Secondly, and in contrast to the above item (b), idling an -+ * NCQ-capable flash-based device would not boost the -+ * throughput even with sequential I/O; rather it would lower -+ * the throughput in proportion to how fast the device -+ * is. Accordingly, the next variable is true if any of the -+ * above conditions (a), (b) or (c) is true, and, in -+ * particular, happens to be false if bfqd is an NCQ-capable -+ * flash-based device. -+ */ -+ idling_boosts_thr = rot_without_queueing || -+ ((!blk_queue_nonrot(bfqd->queue) || !bfqd->hw_tag) && -+ bfqq_sequential_and_IO_bound); -+ -+ bfq_log_bfqq(bfqd, bfqq, "idling_boosts_thr %d", idling_boosts_thr); -+ -+ /* -+ * The return value of this function is equal to that of -+ * idling_boosts_thr, unless a special case holds. In this -+ * special case, described below, idling may cause problems to -+ * weight-raised queues. -+ * -+ * When the request pool is saturated (e.g., in the presence -+ * of write hogs), if the processes associated with -+ * non-weight-raised queues ask for requests at a lower rate, -+ * then processes associated with weight-raised queues have a -+ * higher probability to get a request from the pool -+ * immediately (or at least soon) when they need one. 
Thus -+ * they have a higher probability to actually get a fraction -+ * of the device throughput proportional to their high -+ * weight. This is especially true with NCQ-capable drives, -+ * which enqueue several requests in advance, and further -+ * reorder internally-queued requests. -+ * -+ * For this reason, we force to false the return value if -+ * there are weight-raised busy queues. In this case, and if -+ * bfqq is not weight-raised, this guarantees that the device -+ * is not idled for bfqq (if, instead, bfqq is weight-raised, -+ * then idling will be guaranteed by another variable, see -+ * below). Combined with the timestamping rules of BFQ (see -+ * [1] for details), this behavior causes bfqq, and hence any -+ * sync non-weight-raised queue, to get a lower number of -+ * requests served, and thus to ask for a lower number of -+ * requests from the request pool, before the busy -+ * weight-raised queues get served again. This often mitigates -+ * starvation problems in the presence of heavy write -+ * workloads and NCQ, thereby guaranteeing a higher -+ * application and system responsiveness in these hostile -+ * scenarios. -+ */ -+ return idling_boosts_thr && -+ bfqd->wr_busy_queues == 0; -+} -+ -+/* -+ * There is a case where idling must be performed not for -+ * throughput concerns, but to preserve service guarantees. -+ * -+ * To introduce this case, we can note that allowing the drive -+ * to enqueue more than one request at a time, and hence -+ * delegating de facto final scheduling decisions to the -+ * drive's internal scheduler, entails loss of control on the -+ * actual request service order. In particular, the critical -+ * situation is when requests from different processes happen -+ * to be present, at the same time, in the internal queue(s) -+ * of the drive. In such a situation, the drive, by deciding -+ * the service order of the internally-queued requests, does -+ * determine also the actual throughput distribution among -+ * these processes. But the drive typically has no notion or -+ * concern about per-process throughput distribution, and -+ * makes its decisions only on a per-request basis. Therefore, -+ * the service distribution enforced by the drive's internal -+ * scheduler is likely to coincide with the desired -+ * device-throughput distribution only in a completely -+ * symmetric scenario where: -+ * (i) each of these processes must get the same throughput as -+ * the others; -+ * (ii) the I/O of each process has the same properties, in -+ * terms of locality (sequential or random), direction -+ * (reads or writes), request sizes, greediness -+ * (from I/O-bound to sporadic), and so on. -+ * In fact, in such a scenario, the drive tends to treat -+ * the requests of each of these processes in about the same -+ * way as the requests of the others, and thus to provide -+ * each of these processes with about the same throughput -+ * (which is exactly the desired throughput distribution). In -+ * contrast, in any asymmetric scenario, device idling is -+ * certainly needed to guarantee that bfqq receives its -+ * assigned fraction of the device throughput (see [1] for -+ * details). -+ * The problem is that idling may significantly reduce -+ * throughput with certain combinations of types of I/O and -+ * devices. An important example is sync random I/O, on flash -+ * storage with command queueing. 
So, unless bfqq falls in the -+ * above cases where idling also boosts throughput, it would -+ * be important to check conditions (i) and (ii) accurately, -+ * so as to avoid idling when not strictly needed for service -+ * guarantees. -+ * -+ * Unfortunately, it is extremely difficult to thoroughly -+ * check condition (ii). And, in case there are active groups, -+ * it becomes very difficult to check condition (i) too. In -+ * fact, if there are active groups, then, for condition (i) -+ * to become false, it is enough that an active group contains -+ * more active processes or sub-groups than some other active -+ * group. More precisely, for condition (i) to hold because of -+ * such a group, it is not even necessary that the group is -+ * (still) active: it is sufficient that, even if the group -+ * has become inactive, some of its descendant processes still -+ * have some request already dispatched but still waiting for -+ * completion. In fact, requests have still to be guaranteed -+ * their share of the throughput even after being -+ * dispatched. In this respect, it is easy to show that, if a -+ * group frequently becomes inactive while still having -+ * in-flight requests, and if, when this happens, the group is -+ * not considered in the calculation of whether the scenario -+ * is asymmetric, then the group may fail to be guaranteed its -+ * fair share of the throughput (basically because idling may -+ * not be performed for the descendant processes of the group, -+ * but it had to be). We address this issue with the -+ * following bi-modal behavior, implemented in the function -+ * bfq_symmetric_scenario(). -+ * -+ * If there are groups with requests waiting for completion -+ * (as commented above, some of these groups may even be -+ * already inactive), then the scenario is tagged as -+ * asymmetric, conservatively, without checking any of the -+ * conditions (i) and (ii). So the device is idled for bfqq. -+ * This behavior matches also the fact that groups are created -+ * exactly if controlling I/O is a primary concern (to -+ * preserve bandwidth and latency guarantees). -+ * -+ * On the opposite end, if there are no groups with requests -+ * waiting for completion, then only condition (i) is actually -+ * controlled, i.e., provided that condition (i) holds, idling -+ * is not performed, regardless of whether condition (ii) -+ * holds. In other words, only if condition (i) does not hold, -+ * then idling is allowed, and the device tends to be -+ * prevented from queueing many requests, possibly of several -+ * processes. Since there are no groups with requests waiting -+ * for completion, then, to control condition (i) it is enough -+ * to check just whether all the queues with requests waiting -+ * for completion also have the same weight. -+ * -+ * Not checking condition (ii) evidently exposes bfqq to the -+ * risk of getting less throughput than its fair share. -+ * However, for queues with the same weight, a further -+ * mechanism, preemption, mitigates or even eliminates this -+ * problem. And it does so without consequences on overall -+ * throughput. This mechanism and its benefits are explained -+ * in the next three paragraphs. -+ * -+ * Even if a queue, say Q, is expired when it remains idle, Q -+ * can still preempt the new in-service queue if the next -+ * request of Q arrives soon (see the comments on -+ * bfq_bfqq_update_budg_for_activation). 
If all queues and -+ * groups have the same weight, this form of preemption, -+ * combined with the hole-recovery heuristic described in the -+ * comments on function bfq_bfqq_update_budg_for_activation, -+ * are enough to preserve a correct bandwidth distribution in -+ * the mid term, even without idling. In fact, even if not -+ * idling allows the internal queues of the device to contain -+ * many requests, and thus to reorder requests, we can rather -+ * safely assume that the internal scheduler still preserves a -+ * minimum of mid-term fairness. -+ * -+ * More precisely, this preemption-based, idleless approach -+ * provides fairness in terms of IOPS, and not sectors per -+ * second. This can be seen with a simple example. Suppose -+ * that there are two queues with the same weight, but that -+ * the first queue receives requests of 8 sectors, while the -+ * second queue receives requests of 1024 sectors. In -+ * addition, suppose that each of the two queues contains at -+ * most one request at a time, which implies that each queue -+ * always remains idle after it is served. Finally, after -+ * remaining idle, each queue receives very quickly a new -+ * request. It follows that the two queues are served -+ * alternatively, preempting each other if needed. This -+ * implies that, although both queues have the same weight, -+ * the queue with large requests receives a service that is -+ * 1024/8 times as high as the service received by the other -+ * queue. -+ * -+ * The motivation for using preemption instead of idling (for -+ * queues with the same weight) is that, by not idling, -+ * service guarantees are preserved (completely or at least in -+ * part) without minimally sacrificing throughput. And, if -+ * there is no active group, then the primary expectation for -+ * this device is probably a high throughput. -+ * -+ * We are now left only with explaining the additional -+ * compound condition that is checked below for deciding -+ * whether the scenario is asymmetric. To explain this -+ * compound condition, we need to add that the function -+ * bfq_symmetric_scenario checks the weights of only -+ * non-weight-raised queues, for efficiency reasons (see -+ * comments on bfq_weights_tree_add()). Then the fact that -+ * bfqq is weight-raised is checked explicitly here. More -+ * precisely, the compound condition below takes into account -+ * also the fact that, even if bfqq is being weight-raised, -+ * the scenario is still symmetric if all queues with requests -+ * waiting for completion happen to be -+ * weight-raised. Actually, we should be even more precise -+ * here, and differentiate between interactive weight raising -+ * and soft real-time weight raising. -+ * -+ * As a side note, it is worth considering that the above -+ * device-idling countermeasures may however fail in the -+ * following unlucky scenario: if idling is (correctly) -+ * disabled in a time period during which all symmetry -+ * sub-conditions hold, and hence the device is allowed to -+ * enqueue many requests, but at some later point in time some -+ * sub-condition stops to hold, then it may become impossible -+ * to let requests be served in the desired order until all -+ * the requests already queued in the device have been served. 
-+ */ -+static bool idling_needed_for_service_guarantees(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq) -+{ -+ bool asymmetric_scenario = (bfqq->wr_coeff > 1 && -+ bfqd->wr_busy_queues < -+ bfq_tot_busy_queues(bfqd)) || -+ !bfq_symmetric_scenario(bfqd); -+ -+ bfq_log_bfqq(bfqd, bfqq, -+ "wr_coeff %d wr_busy %d busy %d asymmetric %d", -+ bfqq->wr_coeff, -+ bfqd->wr_busy_queues, -+ bfq_tot_busy_queues(bfqd), -+ asymmetric_scenario); -+ -+ return asymmetric_scenario; -+} -+ -+/* -+ * For a queue that becomes empty, device idling is allowed only if -+ * this function returns true for that queue. As a consequence, since -+ * device idling plays a critical role for both throughput boosting -+ * and service guarantees, the return value of this function plays a -+ * critical role as well. -+ * -+ * In a nutshell, this function returns true only if idling is -+ * beneficial for throughput or, even if detrimental for throughput, -+ * idling is however necessary to preserve service guarantees (low -+ * latency, desired throughput distribution, ...). In particular, on -+ * NCQ-capable devices, this function tries to return false, so as to -+ * help keep the drives' internal queues full, whenever this helps the -+ * device boost the throughput without causing any service-guarantee -+ * issue. -+ * -+ * Most of the issues taken into account to get the return value of -+ * this function are not trivial. We discuss these issues in the two -+ * functions providing the main pieces of information needed by this -+ * function. -+ */ -+static bool bfq_better_to_idle(struct bfq_queue *bfqq) -+{ -+ struct bfq_data *bfqd = bfqq->bfqd; -+ bool idling_boosts_thr_with_no_issue, idling_needed_for_service_guar; -+ -+ if (unlikely(bfqd->strict_guarantees)) -+ return true; -+ -+ /* -+ * Idling is performed only if slice_idle > 0. In addition, we -+ * do not idle if -+ * (a) bfqq is async -+ * (b) bfqq is in the idle io prio class: in this case we do -+ * not idle because we want to minimize the bandwidth that -+ * queues in this class can steal to higher-priority queues -+ */ -+ if (bfqd->bfq_slice_idle == 0 || !bfq_bfqq_sync(bfqq) || -+ bfq_class_idle(bfqq)) -+ return false; -+ -+ idling_boosts_thr_with_no_issue = -+ idling_boosts_thr_without_issues(bfqd, bfqq); -+ -+ idling_needed_for_service_guar = -+ idling_needed_for_service_guarantees(bfqd, bfqq); -+ -+ /* -+ * We have now the two components we need to compute the -+ * return value of the function, which is true only if idling -+ * either boosts the throughput (without issues), or is -+ * necessary to preserve service guarantees. -+ */ -+ bfq_log_bfqq(bfqd, bfqq, -+ "wr_busy %d boosts %d IO-bound %d guar %d", -+ bfqd->wr_busy_queues, -+ idling_boosts_thr_with_no_issue, -+ bfq_bfqq_IO_bound(bfqq), -+ idling_needed_for_service_guar); -+ -+ return idling_boosts_thr_with_no_issue || -+ idling_needed_for_service_guar; -+} -+ -+/* -+ * If the in-service queue is empty but the function bfq_better_to_idle -+ * returns true, then: -+ * 1) the queue must remain in service and cannot be expired, and -+ * 2) the device must be idled to wait for the possible arrival of a new -+ * request for the queue. -+ * See the comments on the function bfq_better_to_idle for the reasons -+ * why performing device idling is the best choice to boost the throughput -+ * and preserve service guarantees when bfq_better_to_idle itself -+ * returns true. 
-+ */ -+static bool bfq_bfqq_must_idle(struct bfq_queue *bfqq) -+{ -+ return RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_better_to_idle(bfqq); -+} -+ -+static struct bfq_queue *bfq_choose_bfqq_for_injection(struct bfq_data *bfqd) -+{ -+ struct bfq_queue *bfqq; -+ -+ /* -+ * A linear search; but, with a high probability, very few -+ * steps are needed to find a candidate queue, i.e., a queue -+ * with enough budget left for its next request. In fact: -+ * - BFQ dynamically updates the budget of every queue so as -+ * to accomodate the expected backlog of the queue; -+ * - if a queue gets all its requests dispatched as injected -+ * service, then the queue is removed from the active list -+ * (and re-added only if it gets new requests, but with -+ * enough budget for its new backlog). -+ */ -+ list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list) -+ if (!RB_EMPTY_ROOT(&bfqq->sort_list) && -+ bfq_serv_to_charge(bfqq->next_rq, bfqq) <= -+ bfq_bfqq_budget_left(bfqq)) { -+ bfq_log_bfqq(bfqd, bfqq, "returned this queue"); -+ return bfqq; -+ } -+ -+ bfq_log(bfqd, "no queue found"); -+ return NULL; -+} -+ -+/* -+ * Select a queue for service. If we have a current queue in service, -+ * check whether to continue servicing it, or retrieve and set a new one. -+ */ -+static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd) -+{ -+ struct bfq_queue *bfqq; -+ struct request *next_rq; -+ enum bfqq_expiration reason = BFQ_BFQQ_BUDGET_TIMEOUT; -+ -+ bfqq = bfqd->in_service_queue; -+ if (!bfqq) -+ goto new_queue; -+ -+ bfq_log_bfqq(bfqd, bfqq, "already in-service queue"); -+ -+ /* -+ * Do not expire bfqq for budget timeout if bfqq may be about -+ * to enjoy device idling. The reason why, in this case, we -+ * prevent bfqq from expiring is the same as in the comments -+ * on the case where bfq_bfqq_must_idle() returns true, in -+ * bfq_completed_request(). -+ */ -+ if (bfq_may_expire_for_budg_timeout(bfqq) && -+ !bfq_bfqq_must_idle(bfqq)) -+ goto expire; -+ -+check_queue: -+ /* -+ * This loop is rarely executed more than once. Even when it -+ * happens, it is much more convenient to re-execute this loop -+ * than to return NULL and trigger a new dispatch to get a -+ * request served. -+ */ -+ next_rq = bfqq->next_rq; -+ /* -+ * If bfqq has requests queued and it has enough budget left to -+ * serve them, keep the queue, otherwise expire it. -+ */ -+ if (next_rq) { -+ BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list)); -+ -+ if (bfq_serv_to_charge(next_rq, bfqq) > -+ bfq_bfqq_budget_left(bfqq)) { -+ /* -+ * Expire the queue for budget exhaustion, -+ * which makes sure that the next budget is -+ * enough to serve the next request, even if -+ * it comes from the fifo expired path. -+ */ -+ reason = BFQ_BFQQ_BUDGET_EXHAUSTED; -+ goto expire; -+ } else { -+ /* -+ * The idle timer may be pending because we may -+ * not disable disk idling even when a new request -+ * arrives. -+ */ -+ if (bfq_bfqq_wait_request(bfqq)) { -+ /* -+ * If we get here: 1) at least a new request -+ * has arrived but we have not disabled the -+ * timer because the request was too small, -+ * 2) then the block layer has unplugged -+ * the device, causing the dispatch to be -+ * invoked. -+ * -+ * Since the device is unplugged, now the -+ * requests are probably large enough to -+ * provide a reasonable throughput. -+ * So we disable idling. -+ */ -+ bfq_clear_bfqq_wait_request(bfqq); -+ hrtimer_try_to_cancel(&bfqd->idle_slice_timer); -+ } -+ goto keep_queue; -+ } -+ } -+ -+ /* -+ * No requests pending. 
However, if the in-service queue is idling -+ * for a new request, or has requests waiting for a completion and -+ * may idle after their completion, then keep it anyway. -+ * -+ * Yet, to boost throughput, inject service from other queues if -+ * possible. -+ */ -+ if (bfq_bfqq_wait_request(bfqq) || -+ (bfqq->dispatched != 0 && bfq_better_to_idle(bfqq))) { -+ if (bfq_bfqq_injectable(bfqq) && -+ bfqq->injected_service * bfqq->inject_coeff < -+ bfqq->entity.service * 10) { -+ bfq_log_bfqq(bfqd, bfqq, "looking for queue for injection"); -+ bfqq = bfq_choose_bfqq_for_injection(bfqd); -+ } else { -+ if (BFQQ_SEEKY(bfqq)) -+ bfq_log_bfqq(bfqd, bfqq, -+ "injection saturated %d * %d >= %d * 10", -+ bfqq->injected_service, bfqq->inject_coeff, -+ bfqq->entity.service); -+ bfqq = NULL; -+ } -+ goto keep_queue; -+ } -+ -+ reason = BFQ_BFQQ_NO_MORE_REQUESTS; -+expire: -+ bfq_bfqq_expire(bfqd, bfqq, false, reason); -+new_queue: -+ bfqq = bfq_set_in_service_queue(bfqd); -+ if (bfqq) { -+ bfq_log_bfqq(bfqd, bfqq, "checking new queue"); -+ goto check_queue; -+ } -+keep_queue: -+ if (bfqq) -+ bfq_log_bfqq(bfqd, bfqq, "returned this queue"); -+ else -+ bfq_log(bfqd, "no queue returned"); -+ -+ return bfqq; -+} -+ -+static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq) -+{ -+ struct bfq_entity *entity = &bfqq->entity; -+ -+ if (bfqq->wr_coeff > 1) { /* queue is being weight-raised */ -+ BUG_ON(bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time && -+ time_is_after_jiffies(bfqq->last_wr_start_finish)); -+ -+ bfq_log_bfqq(bfqd, bfqq, -+ "raising period dur %u/%u msec, old coeff %u, w %d(%d)", -+ jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish), -+ jiffies_to_msecs(bfqq->wr_cur_max_time), -+ bfqq->wr_coeff, -+ bfqq->entity.weight, bfqq->entity.orig_weight); -+ -+ BUG_ON(bfqq != bfqd->in_service_queue && entity->weight != -+ entity->orig_weight * bfqq->wr_coeff); -+ if (entity->prio_changed) -+ bfq_log_bfqq(bfqd, bfqq, "WARN: pending prio change"); -+ -+ /* -+ * If the queue was activated in a burst, or too much -+ * time has elapsed from the beginning of this -+ * weight-raising period, then end weight raising. -+ */ -+ if (bfq_bfqq_in_large_burst(bfqq)) -+ bfq_bfqq_end_wr(bfqq); -+ else if (time_is_before_jiffies(bfqq->last_wr_start_finish + -+ bfqq->wr_cur_max_time)) { -+ if (bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time || -+ time_is_before_jiffies(bfqq->wr_start_at_switch_to_srt + -+ bfq_wr_duration(bfqd))) -+ bfq_bfqq_end_wr(bfqq); -+ else { -+ switch_back_to_interactive_wr(bfqq, bfqd); -+ BUG_ON(time_is_after_jiffies( -+ bfqq->last_wr_start_finish)); -+ bfqq->entity.prio_changed = 1; -+ bfq_log_bfqq(bfqd, bfqq, -+ "back to interactive wr"); -+ } -+ } -+ if (bfqq->wr_coeff > 1 && -+ bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time && -+ bfqq->service_from_wr > max_service_from_wr) { -+ /* see comments on max_service_from_wr */ -+ bfq_bfqq_end_wr(bfqq); -+ bfq_log_bfqq(bfqd, bfqq, -+ "too much service"); -+ } -+ } -+ /* -+ * To improve latency (for this or other queues), immediately -+ * update weight both if it must be raised and if it must be -+ * lowered. Since, entity may be on some active tree here, and -+ * might have a pending change of its ioprio class, invoke -+ * next function with the last parameter unset (see the -+ * comments on the function). -+ */ -+ if ((entity->weight > entity->orig_weight) != (bfqq->wr_coeff > 1)) -+ __bfq_entity_update_weight_prio(bfq_entity_service_tree(entity), -+ entity, false); -+} -+ -+/* -+ * Dispatch next request from bfqq. 
-+ */ -+static struct request *bfq_dispatch_rq_from_bfqq(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq) -+{ -+ struct request *rq = bfqq->next_rq; -+ unsigned long service_to_charge; -+ -+ BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list)); -+ BUG_ON(!rq); -+ service_to_charge = bfq_serv_to_charge(rq, bfqq); -+ -+ BUG_ON(service_to_charge > bfq_bfqq_budget_left(bfqq)); -+ -+ BUG_ON(bfqq->entity.budget < bfqq->entity.service); -+ -+ bfq_bfqq_served(bfqq, service_to_charge); -+ -+ BUG_ON(bfqq->entity.budget < bfqq->entity.service); -+ -+ bfq_dispatch_remove(bfqd->queue, rq); -+ -+ bfq_log_bfqq(bfqd, bfqq, -+ "dispatched %u sec req (%llu), budg left %d, new disp_nr %d", -+ blk_rq_sectors(rq), -+ (unsigned long long) blk_rq_pos(rq), -+ bfq_bfqq_budget_left(bfqq), -+ bfqq->dispatched); -+ -+ if (bfqq != bfqd->in_service_queue) { -+ if (likely(bfqd->in_service_queue)) { -+ bfqd->in_service_queue->injected_service += -+ bfq_serv_to_charge(rq, bfqq); -+ bfq_log_bfqq(bfqd, bfqd->in_service_queue, -+ "injected_service increased to %d", -+ bfqd->in_service_queue->injected_service); -+ } -+ goto return_rq; -+ } -+ -+ /* -+ * If weight raising has to terminate for bfqq, then next -+ * function causes an immediate update of bfqq's weight, -+ * without waiting for next activation. As a consequence, on -+ * expiration, bfqq will be timestamped as if has never been -+ * weight-raised during this service slot, even if it has -+ * received part or even most of the service as a -+ * weight-raised queue. This inflates bfqq's timestamps, which -+ * is beneficial, as bfqq is then more willing to leave the -+ * device immediately to possible other weight-raised queues. -+ */ -+ bfq_update_wr_data(bfqd, bfqq); -+ -+ /* -+ * Expire bfqq, pretending that its budget expired, if bfqq -+ * belongs to CLASS_IDLE and other queues are waiting for -+ * service. 
-+ */ -+ if (!(bfq_tot_busy_queues(bfqd) > 1 && bfq_class_idle(bfqq))) -+ goto return_rq; -+ -+ bfq_bfqq_expire(bfqd, bfqq, false, BFQ_BFQQ_BUDGET_EXHAUSTED); -+ -+return_rq: -+ return rq; -+} -+ -+static bool bfq_has_work(struct blk_mq_hw_ctx *hctx) -+{ -+ struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; -+ -+ bfq_log(bfqd, "dispatch_non_empty %d busy_queues %d", -+ !list_empty_careful(&bfqd->dispatch), bfq_tot_busy_queues(bfqd) > 0); -+ -+ /* -+ * Avoiding lock: a race on bfqd->busy_queues should cause at -+ * most a call to dispatch for nothing -+ */ -+ return !list_empty_careful(&bfqd->dispatch) || -+ bfq_tot_busy_queues(bfqd) > 0; -+} -+ -+static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) -+{ -+ struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; -+ struct request *rq = NULL; -+ struct bfq_queue *bfqq = NULL; -+ -+ if (!list_empty(&bfqd->dispatch)) { -+ rq = list_first_entry(&bfqd->dispatch, struct request, -+ queuelist); -+ list_del_init(&rq->queuelist); -+ rq->rq_flags &= ~RQF_DISP_LIST; -+ -+ bfq_log(bfqd, -+ "picked %p from dispatch list", rq); -+ bfqq = RQ_BFQQ(rq); -+ -+ if (bfqq) { -+ /* -+ * Increment counters here, because this -+ * dispatch does not follow the standard -+ * dispatch flow (where counters are -+ * incremented) -+ */ -+ bfqq->dispatched++; -+ -+ /* -+ * TESTING: reset DISP_LIST flag, because: 1) -+ * this rq this request has passed through -+ * bfq_prepare_request, 2) then it will have -+ * bfq_finish_requeue_request invoked on it, and 3) in -+ * bfq_finish_requeue_request we use this flag to check -+ * that bfq_finish_requeue_request is not invoked on -+ * requests for which bfq_prepare_request has -+ * been invoked. -+ */ -+ rq->rq_flags &= ~RQF_DISP_LIST; -+ goto inc_in_driver_start_rq; -+ } -+ -+ /* -+ * We exploit the bfq_finish_requeue_request hook to decrement -+ * rq_in_driver, but bfq_finish_requeue_request will not be -+ * invoked on this request. So, to avoid unbalance, -+ * just start this request, without incrementing -+ * rq_in_driver. As a negative consequence, -+ * rq_in_driver is deceptively lower than it should be -+ * while this request is in service. This may cause -+ * bfq_schedule_dispatch to be invoked uselessly. -+ * -+ * As for implementing an exact solution, the -+ * bfq_finish_requeue_request hook, if defined, is probably -+ * invoked also on this request. So, by exploiting -+ * this hook, we could 1) increment rq_in_driver here, -+ * and 2) decrement it in bfq_finish_requeue_request. Such a -+ * solution would let the value of the counter be -+ * always accurate, but it would entail using an extra -+ * interface function. This cost seems higher than the -+ * benefit, being the frequency of non-elevator-private -+ * requests very low. -+ */ -+ goto start_rq; -+ } -+ -+ bfq_log(bfqd, "%d busy queues", bfq_tot_busy_queues(bfqd)); -+ -+ if (bfq_tot_busy_queues(bfqd) == 0) -+ goto exit; -+ -+ /* -+ * Force device to serve one request at a time if -+ * strict_guarantees is true. Forcing this service scheme is -+ * currently the ONLY way to guarantee that the request -+ * service order enforced by the scheduler is respected by a -+ * queueing device. Otherwise the device is free even to make -+ * some unlucky request wait for as long as the device -+ * wishes. -+ * -+ * Of course, serving one request at at time may cause loss of -+ * throughput. 
-+ */ -+ if (bfqd->strict_guarantees && bfqd->rq_in_driver > 0) -+ goto exit; -+ -+ bfqq = bfq_select_queue(bfqd); -+ if (!bfqq) -+ goto exit; -+ -+ BUG_ON(bfqq == bfqd->in_service_queue && -+ bfqq->entity.budget < bfqq->entity.service); -+ -+ BUG_ON(bfqq == bfqd->in_service_queue && -+ bfq_bfqq_wait_request(bfqq)); -+ -+ rq = bfq_dispatch_rq_from_bfqq(bfqd, bfqq); -+ -+ BUG_ON(bfqq->entity.budget < bfqq->entity.service); -+ -+ if (rq) { -+ inc_in_driver_start_rq: -+ bfqd->rq_in_driver++; -+ start_rq: -+ rq->rq_flags |= RQF_STARTED; -+ if (bfqq) -+ bfq_log_bfqq(bfqd, bfqq, -+ "%s request %p, rq_in_driver %d", -+ bfq_bfqq_sync(bfqq) ? "sync" : "async", -+ rq, -+ bfqd->rq_in_driver); -+ else -+ bfq_log(bfqd, -+ "request %p from dispatch list, rq_in_driver %d", -+ rq, bfqd->rq_in_driver); -+ } else -+ bfq_log(bfqd, -+ "returned NULL request, rq_in_driver %d", -+ bfqd->rq_in_driver); -+ -+exit: -+ return rq; -+} -+ -+ -+#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP) -+static void bfq_update_dispatch_stats(struct request_queue *q, -+ struct request *rq, -+ struct bfq_queue *in_serv_queue, -+ bool idle_timer_disabled) -+{ -+ struct bfq_queue *bfqq = rq ? RQ_BFQQ(rq) : NULL; -+ -+ if (!idle_timer_disabled && !bfqq) -+ return; -+ -+ /* -+ * rq and bfqq are guaranteed to exist until this function -+ * ends, for the following reasons. First, rq can be -+ * dispatched to the device, and then can be completed and -+ * freed, only after this function ends. Second, rq cannot be -+ * merged (and thus freed because of a merge) any longer, -+ * because it has already started. Thus rq cannot be freed -+ * before this function ends, and, since rq has a reference to -+ * bfqq, the same guarantee holds for bfqq too. -+ * -+ * In addition, the following queue lock guarantees that -+ * bfqq_group(bfqq) exists as well. -+ */ -+ spin_lock_irq(q->queue_lock); -+ if (idle_timer_disabled) -+ /* -+ * Since the idle timer has been disabled, -+ * in_serv_queue contained some request when -+ * __bfq_dispatch_request was invoked above, which -+ * implies that rq was picked exactly from -+ * in_serv_queue. Thus in_serv_queue == bfqq, and is -+ * therefore guaranteed to exist because of the above -+ * arguments. -+ */ -+ bfqg_stats_update_idle_time(bfqq_group(in_serv_queue)); -+ if (bfqq) { -+ struct bfq_group *bfqg = bfqq_group(bfqq); -+ -+ bfqg_stats_update_avg_queue_size(bfqg); -+ bfqg_stats_set_start_empty_time(bfqg); -+ bfqg_stats_update_io_remove(bfqg, rq->cmd_flags); -+ } -+ spin_unlock_irq(q->queue_lock); -+} -+#else -+static inline void bfq_update_dispatch_stats(struct request_queue *q, -+ struct request *rq, -+ struct bfq_queue *in_serv_queue, -+ bool idle_timer_disabled) {} -+#endif -+static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) -+{ -+ struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; -+ struct request *rq; -+ struct bfq_queue *in_serv_queue; -+ bool waiting_rq, idle_timer_disabled; -+ -+ spin_lock_irq(&bfqd->lock); -+ -+ in_serv_queue = bfqd->in_service_queue; -+ waiting_rq = in_serv_queue && bfq_bfqq_wait_request(in_serv_queue); -+ -+ rq = __bfq_dispatch_request(hctx); -+ -+ idle_timer_disabled = -+ waiting_rq && !bfq_bfqq_wait_request(in_serv_queue); -+ -+ spin_unlock_irq(&bfqd->lock); -+ -+ bfq_update_dispatch_stats(hctx->queue, rq, in_serv_queue, -+ idle_timer_disabled); -+ -+ return rq; -+} -+ -+/* -+ * Task holds one reference to the queue, dropped when task exits. 
Each rq -+ * in-flight on this queue also holds a reference, dropped when rq is freed. -+ * -+ * Scheduler lock must be held here. Recall not to use bfqq after calling -+ * this function on it. -+ */ -+static void bfq_put_queue(struct bfq_queue *bfqq) -+{ -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ struct bfq_group *bfqg = bfqq_group(bfqq); -+#endif -+ -+ assert_spin_locked(&bfqq->bfqd->lock); -+ -+ BUG_ON(bfqq->ref <= 0); -+ -+ if (bfqq->bfqd) -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "%p %d", bfqq, bfqq->ref); -+ -+ bfqq->ref--; -+ if (bfqq->ref) -+ return; -+ -+ BUG_ON(rb_first(&bfqq->sort_list)); -+ BUG_ON(bfqq->allocated != 0); -+ BUG_ON(bfqq->entity.tree); -+ BUG_ON(bfq_bfqq_busy(bfqq)); -+ -+ if (!hlist_unhashed(&bfqq->burst_list_node)) { -+ hlist_del_init(&bfqq->burst_list_node); -+ /* -+ * Decrement also burst size after the removal, if the -+ * process associated with bfqq is exiting, and thus -+ * does not contribute to the burst any longer. This -+ * decrement helps filter out false positives of large -+ * bursts, when some short-lived process (often due to -+ * the execution of commands by some service) happens -+ * to start and exit while a complex application is -+ * starting, and thus spawning several processes that -+ * do I/O (and that *must not* be treated as a large -+ * burst, see comments on bfq_handle_burst). -+ * -+ * In particular, the decrement is performed only if: -+ * 1) bfqq is not a merged queue, because, if it is, -+ * then this free of bfqq is not triggered by the exit -+ * of the process bfqq is associated with, but exactly -+ * by the fact that bfqq has just been merged. -+ * 2) burst_size is greater than 0, to handle -+ * unbalanced decrements. Unbalanced decrements may -+ * happen in te following case: bfqq is inserted into -+ * the current burst list--without incrementing -+ * bust_size--because of a split, but the current -+ * burst list is not the burst list bfqq belonged to -+ * (see comments on the case of a split in -+ * bfq_set_request). -+ */ -+ if (bfqq->bic && bfqq->bfqd->burst_size > 0) -+ bfqq->bfqd->burst_size--; -+ } -+ -+ if (bfqq->bfqd) -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "%p freed", bfqq); -+ -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "putting blkg and bfqg %p\n", bfqg); -+ bfqg_and_blkg_put(bfqg); -+#endif -+ kmem_cache_free(bfq_pool, bfqq); -+} -+ -+static void bfq_put_cooperator(struct bfq_queue *bfqq) -+{ -+ struct bfq_queue *__bfqq, *next; -+ -+ /* -+ * If this queue was scheduled to merge with another queue, be -+ * sure to drop the reference taken on that queue (and others in -+ * the merge chain). See bfq_setup_merge and bfq_merge_bfqqs. 
-+ */ -+ __bfqq = bfqq->new_bfqq; -+ while (__bfqq) { -+ if (__bfqq == bfqq) -+ break; -+ next = __bfqq->new_bfqq; -+ bfq_put_queue(__bfqq); -+ __bfqq = next; -+ } -+} -+ -+static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) -+{ -+ if (bfqq == bfqd->in_service_queue) { -+ __bfq_bfqq_expire(bfqd, bfqq); -+ bfq_schedule_dispatch(bfqd); -+ } -+ -+ bfq_log_bfqq(bfqd, bfqq, "%p, %d", bfqq, bfqq->ref); -+ -+ bfq_put_cooperator(bfqq); -+ -+ bfq_put_queue(bfqq); /* release process reference */ -+} -+ -+static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync) -+{ -+ struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync); -+ struct bfq_data *bfqd; -+ -+ if (bfqq) -+ bfqd = bfqq->bfqd; /* NULL if scheduler already exited */ -+ -+ if (bfqq && bfqd) { -+ unsigned long flags; -+ -+ spin_lock_irqsave(&bfqd->lock, flags); -+ bfq_exit_bfqq(bfqd, bfqq); -+ bic_set_bfqq(bic, NULL, is_sync); -+ spin_unlock_irqrestore(&bfqd->lock, flags); -+ } -+} -+ -+static void bfq_exit_icq(struct io_cq *icq) -+{ -+ struct bfq_io_cq *bic = icq_to_bic(icq); -+ -+ BUG_ON(!bic); -+ bfq_exit_icq_bfqq(bic, true); -+ bfq_exit_icq_bfqq(bic, false); -+} -+ -+/* -+ * Update the entity prio values; note that the new values will not -+ * be used until the next (re)activation. -+ */ -+static void bfq_set_next_ioprio_data(struct bfq_queue *bfqq, -+ struct bfq_io_cq *bic) -+{ -+ struct task_struct *tsk = current; -+ int ioprio_class; -+ struct bfq_data *bfqd = bfqq->bfqd; -+ -+ WARN_ON(!bfqd); -+ if (!bfqd) -+ return; -+ -+ ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio); -+ switch (ioprio_class) { -+ default: -+ dev_err(bfqq->bfqd->queue->backing_dev_info->dev, -+ "bfq: bad prio class %d\n", ioprio_class); -+ case IOPRIO_CLASS_NONE: -+ /* -+ * No prio set, inherit CPU scheduling settings. -+ */ -+ bfqq->new_ioprio = task_nice_ioprio(tsk); -+ bfqq->new_ioprio_class = task_nice_ioclass(tsk); -+ break; -+ case IOPRIO_CLASS_RT: -+ bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio); -+ bfqq->new_ioprio_class = IOPRIO_CLASS_RT; -+ break; -+ case IOPRIO_CLASS_BE: -+ bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio); -+ bfqq->new_ioprio_class = IOPRIO_CLASS_BE; -+ break; -+ case IOPRIO_CLASS_IDLE: -+ bfqq->new_ioprio_class = IOPRIO_CLASS_IDLE; -+ bfqq->new_ioprio = 7; -+ break; -+ } -+ -+ if (bfqq->new_ioprio >= IOPRIO_BE_NR) { -+ pr_crit("bfq_set_next_ioprio_data: new_ioprio %d\n", -+ bfqq->new_ioprio); -+ BUG(); -+ } -+ -+ bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->new_ioprio); -+ bfqq->entity.prio_changed = 1; -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "bic_class %d prio %d class %d", -+ ioprio_class, bfqq->new_ioprio, bfqq->new_ioprio_class); -+} -+ -+static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio) -+{ -+ struct bfq_data *bfqd = bic_to_bfqd(bic); -+ struct bfq_queue *bfqq; -+ unsigned long uninitialized_var(flags); -+ int ioprio = bic->icq.ioc->ioprio; -+ -+ /* -+ * This condition may trigger on a newly created bic, be sure to -+ * drop the lock before returning. 
-+ */ -+ if (unlikely(!bfqd) || likely(bic->ioprio == ioprio)) -+ return; -+ -+ bic->ioprio = ioprio; -+ -+ bfqq = bic_to_bfqq(bic, false); -+ if (bfqq) { -+ /* release process reference on this queue */ -+ bfq_put_queue(bfqq); -+ bfqq = bfq_get_queue(bfqd, bio, BLK_RW_ASYNC, bic); -+ bic_set_bfqq(bic, bfqq, false); -+ bfq_log_bfqq(bfqd, bfqq, -+ "bfqq %p %d", -+ bfqq, bfqq->ref); -+ } -+ -+ bfqq = bic_to_bfqq(bic, true); -+ if (bfqq) -+ bfq_set_next_ioprio_data(bfqq, bic); -+} -+ -+static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, -+ struct bfq_io_cq *bic, pid_t pid, int is_sync) -+{ -+ RB_CLEAR_NODE(&bfqq->entity.rb_node); -+ INIT_LIST_HEAD(&bfqq->fifo); -+ INIT_HLIST_NODE(&bfqq->burst_list_node); -+ BUG_ON(!hlist_unhashed(&bfqq->burst_list_node)); -+ -+ bfqq->ref = 0; -+ bfqq->bfqd = bfqd; -+ -+ if (bic) -+ bfq_set_next_ioprio_data(bfqq, bic); -+ -+ if (is_sync) { -+ /* -+ * No need to mark as has_short_ttime if in -+ * idle_class, because no device idling is performed -+ * for queues in idle class -+ */ -+ if (!bfq_class_idle(bfqq)) -+ /* tentatively mark as has_short_ttime */ -+ bfq_mark_bfqq_has_short_ttime(bfqq); -+ bfq_mark_bfqq_sync(bfqq); -+ bfq_mark_bfqq_just_created(bfqq); -+ /* -+ * Aggressively inject a lot of service: up to 90%. -+ * This coefficient remains constant during bfqq life, -+ * but this behavior might be changed, after enough -+ * testing and tuning. -+ */ -+ bfqq->inject_coeff = 1; -+ } else -+ bfq_clear_bfqq_sync(bfqq); -+ -+ bfqq->ttime.last_end_request = ktime_get_ns() - (1ULL<<32); -+ -+ bfq_mark_bfqq_IO_bound(bfqq); -+ -+ /* Tentative initial value to trade off between thr and lat */ -+ bfqq->max_budget = (2 * bfq_max_budget(bfqd)) / 3; -+ bfqq->pid = pid; -+ -+ bfqq->wr_coeff = 1; -+ bfqq->last_wr_start_finish = jiffies; -+ bfqq->wr_start_at_switch_to_srt = bfq_smallest_from_now(); -+ bfqq->budget_timeout = bfq_smallest_from_now(); -+ bfqq->split_time = bfq_smallest_from_now(); -+ -+ /* -+ * To not forget the possibly high bandwidth consumed by a -+ * process/queue in the recent past, -+ * bfq_bfqq_softrt_next_start() returns a value at least equal -+ * to the current value of bfqq->soft_rt_next_start (see -+ * comments on bfq_bfqq_softrt_next_start). Set -+ * soft_rt_next_start to now, to mean that bfqq has consumed -+ * no bandwidth so far. 
-+ */ -+ bfqq->soft_rt_next_start = jiffies; -+ -+ /* first request is almost certainly seeky */ -+ bfqq->seek_history = 1; -+} -+ -+static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd, -+ struct bfq_group *bfqg, -+ int ioprio_class, int ioprio) -+{ -+ switch (ioprio_class) { -+ case IOPRIO_CLASS_RT: -+ return &bfqg->async_bfqq[0][ioprio]; -+ case IOPRIO_CLASS_NONE: -+ ioprio = IOPRIO_NORM; -+ /* fall through */ -+ case IOPRIO_CLASS_BE: -+ return &bfqg->async_bfqq[1][ioprio]; -+ case IOPRIO_CLASS_IDLE: -+ return &bfqg->async_idle_bfqq; -+ default: -+ BUG(); -+ } -+} -+ -+static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd, -+ struct bio *bio, bool is_sync, -+ struct bfq_io_cq *bic) -+{ -+ const int ioprio = IOPRIO_PRIO_DATA(bic->ioprio); -+ const int ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio); -+ struct bfq_queue **async_bfqq = NULL; -+ struct bfq_queue *bfqq; -+ struct bfq_group *bfqg; -+ -+ rcu_read_lock(); -+ -+ bfqg = bfq_find_set_group(bfqd, bio_blkcg(bio)); -+ if (!bfqg) { -+ bfqq = &bfqd->oom_bfqq; -+ goto out; -+ } -+ -+ if (!is_sync) { -+ async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class, -+ ioprio); -+ bfqq = *async_bfqq; -+ if (bfqq) -+ goto out; -+ } -+ -+ bfqq = kmem_cache_alloc_node(bfq_pool, -+ GFP_NOWAIT | __GFP_ZERO | __GFP_NOWARN, -+ bfqd->queue->node); -+ -+ if (bfqq) { -+ bfq_init_bfqq(bfqd, bfqq, bic, current->pid, -+ is_sync); -+ bfq_init_entity(&bfqq->entity, bfqg); -+ bfq_log_bfqq(bfqd, bfqq, "allocated"); -+ } else { -+ bfqq = &bfqd->oom_bfqq; -+ bfq_log_bfqq(bfqd, bfqq, "using oom bfqq"); -+ goto out; -+ } -+ -+ /* -+ * Pin the queue now that it's allocated, scheduler exit will -+ * prune it. -+ */ -+ if (async_bfqq) { -+ bfqq->ref++; /* -+ * Extra group reference, w.r.t. sync -+ * queue. This extra reference is removed -+ * only if bfqq->bfqg disappears, to -+ * guarantee that this queue is not freed -+ * until its group goes away. -+ */ -+ bfq_log_bfqq(bfqd, bfqq, "bfqq not in async: %p, %d", -+ bfqq, bfqq->ref); -+ *async_bfqq = bfqq; -+ } -+ -+out: -+ bfqq->ref++; /* get a process reference to this queue */ -+ bfq_log_bfqq(bfqd, bfqq, "at end: %p, %d", bfqq, bfqq->ref); -+ rcu_read_unlock(); -+ return bfqq; -+} -+ -+static void bfq_update_io_thinktime(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq) -+{ -+ struct bfq_ttime *ttime = &bfqq->ttime; -+ u64 elapsed = ktime_get_ns() - bfqq->ttime.last_end_request; -+ -+ elapsed = min_t(u64, elapsed, 2 * bfqd->bfq_slice_idle); -+ -+ ttime->ttime_samples = (7*bfqq->ttime.ttime_samples + 256) / 8; -+ ttime->ttime_total = div_u64(7*ttime->ttime_total + 256*elapsed, 8); -+ ttime->ttime_mean = div64_ul(ttime->ttime_total + 128, -+ ttime->ttime_samples); -+} -+ -+static void -+bfq_update_io_seektime(struct bfq_data *bfqd, struct bfq_queue *bfqq, -+ struct request *rq) -+{ -+ bfqq->seek_history <<= 1; -+ bfqq->seek_history |= BFQ_RQ_SEEKY(bfqd, bfqq->last_request_pos, rq); -+} -+ -+static void bfq_update_has_short_ttime(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq, -+ struct bfq_io_cq *bic) -+{ -+ bool has_short_ttime = true; -+ -+ /* -+ * No need to update has_short_ttime if bfqq is async or in -+ * idle io prio class, or if bfq_slice_idle is zero, because -+ * no device idling is performed for bfqq in this case. -+ */ -+ if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq) || -+ bfqd->bfq_slice_idle == 0) -+ return; -+ -+ /* Idle window just restored, statistics are meaningless. 
*/ -+ if (time_is_after_eq_jiffies(bfqq->split_time + -+ bfqd->bfq_wr_min_idle_time)) -+ return; -+ -+ /* Think time is infinite if no process is linked to -+ * bfqq. Otherwise check average think time to -+ * decide whether to mark as has_short_ttime -+ */ -+ if (atomic_read(&bic->icq.ioc->active_ref) == 0 || -+ (bfq_sample_valid(bfqq->ttime.ttime_samples) && -+ bfqq->ttime.ttime_mean > bfqd->bfq_slice_idle)) -+ has_short_ttime = false; -+ -+ bfq_log_bfqq(bfqd, bfqq, "has_short_ttime %d", -+ has_short_ttime); -+ -+ if (has_short_ttime) -+ bfq_mark_bfqq_has_short_ttime(bfqq); -+ else -+ bfq_clear_bfqq_has_short_ttime(bfqq); -+} -+ -+/* -+ * Called when a new fs request (rq) is added to bfqq. Check if there's -+ * something we should do about it. -+ */ -+static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq, -+ struct request *rq) -+{ -+ struct bfq_io_cq *bic = RQ_BIC(rq); -+ -+ if (rq->cmd_flags & REQ_META) -+ bfqq->meta_pending++; -+ -+ bfq_update_io_thinktime(bfqd, bfqq); -+ bfq_update_has_short_ttime(bfqd, bfqq, bic); -+ bfq_update_io_seektime(bfqd, bfqq, rq); -+ -+ bfq_log_bfqq(bfqd, bfqq, -+ "has_short_ttime=%d (seeky %d)", -+ bfq_bfqq_has_short_ttime(bfqq), BFQQ_SEEKY(bfqq)); -+ -+ bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq); -+ -+ if (bfqq == bfqd->in_service_queue && bfq_bfqq_wait_request(bfqq)) { -+ bool small_req = bfqq->queued[rq_is_sync(rq)] == 1 && -+ blk_rq_sectors(rq) < 32; -+ bool budget_timeout = bfq_bfqq_budget_timeout(bfqq); -+ -+ /* -+ * There is just this request queued: if -+ * - the request is small, and -+ * - we are idling to boost throughput, and -+ * - the queue is not to be expired, -+ * then just exit. -+ * -+ * In this way, if the device is being idled to wait -+ * for a new request from the in-service queue, we -+ * avoid unplugging the device and committing the -+ * device to serve just a small request. In contrast -+ * we wait for the block layer to decide when to -+ * unplug the device: hopefully, new requests will be -+ * merged to this one quickly, then the device will be -+ * unplugged and larger requests will be dispatched. -+ */ -+ if (small_req && idling_boosts_thr_without_issues(bfqd, bfqq) && -+ !budget_timeout) -+ return; -+ -+ /* -+ * A large enough request arrived, or idling is being -+ * performed to preserve service guarantees, or -+ * finally the queue is to be expired: in all these -+ * cases disk idling is to be stopped, so clear -+ * wait_request flag and reset timer. -+ */ -+ bfq_clear_bfqq_wait_request(bfqq); -+ hrtimer_try_to_cancel(&bfqd->idle_slice_timer); -+ -+ /* -+ * The queue is not empty, because a new request just -+ * arrived. Hence we can safely expire the queue, in -+ * case of budget timeout, without risking that the -+ * timestamps of the queue are not updated correctly. -+ * See [1] for more details. -+ */ -+ if (budget_timeout) -+ bfq_bfqq_expire(bfqd, bfqq, false, -+ BFQ_BFQQ_BUDGET_TIMEOUT); -+ } -+} -+ -+/* returns true if it causes the idle timer to be disabled */ -+static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq) -+{ -+ struct bfq_queue *bfqq = RQ_BFQQ(rq), *new_bfqq; -+ bool waiting, idle_timer_disabled = false; -+ BUG_ON(!bfqq); -+ -+ assert_spin_locked(&bfqd->lock); -+ -+ bfq_log_bfqq(bfqd, bfqq, "rq %p bfqq %p", rq, bfqq); -+ -+ /* -+ * An unplug may trigger a requeue of a request from the device -+ * driver: make sure we are in process context while trying to -+ * merge two bfq_queues. 
-+ */ -+ if (!in_interrupt()) { -+ new_bfqq = bfq_setup_cooperator(bfqd, bfqq, rq, true); -+ if (new_bfqq) { -+ BUG_ON(bic_to_bfqq(RQ_BIC(rq), 1) != bfqq); -+ /* -+ * Release the request's reference to the old bfqq -+ * and make sure one is taken to the shared queue. -+ */ -+ new_bfqq->allocated++; -+ bfqq->allocated--; -+ bfq_log_bfqq(bfqd, bfqq, -+ "new allocated %d", bfqq->allocated); -+ bfq_log_bfqq(bfqd, new_bfqq, -+ "new_bfqq new allocated %d", -+ bfqq->allocated); -+ -+ new_bfqq->ref++; -+ /* -+ * If the bic associated with the process -+ * issuing this request still points to bfqq -+ * (and thus has not been already redirected -+ * to new_bfqq or even some other bfq_queue), -+ * then complete the merge and redirect it to -+ * new_bfqq. -+ */ -+ if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq) -+ bfq_merge_bfqqs(bfqd, RQ_BIC(rq), -+ bfqq, new_bfqq); -+ -+ bfq_clear_bfqq_just_created(bfqq); -+ /* -+ * rq is about to be enqueued into new_bfqq, -+ * release rq reference on bfqq -+ */ -+ bfq_put_queue(bfqq); -+ rq->elv.priv[1] = new_bfqq; -+ bfqq = new_bfqq; -+ } -+ } -+ -+ waiting = bfqq && bfq_bfqq_wait_request(bfqq); -+ bfq_add_request(rq); -+ idle_timer_disabled = waiting && !bfq_bfqq_wait_request(bfqq); -+ -+ rq->fifo_time = ktime_get_ns() + bfqd->bfq_fifo_expire[rq_is_sync(rq)]; -+ list_add_tail(&rq->queuelist, &bfqq->fifo); -+ -+ bfq_rq_enqueued(bfqd, bfqq, rq); -+ -+ return idle_timer_disabled; -+} -+ -+#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP) -+static void bfq_update_insert_stats(struct request_queue *q, -+ struct bfq_queue *bfqq, -+ bool idle_timer_disabled, -+ unsigned int cmd_flags) -+{ -+ if (!bfqq) -+ return; -+ -+ /* -+ * bfqq still exists, because it can disappear only after -+ * either it is merged with another queue, or the process it -+ * is associated with exits. But both actions must be taken by -+ * the same process currently executing this flow of -+ * instructions. -+ * -+ * In addition, the following queue lock guarantees that -+ * bfqq_group(bfqq) exists as well. 
-+ */ -+ spin_lock_irq(q->queue_lock); -+ bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, cmd_flags); -+ if (idle_timer_disabled) -+ bfqg_stats_update_idle_time(bfqq_group(bfqq)); -+ spin_unlock_irq(q->queue_lock); -+} -+#else -+static inline void bfq_update_insert_stats(struct request_queue *q, -+ struct bfq_queue *bfqq, -+ bool idle_timer_disabled, -+ unsigned int cmd_flags) {} -+#endif -+ -+static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, -+ bool at_head) -+{ -+ struct request_queue *q = hctx->queue; -+ struct bfq_data *bfqd = q->elevator->elevator_data; -+ struct bfq_queue *bfqq; -+ bool idle_timer_disabled = false; -+ unsigned int cmd_flags; -+ -+ spin_lock_irq(&bfqd->lock); -+ if (blk_mq_sched_try_insert_merge(q, rq)) { -+ spin_unlock_irq(&bfqd->lock); -+ return; -+ } -+ -+ spin_unlock_irq(&bfqd->lock); -+ -+ blk_mq_sched_request_inserted(rq); -+ -+ spin_lock_irq(&bfqd->lock); -+ -+ bfqq = bfq_init_rq(rq); -+ BUG_ON(!bfqq && !(at_head || blk_rq_is_passthrough(rq))); -+ BUG_ON(bfqq && bic_to_bfqq(RQ_BIC(rq), rq_is_sync(rq)) != bfqq); -+ -+ if (at_head || blk_rq_is_passthrough(rq)) { -+ if (at_head) -+ list_add(&rq->queuelist, &bfqd->dispatch); -+ else -+ list_add_tail(&rq->queuelist, &bfqd->dispatch); -+ -+ rq->rq_flags |= RQF_DISP_LIST; -+ if (bfqq) -+ bfq_log_bfqq(bfqd, bfqq, -+ "%p in disp: at_head %d", -+ rq, at_head); -+ else -+ bfq_log(bfqd, -+ "%p in disp: at_head %d", -+ rq, at_head); -+ } else { /* bfqq is assumed to be non null here */ -+ BUG_ON(!bfqq); -+ BUG_ON(!(rq->rq_flags & RQF_GOT)); -+ rq->rq_flags &= ~RQF_GOT; -+ -+ idle_timer_disabled = __bfq_insert_request(bfqd, rq); -+ /* -+ * Update bfqq, because, if a queue merge has occurred -+ * in __bfq_insert_request, then rq has been -+ * redirected into a new queue. -+ */ -+ bfqq = RQ_BFQQ(rq); -+ -+ if (rq_mergeable(rq)) { -+ elv_rqhash_add(q, rq); -+ if (!q->last_merge) -+ q->last_merge = rq; -+ } -+ } -+ -+ /* -+ * Cache cmd_flags before releasing scheduler lock, because rq -+ * may disappear afterwards (for example, because of a request -+ * merge). -+ */ -+ cmd_flags = rq->cmd_flags; -+ -+ spin_unlock_irq(&bfqd->lock); -+ bfq_update_insert_stats(q, bfqq, idle_timer_disabled, -+ cmd_flags); -+} -+ -+static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx, -+ struct list_head *list, bool at_head) -+{ -+ while (!list_empty(list)) { -+ struct request *rq; -+ -+ rq = list_first_entry(list, struct request, queuelist); -+ list_del_init(&rq->queuelist); -+ bfq_insert_request(hctx, rq, at_head); -+ } -+} -+ -+static void bfq_update_hw_tag(struct bfq_data *bfqd) -+{ -+ struct bfq_queue *bfqq = bfqd->in_service_queue; -+ -+ bfqd->max_rq_in_driver = max_t(int, bfqd->max_rq_in_driver, -+ bfqd->rq_in_driver); -+ -+ if (bfqd->hw_tag == 1) -+ return; -+ -+ /* -+ * This sample is valid if the number of outstanding requests -+ * is large enough to allow a queueing behavior. Note that the -+ * sum is not exact, as it's not taking into account deactivated -+ * requests. -+ */ -+ if (bfqd->rq_in_driver + bfqd->queued <= BFQ_HW_QUEUE_THRESHOLD) -+ return; -+ -+ /* -+ * If active queue hasn't enough requests and can idle, bfq might not -+ * dispatch sufficient requests to hardware. 
Don't zero hw_tag in this -+ * case -+ */ -+ if (bfqq && bfq_bfqq_has_short_ttime(bfqq) && -+ bfqq->dispatched + bfqq->queued[0] + bfqq->queued[1] < -+ BFQ_HW_QUEUE_THRESHOLD && bfqd->rq_in_driver < BFQ_HW_QUEUE_THRESHOLD) -+ return; -+ -+ if (bfqd->hw_tag_samples++ < BFQ_HW_QUEUE_SAMPLES) -+ return; -+ -+ bfqd->hw_tag = bfqd->max_rq_in_driver > BFQ_HW_QUEUE_THRESHOLD; -+ bfqd->max_rq_in_driver = 0; -+ bfqd->hw_tag_samples = 0; -+} -+ -+static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd) -+{ -+ u64 now_ns; -+ u32 delta_us; -+ -+ bfq_update_hw_tag(bfqd); -+ -+ BUG_ON(!bfqd->rq_in_driver); -+ BUG_ON(!bfqq->dispatched); -+ bfqd->rq_in_driver--; -+ bfqq->dispatched--; -+ -+ bfq_log_bfqq(bfqd, bfqq, -+ "new disp %d, new rq_in_driver %d", -+ bfqq->dispatched, bfqd->rq_in_driver); -+ -+ if (!bfqq->dispatched && !bfq_bfqq_busy(bfqq)) { -+ BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list)); -+ /* -+ * Set budget_timeout (which we overload to store the -+ * time at which the queue remains with no backlog and -+ * no outstanding request; used by the weight-raising -+ * mechanism). -+ */ -+ bfqq->budget_timeout = jiffies; -+ -+ bfq_weights_tree_remove(bfqd, bfqq); -+ } -+ -+ now_ns = ktime_get_ns(); -+ -+ bfqq->ttime.last_end_request = now_ns; -+ -+ /* -+ * Using us instead of ns, to get a reasonable precision in -+ * computing rate in next check. -+ */ -+ delta_us = div_u64(now_ns - bfqd->last_completion, NSEC_PER_USEC); -+ -+ bfq_log_bfqq(bfqd, bfqq, -+ "delta %uus/%luus max_size %u rate %llu/%llu", -+ delta_us, BFQ_MIN_TT/NSEC_PER_USEC, bfqd->last_rq_max_size, -+ delta_us > 0 ? -+ (USEC_PER_SEC* -+ (u64)((bfqd->last_rq_max_size<>BFQ_RATE_SHIFT : -+ (USEC_PER_SEC* -+ (u64)(bfqd->last_rq_max_size<>BFQ_RATE_SHIFT, -+ (USEC_PER_SEC*(u64)(1UL<<(BFQ_RATE_SHIFT-10)))>>BFQ_RATE_SHIFT); -+ -+ /* -+ * If the request took rather long to complete, and, according -+ * to the maximum request size recorded, this completion latency -+ * implies that the request was certainly served at a very low -+ * rate (less than 1M sectors/sec), then the whole observation -+ * interval that lasts up to this time instant cannot be a -+ * valid time interval for computing a new peak rate. Invoke -+ * bfq_update_rate_reset to have the following three steps -+ * taken: -+ * - close the observation interval at the last (previous) -+ * request dispatch or completion -+ * - compute rate, if possible, for that observation interval -+ * - reset to zero samples, which will trigger a proper -+ * re-initialization of the observation interval on next -+ * dispatch -+ */ -+ if (delta_us > BFQ_MIN_TT/NSEC_PER_USEC && -+ (bfqd->last_rq_max_size<last_completion = now_ns; -+ -+ /* -+ * If we are waiting to discover whether the request pattern -+ * of the task associated with the queue is actually -+ * isochronous, and both requisites for this condition to hold -+ * are now satisfied, then compute soft_rt_next_start (see the -+ * comments on the function bfq_bfqq_softrt_next_start()). We -+ * do not compute soft_rt_next_start if bfqq is in interactive -+ * weight raising (see the comments in bfq_bfqq_expire() for -+ * an explanation). We schedule this delayed update when bfqq -+ * expires, if it still has in-flight requests. 
-+ */ -+ if (bfq_bfqq_softrt_update(bfqq) && bfqq->dispatched == 0 && -+ RB_EMPTY_ROOT(&bfqq->sort_list) && -+ bfqq->wr_coeff != bfqd->bfq_wr_coeff) -+ bfqq->soft_rt_next_start = -+ bfq_bfqq_softrt_next_start(bfqd, bfqq); -+ -+ /* -+ * If this is the in-service queue, check if it needs to be expired, -+ * or if we want to idle in case it has no pending requests. -+ */ -+ if (bfqd->in_service_queue == bfqq) { -+ if (bfq_bfqq_must_idle(bfqq)) { -+ if (bfqq->dispatched == 0) -+ bfq_arm_slice_timer(bfqd); -+ /* -+ * If we get here, we do not expire bfqq, even -+ * if bfqq was in budget timeout or had no -+ * more requests (as controlled in the next -+ * conditional instructions). The reason for -+ * not expiring bfqq is as follows. -+ * -+ * Here bfqq->dispatched > 0 holds, but -+ * bfq_bfqq_must_idle() returned true. This -+ * implies that, even if no request arrives -+ * for bfqq before bfqq->dispatched reaches 0, -+ * bfqq will, however, not be expired on the -+ * completion event that causes bfqq->dispatch -+ * to reach zero. In contrast, on this event, -+ * bfqq will start enjoying device idling -+ * (I/O-dispatch plugging). -+ * -+ * But, if we expired bfqq here, bfqq would -+ * not have the chance to enjoy device idling -+ * when bfqq->dispatched finally reaches -+ * zero. This would expose bfqq to violation -+ * of its reserved service guarantees. -+ */ -+ return; -+ } else if (bfq_may_expire_for_budg_timeout(bfqq)) -+ bfq_bfqq_expire(bfqd, bfqq, false, -+ BFQ_BFQQ_BUDGET_TIMEOUT); -+ else if (RB_EMPTY_ROOT(&bfqq->sort_list) && -+ (bfqq->dispatched == 0 || -+ !bfq_better_to_idle(bfqq))) -+ bfq_bfqq_expire(bfqd, bfqq, false, -+ BFQ_BFQQ_NO_MORE_REQUESTS); -+ } -+} -+ -+static void bfq_finish_requeue_request_body(struct bfq_queue *bfqq) -+{ -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "allocated %d", bfqq->allocated); -+ BUG_ON(!bfqq->allocated); -+ bfqq->allocated--; -+ -+ bfq_put_queue(bfqq); -+} -+ -+/* -+ * Handle either a requeue or a finish for rq. The things to do are -+ * the same in both cases: all references to rq are to be dropped. In -+ * particular, rq is considered completed from the point of view of -+ * the scheduler. -+ */ -+static void bfq_finish_requeue_request(struct request *rq) -+{ -+ struct bfq_queue *bfqq; -+ struct bfq_data *bfqd; -+ struct bfq_io_cq *bic; -+ -+ BUG_ON(!rq); -+ -+ bfqq = RQ_BFQQ(rq); -+ -+ /* -+ * Requeue and finish hooks are invoked in blk-mq without -+ * checking whether the involved request is actually still -+ * referenced in the scheduler. To handle this fact, the -+ * following two checks make this function exit in case of -+ * spurious invocations, for which there is nothing to do. -+ * -+ * First, check whether rq has nothing to do with an elevator. -+ */ -+ if (unlikely(!(rq->rq_flags & RQF_ELVPRIV))) -+ return; -+ -+ /* -+ * rq either is not associated with any icq, or is an already -+ * requeued request that has not (yet) been re-inserted into -+ * a bfq_queue. 
-+ */ -+ if (!rq->elv.icq || !bfqq) -+ return; -+ -+ bic = RQ_BIC(rq); -+ BUG_ON(!bic); -+ -+ bfqd = bfqq->bfqd; -+ BUG_ON(!bfqd); -+ -+ if (rq->rq_flags & RQF_DISP_LIST) { -+ pr_crit("putting disp rq %p for %d", rq, bfqq->pid); -+ BUG(); -+ } -+ BUG_ON(rq->rq_flags & RQF_QUEUED); -+ -+ bfq_log_bfqq(bfqd, bfqq, -+ "putting rq %p with %u sects left, STARTED %d", -+ rq, blk_rq_sectors(rq), -+ rq->rq_flags & RQF_STARTED); -+ -+ if (rq->rq_flags & RQF_STARTED) -+ bfqg_stats_update_completion(bfqq_group(bfqq), -+ rq->start_time_ns, -+ rq->io_start_time_ns, -+ rq->cmd_flags); -+ -+ WARN_ON(blk_rq_sectors(rq) == 0 && !(rq->rq_flags & RQF_STARTED)); -+ -+ if (likely(rq->rq_flags & RQF_STARTED)) { -+ unsigned long flags; -+ -+ spin_lock_irqsave(&bfqd->lock, flags); -+ -+ bfq_completed_request(bfqq, bfqd); -+ bfq_finish_requeue_request_body(bfqq); -+ -+ spin_unlock_irqrestore(&bfqd->lock, flags); -+ } else { -+ /* -+ * Request rq may be still/already in the scheduler, -+ * in which case we need to remove it (this should -+ * never happen in case of requeue). And we cannot -+ * defer such a check and removal, to avoid -+ * inconsistencies in the time interval from the end -+ * of this function to the start of the deferred work. -+ * This situation seems to occur only in process -+ * context, as a consequence of a merge. In the -+ * current version of the code, this implies that the -+ * lock is held. -+ */ -+ BUG_ON(in_interrupt()); -+ -+ assert_spin_locked(&bfqd->lock); -+ if (!RB_EMPTY_NODE(&rq->rb_node)) { -+ bfq_remove_request(rq->q, rq); -+ bfqg_stats_update_io_remove(bfqq_group(bfqq), -+ rq->cmd_flags); -+ } -+ bfq_finish_requeue_request_body(bfqq); -+ } -+ -+ /* -+ * Reset private fields. In case of a requeue, this allows -+ * this function to correctly do nothing if it is spuriously -+ * invoked again on this same request (see the check at the -+ * beginning of the function). Probably, a better general -+ * design would be to prevent blk-mq from invoking the requeue -+ * or finish hooks of an elevator, for a request that is not -+ * referred by that elevator. -+ * -+ * Resetting the following fields would break the -+ * request-insertion logic if rq is re-inserted into a bfq -+ * internal queue, without a re-preparation. Here we assume -+ * that re-insertions of requeued requests, without -+ * re-preparation, can happen only for pass_through or at_head -+ * requests (which are not re-inserted into bfq internal -+ * queues). -+ */ -+ rq->elv.priv[0] = NULL; -+ rq->elv.priv[1] = NULL; -+} -+ -+/* -+ * Returns NULL if a new bfqq should be allocated, or the old bfqq if this -+ * was the last process referring to that bfqq. 
-+ */ -+static struct bfq_queue * -+bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq) -+{ -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue"); -+ -+ if (bfqq_process_refs(bfqq) == 1) { -+ bfqq->pid = current->pid; -+ bfq_clear_bfqq_coop(bfqq); -+ bfq_clear_bfqq_split_coop(bfqq); -+ return bfqq; -+ } -+ -+ bic_set_bfqq(bic, NULL, 1); -+ -+ bfq_put_cooperator(bfqq); -+ -+ bfq_put_queue(bfqq); -+ return NULL; -+} -+ -+static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd, -+ struct bfq_io_cq *bic, -+ struct bio *bio, -+ bool split, bool is_sync, -+ bool *new_queue) -+{ -+ struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync); -+ -+ if (likely(bfqq && bfqq != &bfqd->oom_bfqq)) -+ return bfqq; -+ -+ if (new_queue) -+ *new_queue = true; -+ -+ if (bfqq) -+ bfq_put_queue(bfqq); -+ bfqq = bfq_get_queue(bfqd, bio, is_sync, bic); -+ BUG_ON(!hlist_unhashed(&bfqq->burst_list_node)); -+ -+ bic_set_bfqq(bic, bfqq, is_sync); -+ if (split && is_sync) { -+ bfq_log_bfqq(bfqd, bfqq, -+ "get_request: was_in_list %d " -+ "was_in_large_burst %d " -+ "large burst in progress %d", -+ bic->was_in_burst_list, -+ bic->saved_in_large_burst, -+ bfqd->large_burst); -+ -+ if ((bic->was_in_burst_list && bfqd->large_burst) || -+ bic->saved_in_large_burst) { -+ bfq_log_bfqq(bfqd, bfqq, -+ "get_request: marking in " -+ "large burst"); -+ bfq_mark_bfqq_in_large_burst(bfqq); -+ } else { -+ bfq_log_bfqq(bfqd, bfqq, -+ "get_request: clearing in " -+ "large burst"); -+ bfq_clear_bfqq_in_large_burst(bfqq); -+ if (bic->was_in_burst_list) -+ /* -+ * If bfqq was in the current -+ * burst list before being -+ * merged, then we have to add -+ * it back. And we do not need -+ * to increase burst_size, as -+ * we did not decrement -+ * burst_size when we removed -+ * bfqq from the burst list as -+ * a consequence of a merge -+ * (see comments in -+ * bfq_put_queue). In this -+ * respect, it would be rather -+ * costly to know whether the -+ * current burst list is still -+ * the same burst list from -+ * which bfqq was removed on -+ * the merge. To avoid this -+ * cost, if bfqq was in a -+ * burst list, then we add -+ * bfqq to the current burst -+ * list without any further -+ * check. This can cause -+ * inappropriate insertions, -+ * but rarely enough to not -+ * harm the detection of large -+ * bursts significantly. -+ */ -+ hlist_add_head(&bfqq->burst_list_node, -+ &bfqd->burst_list); -+ } -+ bfqq->split_time = jiffies; -+ } -+ -+ return bfqq; -+} -+ -+/* -+ * Only reset private fields. The actual request preparation will be -+ * performed by bfq_init_rq, when rq is either inserted or merged. See -+ * comments on bfq_init_rq for the reason behind this delayed -+ * preparation. -+*/ -+static void bfq_prepare_request(struct request *rq, struct bio *bio) -+{ -+ /* -+ * Regardless of whether we have an icq attached, we have to -+ * clear the scheduler pointers, as they might point to -+ * previously allocated bic/bfqq structs. -+ */ -+ rq->elv.priv[0] = rq->elv.priv[1] = NULL; -+} -+ -+/* -+ * If needed, init rq, allocate bfq data structures associated with -+ * rq, and increment reference counters in the destination bfq_queue -+ * for rq. Return the destination bfq_queue for rq, or NULL is rq is -+ * not associated with any bfq_queue. -+ * -+ * This function is invoked by the functions that perform rq insertion -+ * or merging. One may have expected the above preparation operations -+ * to be performed in bfq_prepare_request, and not delayed to when rq -+ * is inserted or merged. 
The rationale behind this delayed -+ * preparation is that, after the prepare_request hook is invoked for -+ * rq, rq may still be transformed into a request with no icq, i.e., a -+ * request not associated with any queue. No bfq hook is invoked to -+ * signal this tranformation. As a consequence, should these -+ * preparation operations be performed when the prepare_request hook -+ * is invoked, and should rq be transformed one moment later, bfq -+ * would end up in an inconsistent state, because it would have -+ * incremented some queue counters for an rq destined to -+ * transformation, without any chance to correctly lower these -+ * counters back. In contrast, no transformation can still happen for -+ * rq after rq has been inserted or merged. So, it is safe to execute -+ * these preparation operations when rq is finally inserted or merged. -+ */ -+static struct bfq_queue *bfq_init_rq(struct request *rq) -+{ -+ struct request_queue *q = rq->q; -+ struct bio *bio = rq->bio; -+ struct bfq_data *bfqd = q->elevator->elevator_data; -+ struct bfq_io_cq *bic; -+ const int is_sync = rq_is_sync(rq); -+ struct bfq_queue *bfqq; -+ bool bfqq_already_existing = false, split = false; -+ bool new_queue = false; -+ -+ if (unlikely(!rq->elv.icq)) -+ return NULL; -+ -+ /* -+ * Assuming that elv.priv[1] is set only if everything is set -+ * for this rq. This holds true, because this function is -+ * invoked only for insertion or merging, and, after such -+ * events, a request cannot be manipulated any longer before -+ * being removed from bfq. -+ */ -+ if (rq->elv.priv[1]) { -+ BUG_ON(!(rq->rq_flags & RQF_ELVPRIV)); -+ return rq->elv.priv[1]; -+ } -+ -+ bic = icq_to_bic(rq->elv.icq); -+ -+ bfq_check_ioprio_change(bic, bio); -+ -+ bfq_bic_update_cgroup(bic, bio); -+ -+ bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, false, is_sync, -+ &new_queue); -+ -+ if (likely(!new_queue)) { -+ /* If the queue was seeky for too long, break it apart. */ -+ if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq)) { -+ BUG_ON(!is_sync); -+ bfq_log_bfqq(bfqd, bfqq, "breaking apart bfqq"); -+ -+ /* Update bic before losing reference to bfqq */ -+ if (bfq_bfqq_in_large_burst(bfqq)) -+ bic->saved_in_large_burst = true; -+ -+ bfqq = bfq_split_bfqq(bic, bfqq); -+ split = true; -+ -+ if (!bfqq) -+ bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, -+ true, is_sync, -+ NULL); -+ else -+ bfqq_already_existing = true; -+ -+ BUG_ON(!bfqq); -+ BUG_ON(bfqq == &bfqd->oom_bfqq); -+ } -+ } -+ -+ bfqq->allocated++; -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "new allocated %d", bfqq->allocated); -+ -+ bfqq->ref++; -+ bfq_log_bfqq(bfqd, bfqq, "%p: bfqq %p, %d", rq, bfqq, bfqq->ref); -+ -+ rq->elv.priv[0] = bic; -+ rq->elv.priv[1] = bfqq; -+ rq->rq_flags &= ~RQF_DISP_LIST; -+ -+ /* -+ * If a bfq_queue has only one process reference, it is owned -+ * by only this bic: we can then set bfqq->bic = bic. in -+ * addition, if the queue has also just been split, we have to -+ * resume its state. -+ */ -+ if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) { -+ bfqq->bic = bic; -+ if (split) { -+ /* -+ * The queue has just been split from a shared -+ * queue: restore the idle window and the -+ * possible weight raising period. 
-+ */ -+ bfq_bfqq_resume_state(bfqq, bfqd, bic, -+ bfqq_already_existing); -+ } -+ } -+ -+ if (unlikely(bfq_bfqq_just_created(bfqq))) -+ bfq_handle_burst(bfqd, bfqq); -+ -+ rq->rq_flags |= RQF_GOT; -+ -+ return bfqq; -+} -+ -+static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq) -+{ -+ struct bfq_data *bfqd = bfqq->bfqd; -+ enum bfqq_expiration reason; -+ unsigned long flags; -+ -+ BUG_ON(!bfqd); -+ spin_lock_irqsave(&bfqd->lock, flags); -+ -+ bfq_log_bfqq(bfqd, bfqq, "handling slice_timer expiration"); -+ bfq_clear_bfqq_wait_request(bfqq); -+ -+ if (bfqq != bfqd->in_service_queue) { -+ spin_unlock_irqrestore(&bfqd->lock, flags); -+ return; -+ } -+ -+ if (bfq_bfqq_budget_timeout(bfqq)) -+ /* -+ * Also here the queue can be safely expired -+ * for budget timeout without wasting -+ * guarantees -+ */ -+ reason = BFQ_BFQQ_BUDGET_TIMEOUT; -+ else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0) -+ /* -+ * The queue may not be empty upon timer expiration, -+ * because we may not disable the timer when the -+ * first request of the in-service queue arrives -+ * during disk idling. -+ */ -+ reason = BFQ_BFQQ_TOO_IDLE; -+ else -+ goto schedule_dispatch; -+ -+ bfq_bfqq_expire(bfqd, bfqq, true, reason); -+ -+schedule_dispatch: -+ spin_unlock_irqrestore(&bfqd->lock, flags); -+ bfq_schedule_dispatch(bfqd); -+} -+ -+/* -+ * Handler of the expiration of the timer running if the in-service queue -+ * is idling inside its time slice. -+ */ -+static enum hrtimer_restart bfq_idle_slice_timer(struct hrtimer *timer) -+{ -+ struct bfq_data *bfqd = container_of(timer, struct bfq_data, -+ idle_slice_timer); -+ struct bfq_queue *bfqq = bfqd->in_service_queue; -+ -+ bfq_log(bfqd, "expired"); -+ -+ /* -+ * Theoretical race here: the in-service queue can be NULL or -+ * different from the queue that was idling if a new request -+ * arrives for the current queue and there is a full dispatch -+ * cycle that changes the in-service queue. This can hardly -+ * happen, but in the worst case we just expire a queue too -+ * early. -+ */ -+ if (bfqq) -+ bfq_idle_slice_timer_body(bfqq); -+ -+ return HRTIMER_NORESTART; -+} -+ -+static void __bfq_put_async_bfqq(struct bfq_data *bfqd, -+ struct bfq_queue **bfqq_ptr) -+{ -+ struct bfq_group *root_group = bfqd->root_group; -+ struct bfq_queue *bfqq = *bfqq_ptr; -+ -+ bfq_log(bfqd, "%p", bfqq); -+ if (bfqq) { -+ bfq_bfqq_move(bfqd, bfqq, root_group); -+ bfq_log_bfqq(bfqd, bfqq, "putting %p, %d", -+ bfqq, bfqq->ref); -+ bfq_put_queue(bfqq); -+ *bfqq_ptr = NULL; -+ } -+} -+ -+/* -+ * Release all the bfqg references to its async queues. If we are -+ * deallocating the group these queues may still contain requests, so -+ * we reparent them to the root cgroup (i.e., the only one that will -+ * exist for sure until all the requests on a device are gone). -+ */ -+static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg) -+{ -+ int i, j; -+ -+ for (i = 0; i < 2; i++) -+ for (j = 0; j < IOPRIO_BE_NR; j++) -+ __bfq_put_async_bfqq(bfqd, &bfqg->async_bfqq[i][j]); -+ -+ __bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq); -+} -+ -+/* -+ * See the comments on bfq_limit_depth for the purpose of -+ * the depths set in the function. Return minimum shallow depth we'll use. -+ */ -+static unsigned int bfq_update_depths(struct bfq_data *bfqd, -+ struct sbitmap_queue *bt) -+{ -+ unsigned int i, j, min_shallow = UINT_MAX; -+ -+ /* -+ * In-word depths if no bfq_queue is being weight-raised: -+ * leaving 25% of tags only for sync reads. 
-+ * -+ * In next formulas, right-shift the value -+ * (1U<sb.shift), instead of computing directly -+ * (1U<<(bt->sb.shift - something)), to be robust against -+ * any possible value of bt->sb.shift, without having to -+ * limit 'something'. -+ */ -+ /* no more than 50% of tags for async I/O */ -+ bfqd->word_depths[0][0] = max((1U<sb.shift)>>1, 1U); -+ /* -+ * no more than 75% of tags for sync writes (25% extra tags -+ * w.r.t. async I/O, to prevent async I/O from starving sync -+ * writes) -+ */ -+ bfqd->word_depths[0][1] = max(((1U<sb.shift) * 3)>>2, 1U); -+ -+ /* -+ * In-word depths in case some bfq_queue is being weight- -+ * raised: leaving ~63% of tags for sync reads. This is the -+ * highest percentage for which, in our tests, application -+ * start-up times didn't suffer from any regression due to tag -+ * shortage. -+ */ -+ /* no more than ~18% of tags for async I/O */ -+ bfqd->word_depths[1][0] = max(((1U<sb.shift) * 3)>>4, 1U); -+ /* no more than ~37% of tags for sync writes (~20% extra tags) */ -+ bfqd->word_depths[1][1] = max(((1U<sb.shift) * 6)>>4, 1U); -+ -+ for (i = 0; i < 2; i++) -+ for (j = 0; j < 2; j++) -+ min_shallow = min(min_shallow, bfqd->word_depths[i][j]); -+ -+ return min_shallow; -+} -+ -+static void bfq_depth_updated(struct blk_mq_hw_ctx *hctx) -+{ -+ struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; -+ struct blk_mq_tags *tags = hctx->sched_tags; -+ unsigned int min_shallow; -+ -+ min_shallow = bfq_update_depths(bfqd, &tags->bitmap_tags); -+ sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, min_shallow); -+} -+ -+static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index) -+{ -+ bfq_depth_updated(hctx); -+ return 0; -+} -+ -+static void bfq_exit_queue(struct elevator_queue *e) -+{ -+ struct bfq_data *bfqd = e->elevator_data; -+ struct bfq_queue *bfqq, *n; -+ -+ bfq_log(bfqd, "starting ..."); -+ -+ hrtimer_cancel(&bfqd->idle_slice_timer); -+ -+ BUG_ON(bfqd->in_service_queue); -+ BUG_ON(!list_empty(&bfqd->active_list)); -+ -+ spin_lock_irq(&bfqd->lock); -+ list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list) -+ bfq_deactivate_bfqq(bfqd, bfqq, false, false); -+ spin_unlock_irq(&bfqd->lock); -+ -+ hrtimer_cancel(&bfqd->idle_slice_timer); -+ -+ BUG_ON(hrtimer_active(&bfqd->idle_slice_timer)); -+ -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ /* release oom-queue reference to root group */ -+ bfqg_and_blkg_put(bfqd->root_group); -+ -+ blkcg_deactivate_policy(bfqd->queue, &blkcg_policy_bfq); -+#else -+ spin_lock_irq(&bfqd->lock); -+ bfq_put_async_queues(bfqd, bfqd->root_group); -+ kfree(bfqd->root_group); -+ spin_unlock_irq(&bfqd->lock); -+#endif -+ -+ bfq_log(bfqd, "finished ..."); -+ kfree(bfqd); -+} -+ -+static void bfq_init_root_group(struct bfq_group *root_group, -+ struct bfq_data *bfqd) -+{ -+ int i; -+ -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ root_group->entity.parent = NULL; -+ root_group->my_entity = NULL; -+ root_group->bfqd = bfqd; -+#endif -+ root_group->rq_pos_tree = RB_ROOT; -+ for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) -+ root_group->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT; -+ root_group->sched_data.bfq_class_idle_last_service = jiffies; -+} -+ -+static int bfq_init_queue(struct request_queue *q, struct elevator_type *e) -+{ -+ struct bfq_data *bfqd; -+ struct elevator_queue *eq; -+ -+ eq = elevator_alloc(q, e); -+ if (!eq) -+ return -ENOMEM; -+ -+ bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node); -+ if (!bfqd) { -+ kobject_put(&eq->kobj); -+ return -ENOMEM; -+ } -+ eq->elevator_data = bfqd; -+ -+ 
spin_lock_irq(q->queue_lock); -+ q->elevator = eq; -+ spin_unlock_irq(q->queue_lock); -+ -+ /* -+ * Our fallback bfqq if bfq_find_alloc_queue() runs into OOM issues. -+ * Grab a permanent reference to it, so that the normal code flow -+ * will not attempt to free it. -+ */ -+ bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, NULL, 1, 0); -+ bfqd->oom_bfqq.ref++; -+ bfqd->oom_bfqq.new_ioprio = BFQ_DEFAULT_QUEUE_IOPRIO; -+ bfqd->oom_bfqq.new_ioprio_class = IOPRIO_CLASS_BE; -+ bfqd->oom_bfqq.entity.new_weight = -+ bfq_ioprio_to_weight(bfqd->oom_bfqq.new_ioprio); -+ -+ /* oom_bfqq does not participate to bursts */ -+ bfq_clear_bfqq_just_created(&bfqd->oom_bfqq); -+ /* -+ * Trigger weight initialization, according to ioprio, at the -+ * oom_bfqq's first activation. The oom_bfqq's ioprio and ioprio -+ * class won't be changed any more. -+ */ -+ bfqd->oom_bfqq.entity.prio_changed = 1; -+ -+ bfqd->queue = q; -+ INIT_LIST_HEAD(&bfqd->dispatch); -+ -+ hrtimer_init(&bfqd->idle_slice_timer, CLOCK_MONOTONIC, -+ HRTIMER_MODE_REL); -+ bfqd->idle_slice_timer.function = bfq_idle_slice_timer; -+ -+ bfqd->queue_weights_tree = RB_ROOT; -+ bfqd->num_groups_with_pending_reqs = 0; -+ -+ INIT_LIST_HEAD(&bfqd->active_list); -+ INIT_LIST_HEAD(&bfqd->idle_list); -+ INIT_HLIST_HEAD(&bfqd->burst_list); -+ -+ bfqd->hw_tag = -1; -+ -+ bfqd->bfq_max_budget = bfq_default_max_budget; -+ -+ bfqd->bfq_fifo_expire[0] = bfq_fifo_expire[0]; -+ bfqd->bfq_fifo_expire[1] = bfq_fifo_expire[1]; -+ bfqd->bfq_back_max = bfq_back_max; -+ bfqd->bfq_back_penalty = bfq_back_penalty; -+ bfqd->bfq_slice_idle = bfq_slice_idle; -+ bfqd->bfq_timeout = bfq_timeout; -+ -+ bfqd->bfq_requests_within_timer = 120; -+ -+ bfqd->bfq_large_burst_thresh = 8; -+ bfqd->bfq_burst_interval = msecs_to_jiffies(180); -+ -+ bfqd->low_latency = true; -+ -+ /* -+ * Trade-off between responsiveness and fairness. -+ */ -+ bfqd->bfq_wr_coeff = 30; -+ bfqd->bfq_wr_rt_max_time = msecs_to_jiffies(300); -+ bfqd->bfq_wr_max_time = 0; -+ bfqd->bfq_wr_min_idle_time = msecs_to_jiffies(2000); -+ bfqd->bfq_wr_min_inter_arr_async = msecs_to_jiffies(500); -+ bfqd->bfq_wr_max_softrt_rate = 7000; /* -+ * Approximate rate required -+ * to playback or record a -+ * high-definition compressed -+ * video. -+ */ -+ bfqd->wr_busy_queues = 0; -+ -+ /* -+ * Begin by assuming, optimistically, that the device peak -+ * rate is equal to 2/3 of the highest reference rate. -+ */ -+ bfqd->rate_dur_prod = ref_rate[blk_queue_nonrot(bfqd->queue)] * -+ ref_wr_duration[blk_queue_nonrot(bfqd->queue)]; -+ bfqd->peak_rate = ref_rate[blk_queue_nonrot(bfqd->queue)] * 2 / 3; -+ -+ spin_lock_init(&bfqd->lock); -+ -+ /* -+ * The invocation of the next bfq_create_group_hierarchy -+ * function is the head of a chain of function calls -+ * (bfq_create_group_hierarchy->blkcg_activate_policy-> -+ * blk_mq_freeze_queue) that may lead to the invocation of the -+ * has_work hook function. For this reason, -+ * bfq_create_group_hierarchy is invoked only after all -+ * scheduler data has been initialized, apart from the fields -+ * that can be initialized only after invoking -+ * bfq_create_group_hierarchy. This, in particular, enables -+ * has_work to correctly return false. Of course, to avoid -+ * other inconsistencies, the blk-mq stack must then refrain -+ * from invoking further scheduler hooks before this init -+ * function is finished. 
-+ */ -+ bfqd->root_group = bfq_create_group_hierarchy(bfqd, q->node); -+ if (!bfqd->root_group) -+ goto out_free; -+ bfq_init_root_group(bfqd->root_group, bfqd); -+ bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group); -+ -+ wbt_disable_default(q); -+ return 0; -+ -+out_free: -+ kfree(bfqd); -+ kobject_put(&eq->kobj); -+ return -ENOMEM; -+} -+ -+static void bfq_slab_kill(void) -+{ -+ kmem_cache_destroy(bfq_pool); -+} -+ -+static int __init bfq_slab_setup(void) -+{ -+ bfq_pool = KMEM_CACHE(bfq_queue, 0); -+ if (!bfq_pool) -+ return -ENOMEM; -+ return 0; -+} -+ -+static ssize_t bfq_var_show(unsigned int var, char *page) -+{ -+ return sprintf(page, "%u\n", var); -+} -+ -+static ssize_t bfq_var_store(unsigned long *var, const char *page, -+ size_t count) -+{ -+ unsigned long new_val; -+ int ret = kstrtoul(page, 10, &new_val); -+ -+ if (ret == 0) -+ *var = new_val; -+ -+ return count; -+} -+ -+static ssize_t bfq_wr_max_time_show(struct elevator_queue *e, char *page) -+{ -+ struct bfq_data *bfqd = e->elevator_data; -+ -+ return sprintf(page, "%d\n", bfqd->bfq_wr_max_time > 0 ? -+ jiffies_to_msecs(bfqd->bfq_wr_max_time) : -+ jiffies_to_msecs(bfq_wr_duration(bfqd))); -+} -+ -+static ssize_t bfq_weights_show(struct elevator_queue *e, char *page) -+{ -+ struct bfq_queue *bfqq; -+ struct bfq_data *bfqd = e->elevator_data; -+ ssize_t num_char = 0; -+ -+ num_char += sprintf(page + num_char, "Tot reqs queued %d\n\n", -+ bfqd->queued); -+ -+ spin_lock_irq(&bfqd->lock); -+ -+ num_char += sprintf(page + num_char, "Active:\n"); -+ list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list) { -+ num_char += sprintf(page + num_char, -+ "pid%d: weight %hu, nr_queued %d %d, ", -+ bfqq->pid, -+ bfqq->entity.weight, -+ bfqq->queued[0], -+ bfqq->queued[1]); -+ num_char += sprintf(page + num_char, -+ "dur %d/%u\n", -+ jiffies_to_msecs( -+ jiffies - -+ bfqq->last_wr_start_finish), -+ jiffies_to_msecs(bfqq->wr_cur_max_time)); -+ } -+ -+ num_char += sprintf(page + num_char, "Idle:\n"); -+ list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list) { -+ num_char += sprintf(page + num_char, -+ "pid%d: weight %hu, dur %d/%u\n", -+ bfqq->pid, -+ bfqq->entity.weight, -+ jiffies_to_msecs(jiffies - -+ bfqq->last_wr_start_finish), -+ jiffies_to_msecs(bfqq->wr_cur_max_time)); -+ } -+ -+ spin_unlock_irq(&bfqd->lock); -+ -+ return num_char; -+} -+ -+#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ -+static ssize_t __FUNC(struct elevator_queue *e, char *page) \ -+{ \ -+ struct bfq_data *bfqd = e->elevator_data; \ -+ u64 __data = __VAR; \ -+ if (__CONV == 1) \ -+ __data = jiffies_to_msecs(__data); \ -+ else if (__CONV == 2) \ -+ __data = div_u64(__data, NSEC_PER_MSEC); \ -+ return bfq_var_show(__data, (page)); \ -+} -+SHOW_FUNCTION(bfq_fifo_expire_sync_show, bfqd->bfq_fifo_expire[1], 2); -+SHOW_FUNCTION(bfq_fifo_expire_async_show, bfqd->bfq_fifo_expire[0], 2); -+SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0); -+SHOW_FUNCTION(bfq_back_seek_penalty_show, bfqd->bfq_back_penalty, 0); -+SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 2); -+SHOW_FUNCTION(bfq_max_budget_show, bfqd->bfq_user_max_budget, 0); -+SHOW_FUNCTION(bfq_timeout_sync_show, bfqd->bfq_timeout, 1); -+SHOW_FUNCTION(bfq_strict_guarantees_show, bfqd->strict_guarantees, 0); -+SHOW_FUNCTION(bfq_low_latency_show, bfqd->low_latency, 0); -+SHOW_FUNCTION(bfq_wr_coeff_show, bfqd->bfq_wr_coeff, 0); -+SHOW_FUNCTION(bfq_wr_rt_max_time_show, bfqd->bfq_wr_rt_max_time, 1); -+SHOW_FUNCTION(bfq_wr_min_idle_time_show, bfqd->bfq_wr_min_idle_time, 1); 
-+SHOW_FUNCTION(bfq_wr_min_inter_arr_async_show, bfqd->bfq_wr_min_inter_arr_async, -+ 1); -+SHOW_FUNCTION(bfq_wr_max_softrt_rate_show, bfqd->bfq_wr_max_softrt_rate, 0); -+#undef SHOW_FUNCTION -+ -+#define USEC_SHOW_FUNCTION(__FUNC, __VAR) \ -+static ssize_t __FUNC(struct elevator_queue *e, char *page) \ -+{ \ -+ struct bfq_data *bfqd = e->elevator_data; \ -+ u64 __data = __VAR; \ -+ __data = div_u64(__data, NSEC_PER_USEC); \ -+ return bfq_var_show(__data, (page)); \ -+} -+USEC_SHOW_FUNCTION(bfq_slice_idle_us_show, bfqd->bfq_slice_idle); -+#undef USEC_SHOW_FUNCTION -+ -+#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ -+static ssize_t \ -+__FUNC(struct elevator_queue *e, const char *page, size_t count) \ -+{ \ -+ struct bfq_data *bfqd = e->elevator_data; \ -+ unsigned long uninitialized_var(__data); \ -+ int ret = bfq_var_store(&__data, (page), count); \ -+ if (__data < (MIN)) \ -+ __data = (MIN); \ -+ else if (__data > (MAX)) \ -+ __data = (MAX); \ -+ if (__CONV == 1) \ -+ *(__PTR) = msecs_to_jiffies(__data); \ -+ else if (__CONV == 2) \ -+ *(__PTR) = (u64)__data * NSEC_PER_MSEC; \ -+ else \ -+ *(__PTR) = __data; \ -+ return ret; \ -+} -+STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1, -+ INT_MAX, 2); -+STORE_FUNCTION(bfq_fifo_expire_async_store, &bfqd->bfq_fifo_expire[0], 1, -+ INT_MAX, 2); -+STORE_FUNCTION(bfq_back_seek_max_store, &bfqd->bfq_back_max, 0, INT_MAX, 0); -+STORE_FUNCTION(bfq_back_seek_penalty_store, &bfqd->bfq_back_penalty, 1, -+ INT_MAX, 0); -+STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 2); -+STORE_FUNCTION(bfq_wr_coeff_store, &bfqd->bfq_wr_coeff, 1, INT_MAX, 0); -+STORE_FUNCTION(bfq_wr_max_time_store, &bfqd->bfq_wr_max_time, 0, INT_MAX, 1); -+STORE_FUNCTION(bfq_wr_rt_max_time_store, &bfqd->bfq_wr_rt_max_time, 0, INT_MAX, -+ 1); -+STORE_FUNCTION(bfq_wr_min_idle_time_store, &bfqd->bfq_wr_min_idle_time, 0, -+ INT_MAX, 1); -+STORE_FUNCTION(bfq_wr_min_inter_arr_async_store, -+ &bfqd->bfq_wr_min_inter_arr_async, 0, INT_MAX, 1); -+STORE_FUNCTION(bfq_wr_max_softrt_rate_store, &bfqd->bfq_wr_max_softrt_rate, 0, -+ INT_MAX, 0); -+#undef STORE_FUNCTION -+ -+#define USEC_STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \ -+static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)\ -+{ \ -+ struct bfq_data *bfqd = e->elevator_data; \ -+ unsigned long uninitialized_var(__data); \ -+ int ret = bfq_var_store(&__data, (page), count); \ -+ if (__data < (MIN)) \ -+ __data = (MIN); \ -+ else if (__data > (MAX)) \ -+ __data = (MAX); \ -+ *(__PTR) = (u64)__data * NSEC_PER_USEC; \ -+ return ret; \ -+} -+USEC_STORE_FUNCTION(bfq_slice_idle_us_store, &bfqd->bfq_slice_idle, 0, -+ UINT_MAX); -+#undef USEC_STORE_FUNCTION -+ -+/* do nothing for the moment */ -+static ssize_t bfq_weights_store(struct elevator_queue *e, -+ const char *page, size_t count) -+{ -+ return count; -+} -+ -+static ssize_t bfq_max_budget_store(struct elevator_queue *e, -+ const char *page, size_t count) -+{ -+ struct bfq_data *bfqd = e->elevator_data; -+ unsigned long uninitialized_var(__data); -+ int ret = bfq_var_store(&__data, (page), count); -+ -+ if (__data == 0) -+ bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd); -+ else { -+ if (__data > INT_MAX) -+ __data = INT_MAX; -+ bfqd->bfq_max_budget = __data; -+ } -+ -+ bfqd->bfq_user_max_budget = __data; -+ -+ return ret; -+} -+ -+/* -+ * Leaving this name to preserve name compatibility with cfq -+ * parameters, but this timeout is used for both sync and async. 
-+ */ -+static ssize_t bfq_timeout_sync_store(struct elevator_queue *e, -+ const char *page, size_t count) -+{ -+ struct bfq_data *bfqd = e->elevator_data; -+ unsigned long uninitialized_var(__data); -+ int ret = bfq_var_store(&__data, (page), count); -+ -+ if (__data < 1) -+ __data = 1; -+ else if (__data > INT_MAX) -+ __data = INT_MAX; -+ -+ bfqd->bfq_timeout = msecs_to_jiffies(__data); -+ if (bfqd->bfq_user_max_budget == 0) -+ bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd); -+ -+ return ret; -+} -+ -+static ssize_t bfq_strict_guarantees_store(struct elevator_queue *e, -+ const char *page, size_t count) -+{ -+ struct bfq_data *bfqd = e->elevator_data; -+ unsigned long uninitialized_var(__data); -+ int ret = bfq_var_store(&__data, (page), count); -+ -+ if (__data > 1) -+ __data = 1; -+ if (!bfqd->strict_guarantees && __data == 1 -+ && bfqd->bfq_slice_idle < 8 * NSEC_PER_MSEC) -+ bfqd->bfq_slice_idle = 8 * NSEC_PER_MSEC; -+ -+ bfqd->strict_guarantees = __data; -+ -+ return ret; -+} -+ -+static ssize_t bfq_low_latency_store(struct elevator_queue *e, -+ const char *page, size_t count) -+{ -+ struct bfq_data *bfqd = e->elevator_data; -+ unsigned long uninitialized_var(__data); -+ int ret = bfq_var_store(&__data, (page), count); -+ -+ if (__data > 1) -+ __data = 1; -+ if (__data == 0 && bfqd->low_latency != 0) -+ bfq_end_wr(bfqd); -+ bfqd->low_latency = __data; -+ -+ return ret; -+} -+ -+#define BFQ_ATTR(name) \ -+ __ATTR(name, S_IRUGO|S_IWUSR, bfq_##name##_show, bfq_##name##_store) -+ -+static struct elv_fs_entry bfq_attrs[] = { -+ BFQ_ATTR(fifo_expire_sync), -+ BFQ_ATTR(fifo_expire_async), -+ BFQ_ATTR(back_seek_max), -+ BFQ_ATTR(back_seek_penalty), -+ BFQ_ATTR(slice_idle), -+ BFQ_ATTR(slice_idle_us), -+ BFQ_ATTR(max_budget), -+ BFQ_ATTR(timeout_sync), -+ BFQ_ATTR(strict_guarantees), -+ BFQ_ATTR(low_latency), -+ BFQ_ATTR(wr_coeff), -+ BFQ_ATTR(wr_max_time), -+ BFQ_ATTR(wr_rt_max_time), -+ BFQ_ATTR(wr_min_idle_time), -+ BFQ_ATTR(wr_min_inter_arr_async), -+ BFQ_ATTR(wr_max_softrt_rate), -+ BFQ_ATTR(weights), -+ __ATTR_NULL -+}; -+ -+static struct elevator_type iosched_bfq_mq = { -+ .ops.mq = { -+ .limit_depth = bfq_limit_depth, -+ .prepare_request = bfq_prepare_request, -+ .requeue_request = bfq_finish_requeue_request, -+ .finish_request = bfq_finish_requeue_request, -+ .exit_icq = bfq_exit_icq, -+ .insert_requests = bfq_insert_requests, -+ .dispatch_request = bfq_dispatch_request, -+ .next_request = elv_rb_latter_request, -+ .former_request = elv_rb_former_request, -+ .allow_merge = bfq_allow_bio_merge, -+ .bio_merge = bfq_bio_merge, -+ .request_merge = bfq_request_merge, -+ .requests_merged = bfq_requests_merged, -+ .request_merged = bfq_request_merged, -+ .has_work = bfq_has_work, -+ .depth_updated = bfq_depth_updated, -+ .init_hctx = bfq_init_hctx, -+ .init_sched = bfq_init_queue, -+ .exit_sched = bfq_exit_queue, -+ }, -+ -+ .uses_mq = true, -+ .icq_size = sizeof(struct bfq_io_cq), -+ .icq_align = __alignof__(struct bfq_io_cq), -+ .elevator_attrs = bfq_attrs, -+ .elevator_name = "bfq-mq", -+ .elevator_owner = THIS_MODULE, -+}; -+ -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+static struct blkcg_policy blkcg_policy_bfq = { -+ .dfl_cftypes = bfq_blkg_files, -+ .legacy_cftypes = bfq_blkcg_legacy_files, -+ -+ .cpd_alloc_fn = bfq_cpd_alloc, -+ .cpd_init_fn = bfq_cpd_init, -+ .cpd_bind_fn = bfq_cpd_init, -+ .cpd_free_fn = bfq_cpd_free, -+ -+ .pd_alloc_fn = bfq_pd_alloc, -+ .pd_init_fn = bfq_pd_init, -+ .pd_offline_fn = bfq_pd_offline, -+ .pd_free_fn = bfq_pd_free, -+ .pd_reset_stats_fn = 
bfq_pd_reset_stats, -+}; -+#endif -+ -+static int __init bfq_init(void) -+{ -+ int ret; -+ char msg[60] = "BFQ I/O-scheduler: v9"; -+ -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ ret = blkcg_policy_register(&blkcg_policy_bfq); -+ if (ret) -+ return ret; -+#endif -+ -+ ret = -ENOMEM; -+ if (bfq_slab_setup()) -+ goto err_pol_unreg; -+ -+ /* -+ * Times to load large popular applications for the typical -+ * systems installed on the reference devices (see the -+ * comments before the definition of the next -+ * array). Actually, we use slightly lower values, as the -+ * estimated peak rate tends to be smaller than the actual -+ * peak rate. The reason for this last fact is that estimates -+ * are computed over much shorter time intervals than the long -+ * intervals typically used for benchmarking. Why? First, to -+ * adapt more quickly to variations. Second, because an I/O -+ * scheduler cannot rely on a peak-rate-evaluation workload to -+ * be run for a long time. -+ */ -+ ref_wr_duration[0] = msecs_to_jiffies(7000); /* actually 8 sec */ -+ ref_wr_duration[1] = msecs_to_jiffies(2500); /* actually 3 sec */ -+ -+ ret = elv_register(&iosched_bfq_mq); -+ if (ret) -+ goto slab_kill; -+ -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ strcat(msg, " (with cgroups support)"); -+#endif -+ pr_info("%s", msg); -+ -+ return 0; -+ -+slab_kill: -+ bfq_slab_kill(); -+err_pol_unreg: -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ blkcg_policy_unregister(&blkcg_policy_bfq); -+#endif -+ return ret; -+} -+ -+static void __exit bfq_exit(void) -+{ -+ elv_unregister(&iosched_bfq_mq); -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ blkcg_policy_unregister(&blkcg_policy_bfq); -+#endif -+ bfq_slab_kill(); -+} -+ -+module_init(bfq_init); -+module_exit(bfq_exit); -+ -+MODULE_AUTHOR("Paolo Valente"); -+MODULE_LICENSE("GPL"); -+MODULE_DESCRIPTION("MQ Budget Fair Queueing I/O Scheduler"); -diff --git a/block/bfq-mq.h b/block/bfq-mq.h -new file mode 100644 -index 000000000000..ceb291132a1a ---- /dev/null -+++ b/block/bfq-mq.h -@@ -0,0 +1,1077 @@ -+/* -+ * BFQ v9: data structures and common functions prototypes. -+ * -+ * Based on ideas and code from CFQ: -+ * Copyright (C) 2003 Jens Axboe -+ * -+ * Copyright (C) 2008 Fabio Checconi -+ * Paolo Valente -+ * -+ * Copyright (C) 2015 Paolo Valente -+ * -+ * Copyright (C) 2017 Paolo Valente -+ */ -+ -+#ifndef _BFQ_H -+#define _BFQ_H -+ -+#include -+#include -+ -+/* see comments on CONFIG_BFQ_GROUP_IOSCHED in bfq.h */ -+#ifdef CONFIG_MQ_BFQ_GROUP_IOSCHED -+#define BFQ_GROUP_IOSCHED_ENABLED -+#endif -+ -+#define BFQ_IOPRIO_CLASSES 3 -+#define BFQ_CL_IDLE_TIMEOUT (HZ/5) -+ -+#define BFQ_MIN_WEIGHT 1 -+#define BFQ_MAX_WEIGHT 1000 -+#define BFQ_WEIGHT_CONVERSION_COEFF 10 -+ -+#define BFQ_DEFAULT_QUEUE_IOPRIO 4 -+ -+#define BFQ_WEIGHT_LEGACY_DFL 100 -+#define BFQ_DEFAULT_GRP_IOPRIO 0 -+#define BFQ_DEFAULT_GRP_CLASS IOPRIO_CLASS_BE -+ -+/* -+ * Soft real-time applications are extremely more latency sensitive -+ * than interactive ones. Over-raise the weight of the former to -+ * privilege them against the latter. -+ */ -+#define BFQ_SOFTRT_WEIGHT_FACTOR 100 -+ -+struct bfq_entity; -+ -+/** -+ * struct bfq_service_tree - per ioprio_class service tree. -+ * -+ * Each service tree represents a B-WF2Q+ scheduler on its own. Each -+ * ioprio_class has its own independent scheduler, and so its own -+ * bfq_service_tree. All the fields are protected by the queue lock -+ * of the containing bfqd. 
-+ */ -+struct bfq_service_tree { -+ /* tree for active entities (i.e., those backlogged) */ -+ struct rb_root active; -+ /* tree for idle entities (i.e., not backlogged, with V <= F_i)*/ -+ struct rb_root idle; -+ -+ struct bfq_entity *first_idle; /* idle entity with minimum F_i */ -+ struct bfq_entity *last_idle; /* idle entity with maximum F_i */ -+ -+ u64 vtime; /* scheduler virtual time */ -+ /* scheduler weight sum; active and idle entities contribute to it */ -+ unsigned long wsum; -+}; -+ -+/** -+ * struct bfq_sched_data - multi-class scheduler. -+ * -+ * bfq_sched_data is the basic scheduler queue. It supports three -+ * ioprio_classes, and can be used either as a toplevel queue or as an -+ * intermediate queue in a hierarchical setup. -+ * -+ * The supported ioprio_classes are the same as in CFQ, in descending -+ * priority order, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE. -+ * Requests from higher priority queues are served before all the -+ * requests from lower priority queues; among requests of the same -+ * queue requests are served according to B-WF2Q+. -+ * -+ * The schedule is implemented by the service trees, plus the field -+ * @next_in_service, which points to the entity on the active trees -+ * that will be served next, if 1) no changes in the schedule occurs -+ * before the current in-service entity is expired, 2) the in-service -+ * queue becomes idle when it expires, and 3) if the entity pointed by -+ * in_service_entity is not a queue, then the in-service child entity -+ * of the entity pointed by in_service_entity becomes idle on -+ * expiration. This peculiar definition allows for the following -+ * optimization, not yet exploited: while a given entity is still in -+ * service, we already know which is the best candidate for next -+ * service among the other active entitities in the same parent -+ * entity. We can then quickly compare the timestamps of the -+ * in-service entity with those of such best candidate. -+ * -+ * All the fields are protected by the queue lock of the containing -+ * bfqd. -+ */ -+struct bfq_sched_data { -+ struct bfq_entity *in_service_entity; /* entity in service */ -+ /* head-of-the-line entity in the scheduler (see comments above) */ -+ struct bfq_entity *next_in_service; -+ /* array of service trees, one per ioprio_class */ -+ struct bfq_service_tree service_tree[BFQ_IOPRIO_CLASSES]; -+ /* last time CLASS_IDLE was served */ -+ unsigned long bfq_class_idle_last_service; -+ -+}; -+ -+/** -+ * struct bfq_weight_counter - counter of the number of all active queues -+ * with a given weight. -+ */ -+struct bfq_weight_counter { -+ unsigned int weight; /* weight of the queues this counter refers to */ -+ unsigned int num_active; /* nr of active queues with this weight */ -+ /* -+ * Weights tree member (see bfq_data's @queue_weights_tree) -+ */ -+ struct rb_node weights_node; -+}; -+ -+/** -+ * struct bfq_entity - schedulable entity. -+ * -+ * A bfq_entity is used to represent either a bfq_queue (leaf node in the -+ * cgroup hierarchy) or a bfq_group into the upper level scheduler. Each -+ * entity belongs to the sched_data of the parent group in the cgroup -+ * hierarchy. Non-leaf entities have also their own sched_data, stored -+ * in @my_sched_data. -+ * -+ * Each entity stores independently its priority values; this would -+ * allow different weights on different devices, but this -+ * functionality is not exported to userspace by now. 
Priorities and -+ * weights are updated lazily, first storing the new values into the -+ * new_* fields, then setting the @prio_changed flag. As soon as -+ * there is a transition in the entity state that allows the priority -+ * update to take place the effective and the requested priority -+ * values are synchronized. -+ * -+ * Unless cgroups are used, the weight value is calculated from the -+ * ioprio to export the same interface as CFQ. When dealing with -+ * ``well-behaved'' queues (i.e., queues that do not spend too much -+ * time to consume their budget and have true sequential behavior, and -+ * when there are no external factors breaking anticipation) the -+ * relative weights at each level of the cgroups hierarchy should be -+ * guaranteed. All the fields are protected by the queue lock of the -+ * containing bfqd. -+ */ -+struct bfq_entity { -+ struct rb_node rb_node; /* service_tree member */ -+ -+ /* -+ * Flag, true if the entity is on a tree (either the active or -+ * the idle one of its service_tree) or is in service. -+ */ -+ bool on_st; -+ -+ u64 finish; /* B-WF2Q+ finish timestamp (aka F_i) */ -+ u64 start; /* B-WF2Q+ start timestamp (aka S_i) */ -+ -+ /* tree the entity is enqueued into; %NULL if not on a tree */ -+ struct rb_root *tree; -+ -+ /* -+ * minimum start time of the (active) subtree rooted at this -+ * entity; used for O(log N) lookups into active trees -+ */ -+ u64 min_start; -+ -+ /* amount of service received during the last service slot */ -+ int service; -+ -+ /* budget, used also to calculate F_i: F_i = S_i + @budget / @weight */ -+ int budget; -+ -+ unsigned int weight; /* weight of the queue */ -+ unsigned int new_weight; /* next weight if a change is in progress */ -+ -+ /* original weight, used to implement weight boosting */ -+ unsigned int orig_weight; -+ -+ /* parent entity, for hierarchical scheduling */ -+ struct bfq_entity *parent; -+ -+ /* -+ * For non-leaf nodes in the hierarchy, the associated -+ * scheduler queue, %NULL on leaf nodes. -+ */ -+ struct bfq_sched_data *my_sched_data; -+ /* the scheduler queue this entity belongs to */ -+ struct bfq_sched_data *sched_data; -+ -+ /* flag, set to request a weight, ioprio or ioprio_class change */ -+ int prio_changed; -+ -+ /* flag, set if the entity is counted in groups_with_pending_reqs */ -+ bool in_groups_with_pending_reqs; -+}; -+ -+struct bfq_group; -+ -+/** -+ * struct bfq_ttime - per process thinktime stats. -+ */ -+struct bfq_ttime { -+ u64 last_end_request; /* completion time of last request */ -+ -+ u64 ttime_total; /* total process thinktime */ -+ unsigned long ttime_samples; /* number of thinktime samples */ -+ u64 ttime_mean; /* average process thinktime */ -+ -+}; -+ -+/** -+ * struct bfq_queue - leaf schedulable entity. -+ * -+ * A bfq_queue is a leaf request queue; it can be associated with an -+ * io_context or more, if it is async or shared between cooperating -+ * processes. @cgroup holds a reference to the cgroup, to be sure that it -+ * does not disappear while a bfqq still references it (mostly to avoid -+ * races between request issuing and task migration followed by cgroup -+ * destruction). -+ * All the fields are protected by the queue lock of the containing bfqd. 
-+ */ -+struct bfq_queue { -+ /* reference counter */ -+ int ref; -+ /* parent bfq_data */ -+ struct bfq_data *bfqd; -+ -+ /* current ioprio and ioprio class */ -+ unsigned short ioprio, ioprio_class; -+ /* next ioprio and ioprio class if a change is in progress */ -+ unsigned short new_ioprio, new_ioprio_class; -+ -+ /* -+ * Shared bfq_queue if queue is cooperating with one or more -+ * other queues. -+ */ -+ struct bfq_queue *new_bfqq; -+ /* request-position tree member (see bfq_group's @rq_pos_tree) */ -+ struct rb_node pos_node; -+ /* request-position tree root (see bfq_group's @rq_pos_tree) */ -+ struct rb_root *pos_root; -+ -+ /* sorted list of pending requests */ -+ struct rb_root sort_list; -+ /* if fifo isn't expired, next request to serve */ -+ struct request *next_rq; -+ /* number of sync and async requests queued */ -+ int queued[2]; -+ /* number of requests currently allocated */ -+ int allocated; -+ /* number of pending metadata requests */ -+ int meta_pending; -+ /* fifo list of requests in sort_list */ -+ struct list_head fifo; -+ -+ /* entity representing this queue in the scheduler */ -+ struct bfq_entity entity; -+ -+ /* pointer to the weight counter associated with this queue */ -+ struct bfq_weight_counter *weight_counter; -+ -+ /* maximum budget allowed from the feedback mechanism */ -+ int max_budget; -+ /* budget expiration (in jiffies) */ -+ unsigned long budget_timeout; -+ -+ /* number of requests on the dispatch list or inside driver */ -+ int dispatched; -+ -+ unsigned int flags; /* status flags.*/ -+ -+ /* node for active/idle bfqq list inside parent bfqd */ -+ struct list_head bfqq_list; -+ -+ /* associated @bfq_ttime struct */ -+ struct bfq_ttime ttime; -+ -+ /* bit vector: a 1 for each seeky requests in history */ -+ u32 seek_history; -+ -+ /* node for the device's burst list */ -+ struct hlist_node burst_list_node; -+ -+ /* position of the last request enqueued */ -+ sector_t last_request_pos; -+ -+ /* Number of consecutive pairs of request completion and -+ * arrival, such that the queue becomes idle after the -+ * completion, but the next request arrives within an idle -+ * time slice; used only if the queue's IO_bound flag has been -+ * cleared. -+ */ -+ unsigned int requests_within_timer; -+ -+ /* pid of the process owning the queue, used for logging purposes */ -+ pid_t pid; -+ -+ /* -+ * Pointer to the bfq_io_cq owning the bfq_queue, set to %NULL -+ * if the queue is shared. -+ */ -+ struct bfq_io_cq *bic; -+ -+ /* current maximum weight-raising time for this queue */ -+ unsigned long wr_cur_max_time; -+ /* -+ * Minimum time instant such that, only if a new request is -+ * enqueued after this time instant in an idle @bfq_queue with -+ * no outstanding requests, then the task associated with the -+ * queue it is deemed as soft real-time (see the comments on -+ * the function bfq_bfqq_softrt_next_start()) -+ */ -+ unsigned long soft_rt_next_start; -+ /* -+ * Start time of the current weight-raising period if -+ * the @bfq-queue is being weight-raised, otherwise -+ * finish time of the last weight-raising period. -+ */ -+ unsigned long last_wr_start_finish; -+ /* factor by which the weight of this queue is multiplied */ -+ unsigned int wr_coeff; -+ /* -+ * Time of the last transition of the @bfq_queue from idle to -+ * backlogged. -+ */ -+ unsigned long last_idle_bklogged; -+ /* -+ * Cumulative service received from the @bfq_queue since the -+ * last transition from idle to backlogged. 
-+ */ -+ unsigned long service_from_backlogged; -+ /* -+ * Cumulative service received from the @bfq_queue since its -+ * last transition to weight-raised state. -+ */ -+ unsigned long service_from_wr; -+ /* -+ * Value of wr start time when switching to soft rt -+ */ -+ unsigned long wr_start_at_switch_to_srt; -+ -+ unsigned long split_time; /* time of last split */ -+ unsigned long first_IO_time; /* time of first I/O for this queue */ -+ -+ /* max service rate measured so far */ -+ u32 max_service_rate; -+ /* -+ * Ratio between the service received by bfqq while it is in -+ * service, and the cumulative service (of requests of other -+ * queues) that may be injected while bfqq is empty but still -+ * in service. To increase precision, the coefficient is -+ * measured in tenths of unit. Here are some example of (1) -+ * ratios, (2) resulting percentages of service injected -+ * w.r.t. to the total service dispatched while bfqq is in -+ * service, and (3) corresponding values of the coefficient: -+ * 1 (50%) -> 10 -+ * 2 (33%) -> 20 -+ * 10 (9%) -> 100 -+ * 9.9 (9%) -> 99 -+ * 1.5 (40%) -> 15 -+ * 0.5 (66%) -> 5 -+ * 0.1 (90%) -> 1 -+ * -+ * So, if the coefficient is lower than 10, then -+ * injected service is more than bfqq service. -+ */ -+ unsigned int inject_coeff; -+ /* amount of service injected in current service slot */ -+ unsigned int injected_service; -+}; -+ -+/** -+ * struct bfq_io_cq - per (request_queue, io_context) structure. -+ */ -+struct bfq_io_cq { -+ /* associated io_cq structure */ -+ struct io_cq icq; /* must be the first member */ -+ /* array of two process queues, the sync and the async */ -+ struct bfq_queue *bfqq[2]; -+ /* per (request_queue, blkcg) ioprio */ -+ int ioprio; -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ uint64_t blkcg_serial_nr; /* the current blkcg serial */ -+#endif -+ -+ /* -+ * Snapshot of the has_short_time flag before merging; taken -+ * to remember its value while the queue is merged, so as to -+ * be able to restore it in case of split. -+ */ -+ bool saved_has_short_ttime; -+ /* -+ * Same purpose as the previous two fields for the I/O bound -+ * classification of a queue. -+ */ -+ bool saved_IO_bound; -+ -+ /* -+ * Same purpose as the previous fields for the value of the -+ * field keeping the queue's belonging to a large burst -+ */ -+ bool saved_in_large_burst; -+ /* -+ * True if the queue belonged to a burst list before its merge -+ * with another cooperating queue. -+ */ -+ bool was_in_burst_list; -+ -+ /* -+ * Similar to previous fields: save wr information. -+ */ -+ unsigned long saved_wr_coeff; -+ unsigned long saved_last_wr_start_finish; -+ unsigned long saved_wr_start_at_switch_to_srt; -+ unsigned int saved_wr_cur_max_time; -+ struct bfq_ttime saved_ttime; -+}; -+ -+/** -+ * struct bfq_data - per-device data structure. -+ * -+ * All the fields are protected by @lock. -+ */ -+struct bfq_data { -+ /* device request queue */ -+ struct request_queue *queue; -+ /* dispatch queue */ -+ struct list_head dispatch; -+ -+ /* root bfq_group for the device */ -+ struct bfq_group *root_group; -+ -+ /* -+ * rbtree of weight counters of @bfq_queues, sorted by -+ * weight. Used to keep track of whether all @bfq_queues have -+ * the same weight. The tree contains one counter for each -+ * distinct weight associated to some active and not -+ * weight-raised @bfq_queue (see the comments to the functions -+ * bfq_weights_tree_[add|remove] for further details). 
-+ */ -+ struct rb_root queue_weights_tree; -+ -+ /* -+ * Number of groups with at least one descendant process that -+ * has at least one request waiting for completion. Note that -+ * this accounts for also requests already dispatched, but not -+ * yet completed. Therefore this number of groups may differ -+ * (be larger) than the number of active groups, as a group is -+ * considered active only if its corresponding entity has -+ * descendant queues with at least one request queued. This -+ * number is used to decide whether a scenario is symmetric. -+ * For a detailed explanation see comments on the computation -+ * of the variable asymmetric_scenario in the function -+ * bfq_better_to_idle(). -+ * -+ * However, it is hard to compute this number exactly, for -+ * groups with multiple descendant processes. Consider a group -+ * that is inactive, i.e., that has no descendant process with -+ * pending I/O inside BFQ queues. Then suppose that -+ * num_groups_with_pending_reqs is still accounting for this -+ * group, because the group has descendant processes with some -+ * I/O request still in flight. num_groups_with_pending_reqs -+ * should be decremented when the in-flight request of the -+ * last descendant process is finally completed (assuming that -+ * nothing else has changed for the group in the meantime, in -+ * terms of composition of the group and active/inactive state of child -+ * groups and processes). To accomplish this, an additional -+ * pending-request counter must be added to entities, and must -+ * be updated correctly. To avoid this additional field and operations, -+ * we resort to the following tradeoff between simplicity and -+ * accuracy: for an inactive group that is still counted in -+ * num_groups_with_pending_reqs, we decrement -+ * num_groups_with_pending_reqs when the first descendant -+ * process of the group remains with no request waiting for -+ * completion. -+ * -+ * Even this simpler decrement strategy requires a little -+ * carefulness: to avoid multiple decrements, we flag a group, -+ * more precisely an entity representing a group, as still -+ * counted in num_groups_with_pending_reqs when it becomes -+ * inactive. Then, when the first descendant queue of the -+ * entity remains with no request waiting for completion, -+ * num_groups_with_pending_reqs is decremented, and this flag -+ * is reset. After this flag is reset for the entity, -+ * num_groups_with_pending_reqs won't be decremented any -+ * longer in case a new descendant queue of the entity remains -+ * with no request waiting for completion. -+ */ -+ unsigned int num_groups_with_pending_reqs; -+ -+ /* -+ * Per-class (RT, BE, IDLE) number of bfq_queues containing -+ * requests (including the queue in service, even if it is -+ * idling). -+ */ -+ unsigned int busy_queues[3]; -+ /* number of weight-raised busy @bfq_queues */ -+ int wr_busy_queues; -+ /* number of queued requests */ -+ int queued; -+ /* number of requests dispatched and waiting for completion */ -+ int rq_in_driver; -+ -+ /* -+ * Maximum number of requests in driver in the last -+ * @hw_tag_samples completed requests. -+ */ -+ int max_rq_in_driver; -+ /* number of samples used to calculate hw_tag */ -+ int hw_tag_samples; -+ /* flag set to one if the driver is showing a queueing behavior */ -+ int hw_tag; -+ -+ /* number of budgets assigned */ -+ int budgets_assigned; -+ -+ /* -+ * Timer set when idling (waiting) for the next request from -+ * the queue in service. 
-+ */ -+ struct hrtimer idle_slice_timer; -+ -+ /* bfq_queue in service */ -+ struct bfq_queue *in_service_queue; -+ -+ /* on-disk position of the last served request */ -+ sector_t last_position; -+ -+ /* position of the last served request for the in-service queue */ -+ sector_t in_serv_last_pos; -+ -+ /* time of last request completion (ns) */ -+ u64 last_completion; -+ -+ /* time of first rq dispatch in current observation interval (ns) */ -+ u64 first_dispatch; -+ /* time of last rq dispatch in current observation interval (ns) */ -+ u64 last_dispatch; -+ -+ /* beginning of the last budget */ -+ ktime_t last_budget_start; -+ /* beginning of the last idle slice */ -+ ktime_t last_idling_start; -+ -+ /* number of samples in current observation interval */ -+ int peak_rate_samples; -+ /* num of samples of seq dispatches in current observation interval */ -+ u32 sequential_samples; -+ /* total num of sectors transferred in current observation interval */ -+ u64 tot_sectors_dispatched; -+ /* max rq size seen during current observation interval (sectors) */ -+ u32 last_rq_max_size; -+ /* time elapsed from first dispatch in current observ. interval (us) */ -+ u64 delta_from_first; -+ /* -+ * Current estimate of the device peak rate, measured in -+ * [(sectors/usec) / 2^BFQ_RATE_SHIFT]. The left-shift by -+ * BFQ_RATE_SHIFT is performed to increase precision in -+ * fixed-point calculations. -+ */ -+ u32 peak_rate; -+ -+ /* maximum budget allotted to a bfq_queue before rescheduling */ -+ int bfq_max_budget; -+ -+ /* list of all the bfq_queues active on the device */ -+ struct list_head active_list; -+ /* list of all the bfq_queues idle on the device */ -+ struct list_head idle_list; -+ -+ /* -+ * Timeout for async/sync requests; when it fires, requests -+ * are served in fifo order. -+ */ -+ u64 bfq_fifo_expire[2]; -+ /* weight of backward seeks wrt forward ones */ -+ unsigned int bfq_back_penalty; -+ /* maximum allowed backward seek */ -+ unsigned int bfq_back_max; -+ /* maximum idling time */ -+ u32 bfq_slice_idle; -+ -+ /* user-configured max budget value (0 for auto-tuning) */ -+ int bfq_user_max_budget; -+ /* -+ * Timeout for bfq_queues to consume their budget; used to -+ * prevent seeky queues from imposing long latencies to -+ * sequential or quasi-sequential ones (this also implies that -+ * seeky queues cannot receive guarantees in the service -+ * domain; after a timeout they are charged for the time they -+ * have been in service, to preserve fairness among them, but -+ * without service-domain guarantees). -+ */ -+ unsigned int bfq_timeout; -+ -+ /* -+ * Number of consecutive requests that must be issued within -+ * the idle time slice to set again idling to a queue which -+ * was marked as non-I/O-bound (see the definition of the -+ * IO_bound flag for further details). -+ */ -+ unsigned int bfq_requests_within_timer; -+ -+ /* -+ * Force device idling whenever needed to provide accurate -+ * service guarantees, without caring about throughput -+ * issues. CAVEAT: this may even increase latencies, in case -+ * of useless idling for processes that did stop doing I/O. -+ */ -+ bool strict_guarantees; -+ -+ /* -+ * Last time at which a queue entered the current burst of -+ * queues being activated shortly after each other; for more -+ * details about this and the following parameters related to -+ * a burst of activations, see the comments on the function -+ * bfq_handle_burst. 
-+ */ -+ unsigned long last_ins_in_burst; -+ /* -+ * Reference time interval used to decide whether a queue has -+ * been activated shortly after @last_ins_in_burst. -+ */ -+ unsigned long bfq_burst_interval; -+ /* number of queues in the current burst of queue activations */ -+ int burst_size; -+ -+ /* common parent entity for the queues in the burst */ -+ struct bfq_entity *burst_parent_entity; -+ /* Maximum burst size above which the current queue-activation -+ * burst is deemed as 'large'. -+ */ -+ unsigned long bfq_large_burst_thresh; -+ /* true if a large queue-activation burst is in progress */ -+ bool large_burst; -+ /* -+ * Head of the burst list (as for the above fields, more -+ * details in the comments on the function bfq_handle_burst). -+ */ -+ struct hlist_head burst_list; -+ -+ /* if set to true, low-latency heuristics are enabled */ -+ bool low_latency; -+ /* -+ * Maximum factor by which the weight of a weight-raised queue -+ * is multiplied. -+ */ -+ unsigned int bfq_wr_coeff; -+ /* maximum duration of a weight-raising period (jiffies) */ -+ unsigned int bfq_wr_max_time; -+ -+ /* Maximum weight-raising duration for soft real-time processes */ -+ unsigned int bfq_wr_rt_max_time; -+ /* -+ * Minimum idle period after which weight-raising may be -+ * reactivated for a queue (in jiffies). -+ */ -+ unsigned int bfq_wr_min_idle_time; -+ /* -+ * Minimum period between request arrivals after which -+ * weight-raising may be reactivated for an already busy async -+ * queue (in jiffies). -+ */ -+ unsigned long bfq_wr_min_inter_arr_async; -+ -+ /* Max service-rate for a soft real-time queue, in sectors/sec */ -+ unsigned int bfq_wr_max_softrt_rate; -+ /* -+ * Cached value of the product ref_rate*ref_wr_duration, used -+ * for computing the maximum duration of weight raising -+ * automatically. -+ */ -+ u64 rate_dur_prod; -+ -+ /* fallback dummy bfqq for extreme OOM conditions */ -+ struct bfq_queue oom_bfqq; -+ -+ spinlock_t lock; -+ -+ /* -+ * bic associated with the task issuing current bio for -+ * merging. This and the next field are used as a support to -+ * be able to perform the bic lookup, needed by bio-merge -+ * functions, before the scheduler lock is taken, and thus -+ * avoid taking the request-queue lock while the scheduler -+ * lock is being held. -+ */ -+ struct bfq_io_cq *bio_bic; -+ /* bfqq associated with the task issuing current bio for merging */ -+ struct bfq_queue *bio_bfqq; -+ /* Extra flag used only for TESTING */ -+ bool bio_bfqq_set; -+ -+ /* -+ * Depth limits used in bfq_limit_depth (see comments on the -+ * function) -+ */ -+ unsigned int word_depths[2][2]; -+}; -+ -+enum bfqq_state_flags { -+ BFQ_BFQQ_FLAG_just_created = 0, /* queue just allocated */ -+ BFQ_BFQQ_FLAG_busy, /* has requests or is in service */ -+ BFQ_BFQQ_FLAG_wait_request, /* waiting for a request */ -+ BFQ_BFQQ_FLAG_non_blocking_wait_rq, /* -+ * waiting for a request -+ * without idling the device -+ */ -+ BFQ_BFQQ_FLAG_fifo_expire, /* FIFO checked in this slice */ -+ BFQ_BFQQ_FLAG_has_short_ttime, /* queue has a short think time */ -+ BFQ_BFQQ_FLAG_sync, /* synchronous queue */ -+ BFQ_BFQQ_FLAG_IO_bound, /* -+ * bfqq has timed-out at least once -+ * having consumed at most 2/10 of -+ * its budget -+ */ -+ BFQ_BFQQ_FLAG_in_large_burst, /* -+ * bfqq activated in a large burst, -+ * see comments to bfq_handle_burst. 
-+ */ -+ BFQ_BFQQ_FLAG_softrt_update, /* -+ * may need softrt-next-start -+ * update -+ */ -+ BFQ_BFQQ_FLAG_coop, /* bfqq is shared */ -+ BFQ_BFQQ_FLAG_split_coop /* shared bfqq will be split */ -+}; -+ -+#define BFQ_BFQQ_FNS(name) \ -+static void bfq_mark_bfqq_##name(struct bfq_queue *bfqq) \ -+{ \ -+ (bfqq)->flags |= (1 << BFQ_BFQQ_FLAG_##name); \ -+} \ -+static void bfq_clear_bfqq_##name(struct bfq_queue *bfqq) \ -+{ \ -+ (bfqq)->flags &= ~(1 << BFQ_BFQQ_FLAG_##name); \ -+} \ -+static int bfq_bfqq_##name(const struct bfq_queue *bfqq) \ -+{ \ -+ return ((bfqq)->flags & (1 << BFQ_BFQQ_FLAG_##name)) != 0; \ -+} -+ -+BFQ_BFQQ_FNS(just_created); -+BFQ_BFQQ_FNS(busy); -+BFQ_BFQQ_FNS(wait_request); -+BFQ_BFQQ_FNS(non_blocking_wait_rq); -+BFQ_BFQQ_FNS(fifo_expire); -+BFQ_BFQQ_FNS(has_short_ttime); -+BFQ_BFQQ_FNS(sync); -+BFQ_BFQQ_FNS(IO_bound); -+BFQ_BFQQ_FNS(in_large_burst); -+BFQ_BFQQ_FNS(coop); -+BFQ_BFQQ_FNS(split_coop); -+BFQ_BFQQ_FNS(softrt_update); -+#undef BFQ_BFQQ_FNS -+ -+/* Logging facilities. */ -+#ifdef CONFIG_BFQ_REDIRECT_TO_CONSOLE -+ -+static const char *checked_dev_name(const struct device *dev) -+{ -+ static const char nodev[] = "nodev"; -+ -+ if (dev) -+ return dev_name(dev); -+ -+ return nodev; -+} -+ -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+static struct bfq_group *bfqq_group(struct bfq_queue *bfqq); -+static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg); -+ -+#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \ -+ pr_crit("%s bfq%d%c %s [%s] " fmt "\n", \ -+ checked_dev_name((bfqd)->queue->backing_dev_info->dev), \ -+ (bfqq)->pid, \ -+ bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \ -+ bfqq_group(bfqq)->blkg_path, __func__, ##args); \ -+} while (0) -+ -+#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do { \ -+ pr_crit("%s %s [%s] " fmt "\n", \ -+ checked_dev_name((bfqd)->queue->backing_dev_info->dev), \ -+ bfqg->blkg_path, __func__, ##args); \ -+} while (0) -+ -+#else /* BFQ_GROUP_IOSCHED_ENABLED */ -+ -+#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \ -+ pr_crit("%s bfq%d%c [%s] " fmt "\n", \ -+ checked_dev_name((bfqd)->queue->backing_dev_info->dev), \ -+ (bfqq)->pid, bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \ -+ __func__, ##args) -+#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do {} while (0) -+ -+#endif /* BFQ_GROUP_IOSCHED_ENABLED */ -+ -+#define bfq_log(bfqd, fmt, args...) \ -+ pr_crit("%s bfq [%s] " fmt "\n", \ -+ checked_dev_name((bfqd)->queue->backing_dev_info->dev), \ -+ __func__, ##args) -+ -+#else /* CONFIG_BFQ_REDIRECT_TO_CONSOLE */ -+ -+#if !defined(CONFIG_BLK_DEV_IO_TRACE) -+ -+/* Avoid possible "unused-variable" warning. See commit message. */ -+ -+#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) ((void) (bfqq)) -+ -+#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) ((void) (bfqg)) -+ -+#define bfq_log(bfqd, fmt, args...) do {} while (0) -+ -+#else /* CONFIG_BLK_DEV_IO_TRACE */ -+ -+#include -+ -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+static struct bfq_group *bfqq_group(struct bfq_queue *bfqq); -+static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg); -+ -+#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \ -+ blk_add_trace_msg((bfqd)->queue, "bfq%d%c %s [%s] " fmt, \ -+ (bfqq)->pid, \ -+ bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \ -+ bfqq_group(bfqq)->blkg_path, __func__, ##args); \ -+} while (0) -+ -+#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do { \ -+ blk_add_trace_msg((bfqd)->queue, "%s [%s] " fmt, bfqg->blkg_path, \ -+ __func__, ##args);\ -+} while (0) -+ -+#else /* BFQ_GROUP_IOSCHED_ENABLED */ -+ -+#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) 
\ -+ blk_add_trace_msg((bfqd)->queue, "bfq%d%c [%s] " fmt, (bfqq)->pid, \ -+ bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \ -+ __func__, ##args) -+#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do {} while (0) -+ -+#endif /* BFQ_GROUP_IOSCHED_ENABLED */ -+ -+#define bfq_log(bfqd, fmt, args...) \ -+ blk_add_trace_msg((bfqd)->queue, "bfq [%s] " fmt, __func__, ##args) -+ -+#endif /* CONFIG_BLK_DEV_IO_TRACE */ -+#endif /* CONFIG_BFQ_REDIRECT_TO_CONSOLE */ -+ -+/* Expiration reasons. */ -+enum bfqq_expiration { -+ BFQ_BFQQ_TOO_IDLE = 0, /* -+ * queue has been idling for -+ * too long -+ */ -+ BFQ_BFQQ_BUDGET_TIMEOUT, /* budget took too long to be used */ -+ BFQ_BFQQ_BUDGET_EXHAUSTED, /* budget consumed */ -+ BFQ_BFQQ_NO_MORE_REQUESTS, /* the queue has no more requests */ -+ BFQ_BFQQ_PREEMPTED /* preemption in progress */ -+}; -+ -+ -+struct bfqg_stats { -+#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP) -+ /* number of ios merged */ -+ struct blkg_rwstat merged; -+ /* total time spent on device in ns, may not be accurate w/ queueing */ -+ struct blkg_rwstat service_time; -+ /* total time spent waiting in scheduler queue in ns */ -+ struct blkg_rwstat wait_time; -+ /* number of IOs queued up */ -+ struct blkg_rwstat queued; -+ /* total disk time and nr sectors dispatched by this group */ -+ struct blkg_stat time; -+ /* sum of number of ios queued across all samples */ -+ struct blkg_stat avg_queue_size_sum; -+ /* count of samples taken for average */ -+ struct blkg_stat avg_queue_size_samples; -+ /* how many times this group has been removed from service tree */ -+ struct blkg_stat dequeue; -+ /* total time spent waiting for it to be assigned a timeslice. */ -+ struct blkg_stat group_wait_time; -+ /* time spent idling for this blkcg_gq */ -+ struct blkg_stat idle_time; -+ /* total time with empty current active q with other requests queued */ -+ struct blkg_stat empty_time; -+ /* fields after this shouldn't be cleared on stat reset */ -+ u64 start_group_wait_time; -+ u64 start_idle_time; -+ u64 start_empty_time; -+ uint16_t flags; -+#endif /* BFQ_GROUP_IOSCHED_ENABLED && CONFIG_DEBUG_BLK_CGROUP */ -+}; -+ -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+/* -+ * struct bfq_group_data - per-blkcg storage for the blkio subsystem. -+ * -+ * @ps: @blkcg_policy_storage that this structure inherits -+ * @weight: weight of the bfq_group -+ */ -+struct bfq_group_data { -+ /* must be the first member */ -+ struct blkcg_policy_data pd; -+ -+ unsigned int weight; -+}; -+ -+/** -+ * struct bfq_group - per (device, cgroup) data structure. -+ * @entity: schedulable entity to insert into the parent group sched_data. -+ * @sched_data: own sched_data, to contain child entities (they may be -+ * both bfq_queues and bfq_groups). -+ * @bfqd: the bfq_data for the device this group acts upon. -+ * @async_bfqq: array of async queues for all the tasks belonging to -+ * the group, one queue per ioprio value per ioprio_class, -+ * except for the idle class that has only one queue. -+ * @async_idle_bfqq: async queue for the idle class (ioprio is ignored). -+ * @my_entity: pointer to @entity, %NULL for the toplevel group; used -+ * to avoid too many special cases during group creation/ -+ * migration. -+ * @active_entities: number of active entities belonging to the group; -+ * unused for the root group. Used to know whether there -+ * are groups with more than one active @bfq_entity -+ * (see the comments to the function -+ * bfq_bfqq_may_idle()). 
-+ * @rq_pos_tree: rbtree sorted by next_request position, used when -+ * determining if two or more queues have interleaving -+ * requests (see bfq_find_close_cooperator()). -+ * -+ * Each (device, cgroup) pair has its own bfq_group, i.e., for each cgroup -+ * there is a set of bfq_groups, each one collecting the lower-level -+ * entities belonging to the group that are acting on the same device. -+ * -+ * Locking works as follows: -+ * o @bfqd is protected by the queue lock, RCU is used to access it -+ * from the readers. -+ * o All the other fields are protected by the @bfqd queue lock. -+ */ -+struct bfq_group { -+ /* must be the first member */ -+ struct blkg_policy_data pd; -+ -+ /* cached path for this blkg (see comments in bfq_bic_update_cgroup) */ -+ char blkg_path[128]; -+ -+ /* reference counter (see comments in bfq_bic_update_cgroup) */ -+ int ref; -+ -+ struct bfq_entity entity; -+ struct bfq_sched_data sched_data; -+ -+ void *bfqd; -+ -+ struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR]; -+ struct bfq_queue *async_idle_bfqq; -+ -+ struct bfq_entity *my_entity; -+ -+ int active_entities; -+ -+ struct rb_root rq_pos_tree; -+ -+ struct bfqg_stats stats; -+}; -+ -+#else -+struct bfq_group { -+ struct bfq_sched_data sched_data; -+ -+ struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR]; -+ struct bfq_queue *async_idle_bfqq; -+ -+ struct rb_root rq_pos_tree; -+}; -+#endif -+ -+static struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity); -+ -+static unsigned int bfq_class_idx(struct bfq_entity *entity) -+{ -+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); -+ -+ return bfqq ? bfqq->ioprio_class - 1 : -+ BFQ_DEFAULT_GRP_CLASS - 1; -+} -+ -+static unsigned int bfq_tot_busy_queues(struct bfq_data *bfqd) -+{ -+ return bfqd->busy_queues[0] + bfqd->busy_queues[1] + -+ bfqd->busy_queues[2]; -+} -+ -+static struct bfq_service_tree * -+bfq_entity_service_tree(struct bfq_entity *entity) -+{ -+ struct bfq_sched_data *sched_data = entity->sched_data; -+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); -+ unsigned int idx = bfq_class_idx(entity); -+ -+ BUG_ON(idx >= BFQ_IOPRIO_CLASSES); -+ BUG_ON(sched_data == NULL); -+ -+ if (bfqq) -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "%p %d", -+ sched_data->service_tree + idx, idx); -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ else { -+ struct bfq_group *bfqg = -+ container_of(entity, struct bfq_group, entity); -+ -+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg, -+ "%p %d", -+ sched_data->service_tree + idx, idx); -+ } -+#endif -+ return sched_data->service_tree + idx; -+} -+ -+static struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync) -+{ -+ return bic->bfqq[is_sync]; -+} -+ -+static void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, -+ bool is_sync) -+{ -+ bic->bfqq[is_sync] = bfqq; -+} -+ -+static struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic) -+{ -+ return bic->icq.q->elevator->elevator_data; -+} -+ -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ -+static struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq) -+{ -+ struct bfq_entity *group_entity = bfqq->entity.parent; -+ -+ if (!group_entity) -+ group_entity = &bfqq->bfqd->root_group->entity; -+ -+ return container_of(group_entity, struct bfq_group, entity); -+} -+ -+#else -+ -+static struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq) -+{ -+ return bfqq->bfqd->root_group; -+} -+ -+#endif -+ -+static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio); -+static void bfq_put_queue(struct bfq_queue *bfqq); -+static struct bfq_queue 
*bfq_get_queue(struct bfq_data *bfqd, -+ struct bio *bio, bool is_sync, -+ struct bfq_io_cq *bic); -+static void bfq_end_wr_async_queues(struct bfq_data *bfqd, -+ struct bfq_group *bfqg); -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg); -+#endif -+static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq); -+ -+#endif /* _BFQ_H */ -diff --git a/block/bfq-sched.c b/block/bfq-sched.c -new file mode 100644 -index 000000000000..7a4923231106 ---- /dev/null -+++ b/block/bfq-sched.c -@@ -0,0 +1,2077 @@ -+/* -+ * BFQ: Hierarchical B-WF2Q+ scheduler. -+ * -+ * Based on ideas and code from CFQ: -+ * Copyright (C) 2003 Jens Axboe -+ * -+ * Copyright (C) 2008 Fabio Checconi -+ * Paolo Valente -+ * -+ * Copyright (C) 2015 Paolo Valente -+ * -+ * Copyright (C) 2016 Paolo Valente -+ */ -+ -+static struct bfq_group *bfqq_group(struct bfq_queue *bfqq); -+ -+/** -+ * bfq_gt - compare two timestamps. -+ * @a: first ts. -+ * @b: second ts. -+ * -+ * Return @a > @b, dealing with wrapping correctly. -+ */ -+static int bfq_gt(u64 a, u64 b) -+{ -+ return (s64)(a - b) > 0; -+} -+ -+static struct bfq_entity *bfq_root_active_entity(struct rb_root *tree) -+{ -+ struct rb_node *node = tree->rb_node; -+ -+ return rb_entry(node, struct bfq_entity, rb_node); -+} -+ -+static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd, -+ bool expiration); -+ -+static bool bfq_update_parent_budget(struct bfq_entity *next_in_service); -+ -+/** -+ * bfq_update_next_in_service - update sd->next_in_service -+ * @sd: sched_data for which to perform the update. -+ * @new_entity: if not NULL, pointer to the entity whose activation, -+ * requeueing or repositionig triggered the invocation of -+ * this function. -+ * @expiration: id true, this function is being invoked after the -+ * expiration of the in-service entity -+ * -+ * This function is called to update sd->next_in_service, which, in -+ * its turn, may change as a consequence of the insertion or -+ * extraction of an entity into/from one of the active trees of -+ * sd. These insertions/extractions occur as a consequence of -+ * activations/deactivations of entities, with some activations being -+ * 'true' activations, and other activations being requeueings (i.e., -+ * implementing the second, requeueing phase of the mechanism used to -+ * reposition an entity in its active tree; see comments on -+ * __bfq_activate_entity and __bfq_requeue_entity for details). In -+ * both the last two activation sub-cases, new_entity points to the -+ * just activated or requeued entity. -+ * -+ * Returns true if sd->next_in_service changes in such a way that -+ * entity->parent may become the next_in_service for its parent -+ * entity. -+ */ -+static bool bfq_update_next_in_service(struct bfq_sched_data *sd, -+ struct bfq_entity *new_entity, -+ bool expiration) -+{ -+ struct bfq_entity *next_in_service = sd->next_in_service; -+ struct bfq_queue *bfqq; -+ bool parent_sched_may_change = false; -+ bool change_without_lookup = false; -+ -+ /* -+ * If this update is triggered by the activation, requeueing -+ * or repositiong of an entity that does not coincide with -+ * sd->next_in_service, then a full lookup in the active tree -+ * can be avoided. In fact, it is enough to check whether the -+ * just-modified entity has the same priority as -+ * sd->next_in_service, is eligible and has a lower virtual -+ * finish time than sd->next_in_service. 
If this compound -+ * condition holds, then the new entity becomes the new -+ * next_in_service. Otherwise no change is needed. -+ */ -+ if (new_entity && new_entity != sd->next_in_service) { -+ /* -+ * Flag used to decide whether to replace -+ * sd->next_in_service with new_entity. Tentatively -+ * set to true, and left as true if -+ * sd->next_in_service is NULL. -+ */ -+ change_without_lookup = true; -+ -+ /* -+ * If there is already a next_in_service candidate -+ * entity, then compare timestamps to decide whether -+ * to replace sd->service_tree with new_entity. -+ */ -+ if (next_in_service) { -+ unsigned int new_entity_class_idx = -+ bfq_class_idx(new_entity); -+ struct bfq_service_tree *st = -+ sd->service_tree + new_entity_class_idx; -+ -+ change_without_lookup = -+ (new_entity_class_idx == -+ bfq_class_idx(next_in_service) -+ && -+ !bfq_gt(new_entity->start, st->vtime) -+ && -+ bfq_gt(next_in_service->finish, -+ new_entity->finish)); -+ } -+ -+ if (change_without_lookup) { -+ next_in_service = new_entity; -+ bfqq = bfq_entity_to_bfqq(next_in_service); -+ -+ if (bfqq) -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "chose without lookup"); -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ else { -+ struct bfq_group *bfqg = -+ container_of(next_in_service, -+ struct bfq_group, entity); -+ -+ bfq_log_bfqg((struct bfq_data*)bfqg->bfqd, bfqg, -+ "chose without lookup"); -+ } -+#endif -+ } -+ } -+ -+ if (!change_without_lookup) /* lookup needed */ -+ next_in_service = bfq_lookup_next_entity(sd, expiration); -+ -+ if (next_in_service) { -+ bool new_budget_triggers_change = -+ bfq_update_parent_budget(next_in_service); -+ -+ parent_sched_may_change = !sd->next_in_service || -+ new_budget_triggers_change; -+ } -+ -+ sd->next_in_service = next_in_service; -+ -+ if (!next_in_service) -+ return parent_sched_may_change; -+ -+ bfqq = bfq_entity_to_bfqq(next_in_service); -+ if (bfqq) -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "chosen this queue"); -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ else { -+ struct bfq_group *bfqg = -+ container_of(next_in_service, -+ struct bfq_group, entity); -+ -+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg, -+ "chosen this entity"); -+ } -+#endif -+ return parent_sched_may_change; -+} -+ -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+/* both next loops stop at one of the child entities of the root group */ -+#define for_each_entity(entity) \ -+ for (; entity ; entity = entity->parent) -+ -+/* -+ * For each iteration, compute parent in advance, so as to be safe if -+ * entity is deallocated during the iteration. Such a deallocation may -+ * happen as a consequence of a bfq_put_queue that frees the bfq_queue -+ * containing entity. -+ */ -+#define for_each_entity_safe(entity, parent) \ -+ for (; entity && ({ parent = entity->parent; 1; }); entity = parent) -+ -+/* -+ * Returns true if this budget changes may let next_in_service->parent -+ * become the next_in_service entity for its parent entity. -+ */ -+static bool bfq_update_parent_budget(struct bfq_entity *next_in_service) -+{ -+ struct bfq_entity *bfqg_entity; -+ struct bfq_group *bfqg; -+ struct bfq_sched_data *group_sd; -+ bool ret = false; -+ -+ BUG_ON(!next_in_service); -+ -+ group_sd = next_in_service->sched_data; -+ -+ bfqg = container_of(group_sd, struct bfq_group, sched_data); -+ /* -+ * bfq_group's my_entity field is not NULL only if the group -+ * is not the root group. We must not touch the root entity -+ * as it must never become an in-service entity. 
-+ */ -+ bfqg_entity = bfqg->my_entity; -+ if (bfqg_entity) { -+ if (bfqg_entity->budget > next_in_service->budget) -+ ret = true; -+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg, -+ "old budg: %d, new budg: %d", -+ bfqg_entity->budget, next_in_service->budget); -+ bfqg_entity->budget = next_in_service->budget; -+ } -+ -+ return ret; -+} -+ -+/* -+ * This function tells whether entity stops being a candidate for next -+ * service, according to the restrictive definition of the field -+ * next_in_service. In particular, this function is invoked for an -+ * entity that is about to be set in service. -+ * -+ * If entity is a queue, then the entity is no longer a candidate for -+ * next service according to the that definition, because entity is -+ * about to become the in-service queue. This function then returns -+ * true if entity is a queue. -+ * -+ * In contrast, entity could still be a candidate for next service if -+ * it is not a queue, and has more than one active child. In fact, -+ * even if one of its children is about to be set in service, other -+ * active children may still be the next to serve, for the parent -+ * entity, even according to the above definition. As a consequence, a -+ * non-queue entity is not a candidate for next-service only if it has -+ * only one active child. And only if this condition holds, then this -+ * function returns true for a non-queue entity. -+ */ -+static bool bfq_no_longer_next_in_service(struct bfq_entity *entity) -+{ -+ struct bfq_group *bfqg; -+ -+ if (bfq_entity_to_bfqq(entity)) -+ return true; -+ -+ bfqg = container_of(entity, struct bfq_group, entity); -+ -+ BUG_ON(bfqg == ((struct bfq_data *)(bfqg->bfqd))->root_group); -+ BUG_ON(bfqg->active_entities == 0); -+ /* -+ * The field active_entities does not always contain the -+ * actual number of active children entities: it happens to -+ * not account for the in-service entity in case the latter is -+ * removed from its active tree (which may get done after -+ * invoking the function bfq_no_longer_next_in_service in -+ * bfq_get_next_queue). Fortunately, here, i.e., while -+ * bfq_no_longer_next_in_service is not yet completed in -+ * bfq_get_next_queue, bfq_active_extract has not yet been -+ * invoked, and thus active_entities still coincides with the -+ * actual number of active entities. -+ */ -+ if (bfqg->active_entities == 1) -+ return true; -+ -+ return false; -+} -+ -+#else /* BFQ_GROUP_IOSCHED_ENABLED */ -+#define for_each_entity(entity) \ -+ for (; entity ; entity = NULL) -+ -+#define for_each_entity_safe(entity, parent) \ -+ for (parent = NULL; entity ; entity = parent) -+ -+static bool bfq_update_parent_budget(struct bfq_entity *next_in_service) -+{ -+ return false; -+} -+ -+static bool bfq_no_longer_next_in_service(struct bfq_entity *entity) -+{ -+ return true; -+} -+ -+#endif /* BFQ_GROUP_IOSCHED_ENABLED */ -+ -+/* -+ * Shift for timestamp calculations. This actually limits the maximum -+ * service allowed in one timestamp delta (small shift values increase it), -+ * the maximum total weight that can be used for the queues in the system -+ * (big shift values increase it), and the period of virtual time -+ * wraparounds. -+ */ -+#define WFQ_SERVICE_SHIFT 22 -+ -+static struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity) -+{ -+ struct bfq_queue *bfqq = NULL; -+ -+ BUG_ON(!entity); -+ -+ if (!entity->my_sched_data) -+ bfqq = container_of(entity, struct bfq_queue, entity); -+ -+ return bfqq; -+} -+ -+ -+/** -+ * bfq_delta - map service into the virtual time domain. 
-+ * @service: amount of service. -+ * @weight: scale factor (weight of an entity or weight sum). -+ */ -+static u64 bfq_delta(unsigned long service, unsigned long weight) -+{ -+ u64 d = (u64)service << WFQ_SERVICE_SHIFT; -+ -+ do_div(d, weight); -+ return d; -+} -+ -+/** -+ * bfq_calc_finish - assign the finish time to an entity. -+ * @entity: the entity to act upon. -+ * @service: the service to be charged to the entity. -+ */ -+static void bfq_calc_finish(struct bfq_entity *entity, unsigned long service) -+{ -+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); -+ unsigned long long start, finish, delta; -+ -+ BUG_ON(entity->weight == 0); -+ -+ entity->finish = entity->start + -+ bfq_delta(service, entity->weight); -+ -+ start = ((entity->start>>10)*1000)>>12; -+ finish = ((entity->finish>>10)*1000)>>12; -+ delta = ((bfq_delta(service, entity->weight)>>10)*1000)>>12; -+ -+ if (bfqq) { -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "serv %lu, w %d", -+ service, entity->weight); -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "start %llu, finish %llu, delta %llu", -+ start, finish, delta); -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ } else { -+ struct bfq_group *bfqg = -+ container_of(entity, struct bfq_group, entity); -+ -+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg, -+ "group: serv %lu, w %d", -+ service, entity->weight); -+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg, -+ "group: start %llu, finish %llu, delta %llu", -+ start, finish, delta); -+#endif -+ } -+} -+ -+/** -+ * bfq_entity_of - get an entity from a node. -+ * @node: the node field of the entity. -+ * -+ * Convert a node pointer to the relative entity. This is used only -+ * to simplify the logic of some functions and not as the generic -+ * conversion mechanism because, e.g., in the tree walking functions, -+ * the check for a %NULL value would be redundant. -+ */ -+static struct bfq_entity *bfq_entity_of(struct rb_node *node) -+{ -+ struct bfq_entity *entity = NULL; -+ -+ if (node) -+ entity = rb_entry(node, struct bfq_entity, rb_node); -+ -+ return entity; -+} -+ -+/** -+ * bfq_extract - remove an entity from a tree. -+ * @root: the tree root. -+ * @entity: the entity to remove. -+ */ -+static void bfq_extract(struct rb_root *root, struct bfq_entity *entity) -+{ -+ BUG_ON(entity->tree != root); -+ -+ entity->tree = NULL; -+ rb_erase(&entity->rb_node, root); -+} -+ -+/** -+ * bfq_idle_extract - extract an entity from the idle tree. -+ * @st: the service tree of the owning @entity. -+ * @entity: the entity being removed. -+ */ -+static void bfq_idle_extract(struct bfq_service_tree *st, -+ struct bfq_entity *entity) -+{ -+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); -+ struct rb_node *next; -+ -+ BUG_ON(entity->tree != &st->idle); -+ -+ if (entity == st->first_idle) { -+ next = rb_next(&entity->rb_node); -+ st->first_idle = bfq_entity_of(next); -+ } -+ -+ if (entity == st->last_idle) { -+ next = rb_prev(&entity->rb_node); -+ st->last_idle = bfq_entity_of(next); -+ } -+ -+ bfq_extract(&st->idle, entity); -+ -+ if (bfqq) -+ list_del(&bfqq->bfqq_list); -+} -+ -+/** -+ * bfq_insert - generic tree insertion. -+ * @root: tree root. -+ * @entity: entity to insert. -+ * -+ * This is used for the idle and the active tree, since they are both -+ * ordered by finish time. 
-+ */ -+static void bfq_insert(struct rb_root *root, struct bfq_entity *entity) -+{ -+ struct bfq_entity *entry; -+ struct rb_node **node = &root->rb_node; -+ struct rb_node *parent = NULL; -+ -+ BUG_ON(entity->tree); -+ -+ while (*node) { -+ parent = *node; -+ entry = rb_entry(parent, struct bfq_entity, rb_node); -+ -+ if (bfq_gt(entry->finish, entity->finish)) -+ node = &parent->rb_left; -+ else -+ node = &parent->rb_right; -+ } -+ -+ rb_link_node(&entity->rb_node, parent, node); -+ rb_insert_color(&entity->rb_node, root); -+ -+ entity->tree = root; -+} -+ -+/** -+ * bfq_update_min - update the min_start field of a entity. -+ * @entity: the entity to update. -+ * @node: one of its children. -+ * -+ * This function is called when @entity may store an invalid value for -+ * min_start due to updates to the active tree. The function assumes -+ * that the subtree rooted at @node (which may be its left or its right -+ * child) has a valid min_start value. -+ */ -+static void bfq_update_min(struct bfq_entity *entity, struct rb_node *node) -+{ -+ struct bfq_entity *child; -+ -+ if (node) { -+ child = rb_entry(node, struct bfq_entity, rb_node); -+ if (bfq_gt(entity->min_start, child->min_start)) -+ entity->min_start = child->min_start; -+ } -+} -+ -+/** -+ * bfq_update_active_node - recalculate min_start. -+ * @node: the node to update. -+ * -+ * @node may have changed position or one of its children may have moved, -+ * this function updates its min_start value. The left and right subtrees -+ * are assumed to hold a correct min_start value. -+ */ -+static void bfq_update_active_node(struct rb_node *node) -+{ -+ struct bfq_entity *entity = rb_entry(node, struct bfq_entity, rb_node); -+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); -+ -+ entity->min_start = entity->start; -+ bfq_update_min(entity, node->rb_right); -+ bfq_update_min(entity, node->rb_left); -+ -+ if (bfqq) { -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "new min_start %llu", -+ ((entity->min_start>>10)*1000)>>12); -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ } else { -+ struct bfq_group *bfqg = -+ container_of(entity, struct bfq_group, entity); -+ -+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg, -+ "new min_start %llu", -+ ((entity->min_start>>10)*1000)>>12); -+#endif -+ } -+} -+ -+/** -+ * bfq_update_active_tree - update min_start for the whole active tree. -+ * @node: the starting node. -+ * -+ * @node must be the deepest modified node after an update. This function -+ * updates its min_start using the values held by its children, assuming -+ * that they did not change, and then updates all the nodes that may have -+ * changed in the path to the root. The only nodes that may have changed -+ * are the ones in the path or their siblings. 
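-+ *
-+ * Keeping min_start up to date on every node is what allows the
-+ * lookup functions (see bfq_first_active_entity) to find, in
-+ * logarithmic time, an eligible entity, i.e., one whose start time
-+ * does not exceed the current virtual time.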
-+ */ -+static void bfq_update_active_tree(struct rb_node *node) -+{ -+ struct rb_node *parent; -+ -+up: -+ bfq_update_active_node(node); -+ -+ parent = rb_parent(node); -+ if (!parent) -+ return; -+ -+ if (node == parent->rb_left && parent->rb_right) -+ bfq_update_active_node(parent->rb_right); -+ else if (parent->rb_left) -+ bfq_update_active_node(parent->rb_left); -+ -+ node = parent; -+ goto up; -+} -+ -+static void bfq_weights_tree_add(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq, -+ struct rb_root *root); -+ -+static void __bfq_weights_tree_remove(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq, -+ struct rb_root *root); -+ -+static void bfq_weights_tree_remove(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq); -+ -+ -+/** -+ * bfq_active_insert - insert an entity in the active tree of its -+ * group/device. -+ * @st: the service tree of the entity. -+ * @entity: the entity being inserted. -+ * -+ * The active tree is ordered by finish time, but an extra key is kept -+ * per each node, containing the minimum value for the start times of -+ * its children (and the node itself), so it's possible to search for -+ * the eligible node with the lowest finish time in logarithmic time. -+ */ -+static void bfq_active_insert(struct bfq_service_tree *st, -+ struct bfq_entity *entity) -+{ -+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); -+ struct rb_node *node = &entity->rb_node; -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ struct bfq_sched_data *sd = NULL; -+ struct bfq_group *bfqg = NULL; -+ struct bfq_data *bfqd = NULL; -+#endif -+ -+ bfq_insert(&st->active, entity); -+ -+ if (node->rb_left) -+ node = node->rb_left; -+ else if (node->rb_right) -+ node = node->rb_right; -+ -+ bfq_update_active_tree(node); -+ -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ sd = entity->sched_data; -+ bfqg = container_of(sd, struct bfq_group, sched_data); -+ BUG_ON(!bfqg); -+ bfqd = (struct bfq_data *)bfqg->bfqd; -+#endif -+ if (bfqq) -+ list_add(&bfqq->bfqq_list, &bfqq->bfqd->active_list); -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ if (bfqg != bfqd->root_group) { -+ BUG_ON(!bfqg); -+ BUG_ON(!bfqd); -+ bfqg->active_entities++; -+ } -+#endif -+} -+ -+/** -+ * bfq_ioprio_to_weight - calc a weight from an ioprio. -+ * @ioprio: the ioprio value to convert. -+ */ -+static unsigned short bfq_ioprio_to_weight(int ioprio) -+{ -+ BUG_ON(ioprio < 0 || ioprio >= IOPRIO_BE_NR); -+ return (IOPRIO_BE_NR - ioprio) * BFQ_WEIGHT_CONVERSION_COEFF; -+} -+ -+/** -+ * bfq_weight_to_ioprio - calc an ioprio from a weight. -+ * @weight: the weight value to convert. -+ * -+ * To preserve as much as possible the old only-ioprio user interface, -+ * 0 is used as an escape ioprio value for weights (numerically) equal or -+ * larger than IOPRIO_BE_NR * BFQ_WEIGHT_CONVERSION_COEFF. -+ */ -+static unsigned short bfq_weight_to_ioprio(int weight) -+{ -+ BUG_ON(weight < BFQ_MIN_WEIGHT || weight > BFQ_MAX_WEIGHT); -+ return IOPRIO_BE_NR * BFQ_WEIGHT_CONVERSION_COEFF - weight < 0 ? -+ 0 : IOPRIO_BE_NR * BFQ_WEIGHT_CONVERSION_COEFF - weight; -+} -+ -+static void bfq_get_entity(struct bfq_entity *entity) -+{ -+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); -+ -+ if (bfqq) { -+ bfqq->ref++; -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "%p %d", -+ bfqq, bfqq->ref); -+ } -+} -+ -+/** -+ * bfq_find_deepest - find the deepest node that an extraction can modify. -+ * @node: the node being removed. 
-+ * -+ * Do the first step of an extraction in an rb tree, looking for the -+ * node that will replace @node, and returning the deepest node that -+ * the following modifications to the tree can touch. If @node is the -+ * last node in the tree return %NULL. -+ */ -+static struct rb_node *bfq_find_deepest(struct rb_node *node) -+{ -+ struct rb_node *deepest; -+ -+ if (!node->rb_right && !node->rb_left) -+ deepest = rb_parent(node); -+ else if (!node->rb_right) -+ deepest = node->rb_left; -+ else if (!node->rb_left) -+ deepest = node->rb_right; -+ else { -+ deepest = rb_next(node); -+ if (deepest->rb_right) -+ deepest = deepest->rb_right; -+ else if (rb_parent(deepest) != node) -+ deepest = rb_parent(deepest); -+ } -+ -+ return deepest; -+} -+ -+/** -+ * bfq_active_extract - remove an entity from the active tree. -+ * @st: the service_tree containing the tree. -+ * @entity: the entity being removed. -+ */ -+static void bfq_active_extract(struct bfq_service_tree *st, -+ struct bfq_entity *entity) -+{ -+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); -+ struct rb_node *node; -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ struct bfq_sched_data *sd = NULL; -+ struct bfq_group *bfqg = NULL; -+ struct bfq_data *bfqd = NULL; -+#endif -+ -+ node = bfq_find_deepest(&entity->rb_node); -+ bfq_extract(&st->active, entity); -+ -+ if (node) -+ bfq_update_active_tree(node); -+ -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ sd = entity->sched_data; -+ bfqg = container_of(sd, struct bfq_group, sched_data); -+ BUG_ON(!bfqg); -+ bfqd = (struct bfq_data *)bfqg->bfqd; -+#endif -+ if (bfqq) -+ list_del(&bfqq->bfqq_list); -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ if (bfqg != bfqd->root_group) { -+ BUG_ON(!bfqg); -+ BUG_ON(!bfqd); -+ BUG_ON(!bfqg->active_entities); -+ bfqg->active_entities--; -+ } -+#endif -+} -+ -+/** -+ * bfq_idle_insert - insert an entity into the idle tree. -+ * @st: the service tree containing the tree. -+ * @entity: the entity to insert. -+ */ -+static void bfq_idle_insert(struct bfq_service_tree *st, -+ struct bfq_entity *entity) -+{ -+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); -+ struct bfq_entity *first_idle = st->first_idle; -+ struct bfq_entity *last_idle = st->last_idle; -+ -+ if (!first_idle || bfq_gt(first_idle->finish, entity->finish)) -+ st->first_idle = entity; -+ if (!last_idle || bfq_gt(entity->finish, last_idle->finish)) -+ st->last_idle = entity; -+ -+ bfq_insert(&st->idle, entity); -+ -+ if (bfqq) -+ list_add(&bfqq->bfqq_list, &bfqq->bfqd->idle_list); -+} -+ -+/** -+ * bfq_forget_entity - do not consider entity any longer for scheduling -+ * @st: the service tree. -+ * @entity: the entity being removed. -+ * @is_in_service: true if entity is currently the in-service entity. -+ * -+ * Forget everything about @entity. In addition, if entity represents -+ * a queue, and the latter is not in service, then release the service -+ * reference to the queue (the one taken through bfq_get_entity). In -+ * fact, in this case, there is really no more service reference to -+ * the queue, as the latter is also outside any service tree. If, -+ * instead, the queue is in service, then __bfq_bfqd_reset_in_service -+ * will take care of putting the reference when the queue finally -+ * stops being served. 
-+ */ -+static void bfq_forget_entity(struct bfq_service_tree *st, -+ struct bfq_entity *entity, -+ bool is_in_service) -+{ -+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); -+ BUG_ON(!entity->on_st); -+ -+ entity->on_st = false; -+ st->wsum -= entity->weight; -+ if (bfqq && !is_in_service) { -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "(before): %p %d", -+ bfqq, bfqq->ref); -+ bfq_put_queue(bfqq); -+ } -+} -+ -+/** -+ * bfq_put_idle_entity - release the idle tree ref of an entity. -+ * @st: service tree for the entity. -+ * @entity: the entity being released. -+ */ -+static void bfq_put_idle_entity(struct bfq_service_tree *st, -+ struct bfq_entity *entity) -+{ -+ bfq_idle_extract(st, entity); -+ bfq_forget_entity(st, entity, -+ entity == entity->sched_data->in_service_entity); -+} -+ -+/** -+ * bfq_forget_idle - update the idle tree if necessary. -+ * @st: the service tree to act upon. -+ * -+ * To preserve the global O(log N) complexity we only remove one entry here; -+ * as the idle tree will not grow indefinitely this can be done safely. -+ */ -+static void bfq_forget_idle(struct bfq_service_tree *st) -+{ -+ struct bfq_entity *first_idle = st->first_idle; -+ struct bfq_entity *last_idle = st->last_idle; -+ -+ if (RB_EMPTY_ROOT(&st->active) && last_idle && -+ !bfq_gt(last_idle->finish, st->vtime)) { -+ /* -+ * Forget the whole idle tree, increasing the vtime past -+ * the last finish time of idle entities. -+ */ -+ st->vtime = last_idle->finish; -+ } -+ -+ if (first_idle && !bfq_gt(first_idle->finish, st->vtime)) -+ bfq_put_idle_entity(st, first_idle); -+} -+ -+/* -+ * Update weight and priority of entity. If update_class_too is true, -+ * then update the ioprio_class of entity too. -+ * -+ * The reason why the update of ioprio_class is controlled through the -+ * last parameter is as follows. Changing the ioprio class of an -+ * entity implies changing the destination service trees for that -+ * entity. If such a change occurred when the entity is already on one -+ * of the service trees for its previous class, then the state of the -+ * entity would become more complex: none of the new possible service -+ * trees for the entity, according to bfq_entity_service_tree(), would -+ * match any of the possible service trees on which the entity -+ * is. Complex operations involving these trees, such as entity -+ * activations and deactivations, should take into account this -+ * additional complexity. To avoid this issue, this function is -+ * invoked with update_class_too unset in the points in the code where -+ * entity may happen to be on some tree. 
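-+ *
-+ * A pending ioprio_class change is then simply left pending (the
-+ * prio_changed flag remains set), and gets applied on a later
-+ * invocation with update_class_too set, which happens only when the
-+ * entity is guaranteed not to be on any tree (see
-+ * bfq_update_fin_time_enqueue).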
-+ */ -+static struct bfq_service_tree * -+__bfq_entity_update_weight_prio(struct bfq_service_tree *old_st, -+ struct bfq_entity *entity, -+ bool update_class_too) -+{ -+ struct bfq_service_tree *new_st = old_st; -+ -+ if (entity->prio_changed) { -+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); -+ unsigned int prev_weight, new_weight; -+ struct bfq_data *bfqd = NULL; -+ struct rb_root *root; -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ struct bfq_sched_data *sd; -+ struct bfq_group *bfqg; -+#endif -+ -+ if (bfqq) -+ bfqd = bfqq->bfqd; -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ else { -+ sd = entity->my_sched_data; -+ bfqg = container_of(sd, struct bfq_group, sched_data); -+ BUG_ON(!bfqg); -+ bfqd = (struct bfq_data *)bfqg->bfqd; -+ BUG_ON(!bfqd); -+ } -+#endif -+ -+ BUG_ON(entity->tree && update_class_too); -+ BUG_ON(old_st->wsum < entity->weight); -+ old_st->wsum -= entity->weight; -+ -+ if (entity->new_weight != entity->orig_weight) { -+ if (entity->new_weight < BFQ_MIN_WEIGHT || -+ entity->new_weight > BFQ_MAX_WEIGHT) { -+ pr_crit("update_weight_prio: new_weight %d\n", -+ entity->new_weight); -+ if (entity->new_weight < BFQ_MIN_WEIGHT) -+ entity->new_weight = BFQ_MIN_WEIGHT; -+ else -+ entity->new_weight = BFQ_MAX_WEIGHT; -+ } -+ entity->orig_weight = entity->new_weight; -+ if (bfqq) -+ bfqq->ioprio = -+ bfq_weight_to_ioprio(entity->orig_weight); -+ } -+ -+ if (bfqq && update_class_too) -+ bfqq->ioprio_class = bfqq->new_ioprio_class; -+ -+ /* -+ * Reset prio_changed only if the ioprio_class change -+ * is not pending any longer. -+ */ -+ if (!bfqq || bfqq->ioprio_class == bfqq->new_ioprio_class) -+ entity->prio_changed = 0; -+ -+ /* -+ * NOTE: here we may be changing the weight too early, -+ * this will cause unfairness. The correct approach -+ * would have required additional complexity to defer -+ * weight changes to the proper time instants (i.e., -+ * when entity->finish <= old_st->vtime). -+ */ -+ new_st = bfq_entity_service_tree(entity); -+ -+ prev_weight = entity->weight; -+ new_weight = entity->orig_weight * -+ (bfqq ? bfqq->wr_coeff : 1); -+ /* -+ * If the weight of the entity changes and the entity is a -+ * queue, remove the entity from its old weight counter (if -+ * there is a counter associated with the entity). -+ */ -+ if (prev_weight != new_weight && bfqq) { -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "weight changed %d %d(%d %d)", -+ prev_weight, new_weight, -+ entity->orig_weight, -+ bfqq->wr_coeff); -+ -+ root = &bfqd->queue_weights_tree; -+ __bfq_weights_tree_remove(bfqd, bfqq, root); -+ } -+ entity->weight = new_weight; -+ /* -+ * Add the entity, if it is not a weight-raised queue, to the -+ * counter associated with its new weight. -+ */ -+ if (prev_weight != new_weight && bfqq && bfqq->wr_coeff == 1) { -+ /* If we get here, root has been initialized. */ -+ bfq_weights_tree_add(bfqd, bfqq, root); -+ } -+ -+ new_st->wsum += entity->weight; -+ -+ if (new_st != old_st) { -+ BUG_ON(!update_class_too); -+ entity->start = new_st->vtime; -+ } -+ } -+ -+ return new_st; -+} -+ -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+static void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg); -+#endif -+ -+/** -+ * bfq_bfqq_served - update the scheduler status after selection for -+ * service. -+ * @bfqq: the queue being served. -+ * @served: bytes to transfer. -+ * -+ * NOTE: this can be optimized, as the timestamps of upper level entities -+ * are synchronized every time a new bfqq is selected for service. By now, -+ * we keep it to better check consistency. 
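-+ *
-+ * For each entity along the path to the root, the virtual time of
-+ * the entity's service tree advances by bfq_delta(served, st->wsum),
-+ * i.e., proportionally to served/wsum: the higher the total weight
-+ * of the active entities in the tree, the more slowly the virtual
-+ * time advances for a given amount of service.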
-+ */ -+static void bfq_bfqq_served(struct bfq_queue *bfqq, int served) -+{ -+ struct bfq_entity *entity = &bfqq->entity; -+ struct bfq_service_tree *st; -+ -+ if (!bfqq->service_from_backlogged) -+ bfqq->first_IO_time = jiffies; -+ -+ if (bfqq->wr_coeff > 1) -+ bfqq->service_from_wr += served; -+ -+ bfqq->service_from_backlogged += served; -+ for_each_entity(entity) { -+ st = bfq_entity_service_tree(entity); -+ -+ entity->service += served; -+ -+ BUG_ON(st->wsum == 0); -+ -+ st->vtime += bfq_delta(served, st->wsum); -+ bfq_forget_idle(st); -+ } -+#ifndef BFQ_MQ -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ bfqg_stats_set_start_empty_time(bfqq_group(bfqq)); -+#endif -+#endif -+ st = bfq_entity_service_tree(&bfqq->entity); -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "bfqq_served %d secs, vtime %llu on %p", -+ served, ((st->vtime>>10)*1000)>>12, st); -+} -+ -+/** -+ * bfq_bfqq_charge_time - charge an amount of service equivalent to the length -+ * of the time interval during which bfqq has been in -+ * service. -+ * @bfqd: the device -+ * @bfqq: the queue that needs a service update. -+ * @time_ms: the amount of time during which the queue has received service -+ * -+ * If a queue does not consume its budget fast enough, then providing -+ * the queue with service fairness may impair throughput, more or less -+ * severely. For this reason, queues that consume their budget slowly -+ * are provided with time fairness instead of service fairness. This -+ * goal is achieved through the BFQ scheduling engine, even if such an -+ * engine works in the service, and not in the time domain. The trick -+ * is charging these queues with an inflated amount of service, equal -+ * to the amount of service that they would have received during their -+ * service slot if they had been fast, i.e., if their requests had -+ * been dispatched at a rate equal to the estimated peak rate. -+ * -+ * It is worth noting that time fairness can cause important -+ * distortions in terms of bandwidth distribution, on devices with -+ * internal queueing. The reason is that I/O requests dispatched -+ * during the service slot of a queue may be served after that service -+ * slot is finished, and may have a total processing time loosely -+ * correlated with the duration of the service slot. This is -+ * especially true for short service slots. 
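-+ *
-+ * As a purely illustrative example, assuming a maximum budget of
-+ * 16384 sectors and a timeout of 125 ms, a queue that occupied the
-+ * device for 50 ms would be charged 16384 * 50 / 125, i.e., about
-+ * 6550 sectors, unless it has already received even more service
-+ * than that.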
-+ */ -+static void bfq_bfqq_charge_time(struct bfq_data *bfqd, struct bfq_queue *bfqq, -+ unsigned long time_ms) -+{ -+ struct bfq_entity *entity = &bfqq->entity; -+ unsigned long timeout_ms = jiffies_to_msecs(bfq_timeout); -+ unsigned long bounded_time_ms = min(time_ms, timeout_ms); -+ int serv_to_charge_for_time = -+ (bfqd->bfq_max_budget * bounded_time_ms) / timeout_ms; -+ int tot_serv_to_charge = max(serv_to_charge_for_time, entity->service); -+ -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "%lu/%lu ms, %d/%d/%d/%d sectors", -+ time_ms, timeout_ms, -+ entity->service, -+ tot_serv_to_charge, -+ bfqd->bfq_max_budget, -+ entity->budget); -+ -+ /* Increase budget to avoid inconsistencies */ -+ if (tot_serv_to_charge > entity->budget) -+ entity->budget = tot_serv_to_charge; -+ -+ bfq_bfqq_served(bfqq, -+ max_t(int, 0, tot_serv_to_charge - entity->service)); -+} -+ -+static void bfq_update_fin_time_enqueue(struct bfq_entity *entity, -+ struct bfq_service_tree *st, -+ bool backshifted) -+{ -+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); -+ struct bfq_sched_data *sd = entity->sched_data; -+ -+ /* -+ * When this function is invoked, entity is not in any service -+ * tree, then it is safe to invoke next function with the last -+ * parameter set (see the comments on the function). -+ */ -+ BUG_ON(entity->tree); -+ st = __bfq_entity_update_weight_prio(st, entity, true); -+ bfq_calc_finish(entity, entity->budget); -+ -+ /* -+ * If some queues enjoy backshifting for a while, then their -+ * (virtual) finish timestamps may happen to become lower and -+ * lower than the system virtual time. In particular, if -+ * these queues often happen to be idle for short time -+ * periods, and during such time periods other queues with -+ * higher timestamps happen to be busy, then the backshifted -+ * timestamps of the former queues can become much lower than -+ * the system virtual time. In fact, to serve the queues with -+ * higher timestamps while the ones with lower timestamps are -+ * idle, the system virtual time may be pushed-up to much -+ * higher values than the finish timestamps of the idle -+ * queues. As a consequence, the finish timestamps of all new -+ * or newly activated queues may end up being much larger than -+ * those of lucky queues with backshifted timestamps. The -+ * latter queues may then monopolize the device for a lot of -+ * time. This would simply break service guarantees. -+ * -+ * To reduce this problem, push up a little bit the -+ * backshifted timestamps of the queue associated with this -+ * entity (only a queue can happen to have the backshifted -+ * flag set): just enough to let the finish timestamp of the -+ * queue be equal to the current value of the system virtual -+ * time. This may introduce a little unfairness among queues -+ * with backshifted timestamps, but it does not break -+ * worst-case fairness guarantees. -+ * -+ * As a special case, if bfqq is weight-raised, push up -+ * timestamps much less, to keep very low the probability that -+ * this push up causes the backshifted finish timestamps of -+ * weight-raised queues to become higher than the backshifted -+ * finish timestamps of non weight-raised queues. 
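-+ *
-+ * For instance, with a weight-raising coefficient of 30 (just an
-+ * illustrative value), the timestamps of a weight-raised queue are
-+ * pushed forward by only one thirtieth of the gap between its
-+ * backshifted finish time and the current value of the system
-+ * virtual time.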
-+ */ -+ if (backshifted && bfq_gt(st->vtime, entity->finish)) { -+ unsigned long delta = st->vtime - entity->finish; -+ -+ if (bfqq) -+ delta /= bfqq->wr_coeff; -+ -+ entity->start += delta; -+ entity->finish += delta; -+ -+ if (bfqq) { -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "new queue finish %llu", -+ ((entity->finish>>10)*1000)>>12); -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ } else { -+ struct bfq_group *bfqg = -+ container_of(entity, struct bfq_group, entity); -+ -+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg, -+ "new group finish %llu", -+ ((entity->finish>>10)*1000)>>12); -+#endif -+ } -+ } -+ -+ bfq_active_insert(st, entity); -+ -+ if (bfqq) { -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "queue %seligible in st %p", -+ entity->start <= st->vtime ? "" : "non ", st); -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ } else { -+ struct bfq_group *bfqg = -+ container_of(entity, struct bfq_group, entity); -+ -+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg, -+ "group %seligible in st %p", -+ entity->start <= st->vtime ? "" : "non ", st); -+#endif -+ } -+ BUG_ON(RB_EMPTY_ROOT(&st->active)); -+ BUG_ON(&st->active != &sd->service_tree->active && -+ &st->active != &(sd->service_tree+1)->active && -+ &st->active != &(sd->service_tree+2)->active); -+} -+ -+/** -+ * __bfq_activate_entity - handle activation of entity. -+ * @entity: the entity being activated. -+ * @non_blocking_wait_rq: true if entity was waiting for a request -+ * -+ * Called for a 'true' activation, i.e., if entity is not active and -+ * one of its children receives a new request. -+ * -+ * Basically, this function updates the timestamps of entity and -+ * inserts entity into its active tree, after possibly extracting it -+ * from its idle tree. -+ */ -+static void __bfq_activate_entity(struct bfq_entity *entity, -+ bool non_blocking_wait_rq) -+{ -+ struct bfq_sched_data *sd = entity->sched_data; -+ struct bfq_service_tree *st = bfq_entity_service_tree(entity); -+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); -+ bool backshifted = false; -+ unsigned long long min_vstart; -+ -+ BUG_ON(!sd); -+ BUG_ON(!st); -+ -+ /* See comments on bfq_fqq_update_budg_for_activation */ -+ if (non_blocking_wait_rq && bfq_gt(st->vtime, entity->finish)) { -+ backshifted = true; -+ min_vstart = entity->finish; -+ } else -+ min_vstart = st->vtime; -+ -+ if (entity->tree == &st->idle) { -+ /* -+ * Must be on the idle tree, bfq_idle_extract() will -+ * check for that. -+ */ -+ bfq_idle_extract(st, entity); -+ BUG_ON(entity->tree); -+ entity->start = bfq_gt(min_vstart, entity->finish) ? -+ min_vstart : entity->finish; -+ } else { -+ BUG_ON(entity->tree); -+ /* -+ * The finish time of the entity may be invalid, and -+ * it is in the past for sure, otherwise the queue -+ * would have been on the idle tree. -+ */ -+ entity->start = min_vstart; -+ st->wsum += entity->weight; -+ /* -+ * entity is about to be inserted into a service tree, -+ * and then set in service: get a reference to make -+ * sure entity does not disappear until it is no -+ * longer in service or scheduled for service. 
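-+ *
-+ * This reference is put either by bfq_forget_entity, if the queue
-+ * is not in service when it is forgotten, or by
-+ * __bfq_bfqd_reset_in_service, when the queue finally stops being
-+ * served (see the comments on bfq_forget_entity).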
-+ */ -+ bfq_get_entity(entity); -+ -+ BUG_ON(entity->on_st && bfqq); -+ -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ if (entity->on_st && !bfqq) { -+ struct bfq_group *bfqg = -+ container_of(entity, struct bfq_group, -+ entity); -+ -+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, -+ bfqg, -+ "activate bug, class %d in_service %p", -+ bfq_class_idx(entity), sd->in_service_entity); -+ } -+#endif -+ BUG_ON(entity->on_st && !bfqq); -+ entity->on_st = true; -+ } -+ -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ if (!bfq_entity_to_bfqq(entity)) { /* bfq_group */ -+ struct bfq_group *bfqg = -+ container_of(entity, struct bfq_group, entity); -+ struct bfq_data *bfqd = bfqg->bfqd; -+ -+ BUG_ON(!bfqd); -+ if (!entity->in_groups_with_pending_reqs) { -+ entity->in_groups_with_pending_reqs = true; -+ bfqd->num_groups_with_pending_reqs++; -+ } -+ bfq_log_bfqg(bfqd, bfqg, "num_groups_with_pending_reqs %u", -+ bfqd->num_groups_with_pending_reqs); -+ } -+#endif -+ -+ bfq_update_fin_time_enqueue(entity, st, backshifted); -+} -+ -+/** -+ * __bfq_requeue_entity - handle requeueing or repositioning of an entity. -+ * @entity: the entity being requeued or repositioned. -+ * -+ * Requeueing is needed if this entity stops being served, which -+ * happens if a leaf descendant entity has expired. On the other hand, -+ * repositioning is needed if the next_inservice_entity for the child -+ * entity has changed. See the comments inside the function for -+ * details. -+ * -+ * Basically, this function: 1) removes entity from its active tree if -+ * present there, 2) updates the timestamps of entity and 3) inserts -+ * entity back into its active tree (in the new, right position for -+ * the new values of the timestamps). -+ */ -+static void __bfq_requeue_entity(struct bfq_entity *entity) -+{ -+ struct bfq_sched_data *sd = entity->sched_data; -+ struct bfq_service_tree *st = bfq_entity_service_tree(entity); -+ -+ BUG_ON(!sd); -+ BUG_ON(!st); -+ -+ BUG_ON(entity != sd->in_service_entity && -+ entity->tree != &st->active); -+ -+ if (entity == sd->in_service_entity) { -+ /* -+ * We are requeueing the current in-service entity, -+ * which may have to be done for one of the following -+ * reasons: -+ * - entity represents the in-service queue, and the -+ * in-service queue is being requeued after an -+ * expiration; -+ * - entity represents a group, and its budget has -+ * changed because one of its child entities has -+ * just been either activated or requeued for some -+ * reason; the timestamps of the entity need then to -+ * be updated, and the entity needs to be enqueued -+ * or repositioned accordingly. -+ * -+ * In particular, before requeueing, the start time of -+ * the entity must be moved forward to account for the -+ * service that the entity has received while in -+ * service. This is done by the next instructions. The -+ * finish time will then be updated according to this -+ * new value of the start time, and to the budget of -+ * the entity. -+ */ -+ bfq_calc_finish(entity, entity->service); -+ entity->start = entity->finish; -+ BUG_ON(entity->tree && entity->tree == &st->idle); -+ BUG_ON(entity->tree && entity->tree != &st->active); -+ /* -+ * In addition, if the entity had more than one child -+ * when set in service, then it was not extracted from -+ * the active tree. 
This implies that the position of -+ * the entity in the active tree may need to be -+ * changed now, because we have just updated the start -+ * time of the entity, and we will update its finish -+ * time in a moment (the requeueing is then, more -+ * precisely, a repositioning in this case). To -+ * implement this repositioning, we: 1) dequeue the -+ * entity here, 2) update the finish time and requeue -+ * the entity according to the new timestamps below. -+ */ -+ if (entity->tree) -+ bfq_active_extract(st, entity); -+ } else { /* The entity is already active, and not in service */ -+ /* -+ * In this case, this function gets called only if the -+ * next_in_service entity below this entity has -+ * changed, and this change has caused the budget of -+ * this entity to change, which, finally implies that -+ * the finish time of this entity must be -+ * updated. Such an update may cause the scheduling, -+ * i.e., the position in the active tree, of this -+ * entity to change. We handle this change by: 1) -+ * dequeueing the entity here, 2) updating the finish -+ * time and requeueing the entity according to the new -+ * timestamps below. This is the same approach as the -+ * non-extracted-entity sub-case above. -+ */ -+ bfq_active_extract(st, entity); -+ } -+ -+ bfq_update_fin_time_enqueue(entity, st, false); -+} -+ -+static void __bfq_activate_requeue_entity(struct bfq_entity *entity, -+ struct bfq_sched_data *sd, -+ bool non_blocking_wait_rq) -+{ -+ struct bfq_service_tree *st = bfq_entity_service_tree(entity); -+ -+ if (sd->in_service_entity == entity || entity->tree == &st->active) -+ /* -+ * in service or already queued on the active tree, -+ * requeue or reposition -+ */ -+ __bfq_requeue_entity(entity); -+ else -+ /* -+ * Not in service and not queued on its active tree: -+ * the activity is idle and this is a true activation. -+ */ -+ __bfq_activate_entity(entity, non_blocking_wait_rq); -+} -+ -+ -+/** -+ * bfq_activate_requeue_entity - activate or requeue an entity representing a bfq_queue, -+ * and activate, requeue or reposition all ancestors -+ * for which such an update becomes necessary. -+ * @entity: the entity to activate. -+ * @non_blocking_wait_rq: true if this entity was waiting for a request -+ * @requeue: true if this is a requeue, which implies that bfqq is -+ * being expired; thus ALL its ancestors stop being served and must -+ * therefore be requeued -+ * @expiration: true if this function is being invoked in the expiration path -+ * of the in-service queue -+ */ -+static void bfq_activate_requeue_entity(struct bfq_entity *entity, -+ bool non_blocking_wait_rq, -+ bool requeue, bool expiration) -+{ -+ struct bfq_sched_data *sd; -+ -+ for_each_entity(entity) { -+ BUG_ON(!entity); -+ sd = entity->sched_data; -+ __bfq_activate_requeue_entity(entity, sd, non_blocking_wait_rq); -+ -+ BUG_ON(RB_EMPTY_ROOT(&sd->service_tree->active) && -+ RB_EMPTY_ROOT(&(sd->service_tree+1)->active) && -+ RB_EMPTY_ROOT(&(sd->service_tree+2)->active)); -+ -+ if (!bfq_update_next_in_service(sd, entity, expiration) && -+ !requeue) { -+ BUG_ON(!sd->next_in_service); -+ break; -+ } -+ BUG_ON(!sd->next_in_service); -+ } -+} -+ -+/** -+ * __bfq_deactivate_entity - update sched_data and service trees for -+ * entity, so as to represent entity as inactive -+ * @entity: the entity being deactivated. -+ * @ins_into_idle_tree: if false, the entity will not be put into the -+ * idle tree. -+ * -+ * If necessary and allowed, puts entity into the idle tree. NOTE: -+ * entity may be on no tree if in service. 
-+ */ -+static bool __bfq_deactivate_entity(struct bfq_entity *entity, -+ bool ins_into_idle_tree) -+{ -+ struct bfq_sched_data *sd = entity->sched_data; -+ struct bfq_service_tree *st; -+ bool is_in_service; -+ -+ if (!entity->on_st) { /* entity never activated, or already inactive */ -+ BUG_ON(sd && entity == sd->in_service_entity); -+ return false; -+ } -+ -+ /* -+ * If we get here, then entity is active, which implies that -+ * bfq_group_set_parent has already been invoked for the group -+ * represented by entity. Therefore, the field -+ * entity->sched_data has been set, and we can safely use it. -+ */ -+ st = bfq_entity_service_tree(entity); -+ is_in_service = entity == sd->in_service_entity; -+ -+ BUG_ON(is_in_service && entity->tree && entity->tree != &st->active); -+ -+ bfq_calc_finish(entity, entity->service); -+ -+ if (is_in_service) { -+ sd->in_service_entity = NULL; -+ } else -+ /* -+ * Non in-service entity: nobody will take care of -+ * resetting its service counter on expiration. Do it -+ * now. -+ */ -+ entity->service = 0; -+ -+ if (entity->tree == &st->active) -+ bfq_active_extract(st, entity); -+ else if (!is_in_service && entity->tree == &st->idle) -+ bfq_idle_extract(st, entity); -+ else if (entity->tree) -+ BUG(); -+ -+ if (!ins_into_idle_tree || !bfq_gt(entity->finish, st->vtime)) -+ bfq_forget_entity(st, entity, is_in_service); -+ else -+ bfq_idle_insert(st, entity); -+ -+ return true; -+} -+ -+/** -+ * bfq_deactivate_entity - deactivate an entity representing a bfq_queue. -+ * @entity: the entity to deactivate. -+ * @ins_into_idle_tree: true if the entity can be put into the idle tree -+ * @expiration: true if this function is being invoked in the expiration path -+ * of the in-service queue -+ */ -+static void bfq_deactivate_entity(struct bfq_entity *entity, -+ bool ins_into_idle_tree, -+ bool expiration) -+{ -+ struct bfq_sched_data *sd; -+ struct bfq_entity *parent = NULL; -+ -+ for_each_entity_safe(entity, parent) { -+ sd = entity->sched_data; -+ -+ BUG_ON(sd == NULL); /* -+ * It would mean that this is the -+ * root group. -+ */ -+ -+ BUG_ON(expiration && entity != sd->in_service_entity); -+ -+ BUG_ON(entity != sd->in_service_entity && -+ entity->tree == -+ &bfq_entity_service_tree(entity)->active && -+ !sd->next_in_service); -+ -+ if (!__bfq_deactivate_entity(entity, ins_into_idle_tree)) { -+ /* -+ * entity is not in any tree any more, so -+ * this deactivation is a no-op, and there is -+ * nothing to change for upper-level entities -+ * (in case of expiration, this can never -+ * happen). -+ */ -+ BUG_ON(expiration); /* -+ * entity cannot be already out of -+ * any tree -+ */ -+ return; -+ } -+ -+ if (sd->next_in_service == entity) -+ /* -+ * entity was the next_in_service entity, -+ * then, since entity has just been -+ * deactivated, a new one must be found. -+ */ -+ bfq_update_next_in_service(sd, NULL, expiration); -+ -+ if (sd->next_in_service || sd->in_service_entity) { -+ /* -+ * The parent entity is still active, because -+ * either next_in_service or in_service_entity -+ * is not NULL. So, no further upwards -+ * deactivation must be performed. Yet, -+ * next_in_service has changed. Then the -+ * schedule does need to be updated upwards. -+ * -+ * NOTE If in_service_entity is not NULL, then -+ * next_in_service may happen to be NULL, -+ * although the parent entity is evidently -+ * active. 
This happens if 1) the entity -+ * pointed by in_service_entity is the only -+ * active entity in the parent entity, and 2) -+ * according to the definition of -+ * next_in_service, the in_service_entity -+ * cannot be considered as -+ * next_in_service. See the comments on the -+ * definition of next_in_service for details. -+ */ -+ BUG_ON(sd->next_in_service == entity); -+ BUG_ON(sd->in_service_entity == entity); -+ break; -+ } -+ -+ /* -+ * If we get here, then the parent is no more -+ * backlogged and we need to propagate the -+ * deactivation upwards. Thus let the loop go on. -+ */ -+ -+ /* -+ * Also let parent be queued into the idle tree on -+ * deactivation, to preserve service guarantees, and -+ * assuming that who invoked this function does not -+ * need parent entities too to be removed completely. -+ */ -+ ins_into_idle_tree = true; -+ } -+ -+ /* -+ * If the deactivation loop is fully executed, then there are -+ * no more entities to touch and next loop is not executed at -+ * all. Otherwise, requeue remaining entities if they are -+ * about to stop receiving service, or reposition them if this -+ * is not the case. -+ */ -+ entity = parent; -+ for_each_entity(entity) { -+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); -+ -+ /* -+ * Invoke __bfq_requeue_entity on entity, even if -+ * already active, to requeue/reposition it in the -+ * active tree (because sd->next_in_service has -+ * changed) -+ */ -+ __bfq_requeue_entity(entity); -+ -+ sd = entity->sched_data; -+ BUG_ON(expiration && sd->in_service_entity != entity); -+ -+ if (bfqq) -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "invoking udpdate_next for this queue"); -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ else { -+ struct bfq_group *bfqg = -+ container_of(entity, -+ struct bfq_group, entity); -+ -+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg, -+ "invoking udpdate_next for this entity"); -+ } -+#endif -+ if (!bfq_update_next_in_service(sd, entity, expiration) && -+ !expiration) -+ /* -+ * next_in_service unchanged or not causing -+ * any change in entity->parent->sd, and no -+ * requeueing needed for expiration: stop -+ * here. -+ */ -+ break; -+ } -+} -+ -+/** -+ * bfq_calc_vtime_jump - compute the value to which the vtime should jump, -+ * if needed, to have at least one entity eligible. -+ * @st: the service tree to act upon. -+ * -+ * Assumes that st is not empty. -+ */ -+static u64 bfq_calc_vtime_jump(struct bfq_service_tree *st) -+{ -+ struct bfq_entity *root_entity = bfq_root_active_entity(&st->active); -+ -+ if (bfq_gt(root_entity->min_start, st->vtime)) { -+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(root_entity); -+ -+ if (bfqq) -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "new value %llu", -+ ((root_entity->min_start>>10)*1000)>>12); -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ else { -+ struct bfq_group *bfqg = -+ container_of(root_entity, struct bfq_group, -+ entity); -+ -+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg, -+ "new value %llu", -+ ((root_entity->min_start>>10)*1000)>>12); -+ } -+#endif -+ return root_entity->min_start; -+ } -+ return st->vtime; -+} -+ -+static void bfq_update_vtime(struct bfq_service_tree *st, u64 new_value) -+{ -+ if (new_value > st->vtime) { -+ st->vtime = new_value; -+ bfq_forget_idle(st); -+ } -+} -+ -+/** -+ * bfq_first_active_entity - find the eligible entity with -+ * the smallest finish time -+ * @st: the service tree to select from. 
-+ * @vtime: the system virtual to use as a reference for eligibility -+ * -+ * This function searches the first schedulable entity, starting from the -+ * root of the tree and going on the left every time on this side there is -+ * a subtree with at least one eligible (start >= vtime) entity. The path on -+ * the right is followed only if a) the left subtree contains no eligible -+ * entities and b) no eligible entity has been found yet. -+ */ -+static struct bfq_entity *bfq_first_active_entity(struct bfq_service_tree *st, -+ u64 vtime) -+{ -+ struct bfq_entity *entry, *first = NULL; -+ struct rb_node *node = st->active.rb_node; -+ -+ while (node) { -+ entry = rb_entry(node, struct bfq_entity, rb_node); -+left: -+ if (!bfq_gt(entry->start, vtime)) -+ first = entry; -+ -+ BUG_ON(bfq_gt(entry->min_start, vtime)); -+ -+ if (node->rb_left) { -+ entry = rb_entry(node->rb_left, -+ struct bfq_entity, rb_node); -+ if (!bfq_gt(entry->min_start, vtime)) { -+ node = node->rb_left; -+ goto left; -+ } -+ } -+ if (first) -+ break; -+ node = node->rb_right; -+ } -+ -+ BUG_ON(!first && !RB_EMPTY_ROOT(&st->active)); -+ return first; -+} -+ -+/** -+ * __bfq_lookup_next_entity - return the first eligible entity in @st. -+ * @st: the service tree. -+ * -+ * If there is no in-service entity for the sched_data st belongs to, -+ * then return the entity that will be set in service if: -+ * 1) the parent entity this st belongs to is set in service; -+ * 2) no entity belonging to such parent entity undergoes a state change -+ * that would influence the timestamps of the entity (e.g., becomes idle, -+ * becomes backlogged, changes its budget, ...). -+ * -+ * In this first case, update the virtual time in @st too (see the -+ * comments on this update inside the function). -+ * -+ * In constrast, if there is an in-service entity, then return the -+ * entity that would be set in service if not only the above -+ * conditions, but also the next one held true: the currently -+ * in-service entity, on expiration, -+ * 1) gets a finish time equal to the current one, or -+ * 2) is not eligible any more, or -+ * 3) is idle. -+ */ -+static struct bfq_entity * -+__bfq_lookup_next_entity(struct bfq_service_tree *st, bool in_service) -+{ -+ struct bfq_entity *entity; -+ u64 new_vtime; -+ struct bfq_queue *bfqq; -+ -+ if (RB_EMPTY_ROOT(&st->active)) -+ return NULL; -+ -+ /* -+ * Get the value of the system virtual time for which at -+ * least one entity is eligible. -+ */ -+ new_vtime = bfq_calc_vtime_jump(st); -+ -+ /* -+ * If there is no in-service entity for the sched_data this -+ * active tree belongs to, then push the system virtual time -+ * up to the value that guarantees that at least one entity is -+ * eligible. If, instead, there is an in-service entity, then -+ * do not make any such update, because there is already an -+ * eligible entity, namely the in-service one (even if the -+ * entity is not on st, because it was extracted when set in -+ * service). 
-+ */ -+ if (!in_service) -+ bfq_update_vtime(st, new_vtime); -+ -+ entity = bfq_first_active_entity(st, new_vtime); -+ BUG_ON(bfq_gt(entity->start, new_vtime)); -+ -+ /* Log some information */ -+ bfqq = bfq_entity_to_bfqq(entity); -+ if (bfqq) -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "start %llu vtime %llu st %p", -+ ((entity->start>>10)*1000)>>12, -+ ((new_vtime>>10)*1000)>>12, st); -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ else { -+ struct bfq_group *bfqg = -+ container_of(entity, struct bfq_group, entity); -+ -+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg, -+ "start %llu vtime %llu (%llu) st %p", -+ ((entity->start>>10)*1000)>>12, -+ ((st->vtime>>10)*1000)>>12, -+ ((new_vtime>>10)*1000)>>12, st); -+ } -+#endif -+ -+ BUG_ON(!entity); -+ -+ return entity; -+} -+ -+/** -+ * bfq_lookup_next_entity - return the first eligible entity in @sd. -+ * @sd: the sched_data. -+ * @expiration: true if we are on the expiration path of the in-service queue -+ * -+ * This function is invoked when there has been a change in the trees -+ * for sd, and we need to know what is the new next entity to serve -+ * after this change. -+ */ -+static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd, -+ bool expiration) -+{ -+ struct bfq_service_tree *st = sd->service_tree; -+ struct bfq_service_tree *idle_class_st = st + (BFQ_IOPRIO_CLASSES - 1); -+ struct bfq_entity *entity = NULL; -+ struct bfq_queue *bfqq; -+ int class_idx = 0; -+ -+ BUG_ON(!sd); -+ BUG_ON(!st); -+ /* -+ * Choose from idle class, if needed to guarantee a minimum -+ * bandwidth to this class (and if there is some active entity -+ * in idle class). This should also mitigate -+ * priority-inversion problems in case a low priority task is -+ * holding file system resources. -+ */ -+ if (time_is_before_jiffies(sd->bfq_class_idle_last_service + -+ BFQ_CL_IDLE_TIMEOUT)) { -+ if (!RB_EMPTY_ROOT(&idle_class_st->active)) -+ class_idx = BFQ_IOPRIO_CLASSES - 1; -+ /* About to be served if backlogged, or not yet backlogged */ -+ sd->bfq_class_idle_last_service = jiffies; -+ } -+ -+ /* -+ * Find the next entity to serve for the highest-priority -+ * class, unless the idle class needs to be served. -+ */ -+ for (; class_idx < BFQ_IOPRIO_CLASSES; class_idx++) { -+ /* -+ * If expiration is true, then bfq_lookup_next_entity -+ * is being invoked as a part of the expiration path -+ * of the in-service queue. In this case, even if -+ * sd->in_service_entity is not NULL, -+ * sd->in_service_entiy at this point is actually not -+ * in service any more, and, if needed, has already -+ * been properly queued or requeued into the right -+ * tree. The reason why sd->in_service_entity is still -+ * not NULL here, even if expiration is true, is that -+ * sd->in_service_entiy is reset as a last step in the -+ * expiration path. So, if expiration is true, tell -+ * __bfq_lookup_next_entity that there is no -+ * sd->in_service_entity. 
-+ */ -+ entity = __bfq_lookup_next_entity(st + class_idx, -+ sd->in_service_entity && -+ !expiration); -+ -+ if (entity) -+ break; -+ } -+ -+ BUG_ON(!entity && -+ (!RB_EMPTY_ROOT(&st->active) || !RB_EMPTY_ROOT(&(st+1)->active) || -+ !RB_EMPTY_ROOT(&(st+2)->active))); -+ -+ if (!entity) -+ return NULL; -+ -+ /* Log some information */ -+ bfqq = bfq_entity_to_bfqq(entity); -+ if (bfqq) -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "chosen from st %p %d", -+ st + class_idx, class_idx); -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ else { -+ struct bfq_group *bfqg = -+ container_of(entity, struct bfq_group, entity); -+ -+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg, -+ "chosen from st %p %d", -+ st + class_idx, class_idx); -+ } -+#endif -+ -+ return entity; -+} -+ -+static bool next_queue_may_preempt(struct bfq_data *bfqd) -+{ -+ struct bfq_sched_data *sd = &bfqd->root_group->sched_data; -+ -+ return sd->next_in_service != sd->in_service_entity; -+} -+ -+/* -+ * Get next queue for service. -+ */ -+static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd) -+{ -+ struct bfq_entity *entity = NULL; -+ struct bfq_sched_data *sd; -+ struct bfq_queue *bfqq; -+ -+ BUG_ON(bfqd->in_service_queue); -+ -+ if (bfq_tot_busy_queues(bfqd) == 0) -+ return NULL; -+ -+ /* -+ * Traverse the path from the root to the leaf entity to -+ * serve. Set in service all the entities visited along the -+ * way. -+ */ -+ sd = &bfqd->root_group->sched_data; -+ for (; sd ; sd = entity->my_sched_data) { -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ if (entity) { -+ struct bfq_group *bfqg = -+ container_of(entity, struct bfq_group, entity); -+ -+ bfq_log_bfqg(bfqd, bfqg, -+ "lookup in this group"); -+ if (!sd->next_in_service) -+ pr_crit("lookup in this group"); -+ } else { -+ bfq_log_bfqg(bfqd, bfqd->root_group, -+ "lookup in root group"); -+ if (!sd->next_in_service) -+ pr_crit("lookup in root group"); -+ } -+#endif -+ -+ BUG_ON(!sd->next_in_service); -+ -+ /* -+ * WARNING. We are about to set the in-service entity -+ * to sd->next_in_service, i.e., to the (cached) value -+ * returned by bfq_lookup_next_entity(sd) the last -+ * time it was invoked, i.e., the last time when the -+ * service order in sd changed as a consequence of the -+ * activation or deactivation of an entity. In this -+ * respect, if we execute bfq_lookup_next_entity(sd) -+ * in this very moment, it may, although with low -+ * probability, yield a different entity than that -+ * pointed to by sd->next_in_service. This rare event -+ * happens in case there was no CLASS_IDLE entity to -+ * serve for sd when bfq_lookup_next_entity(sd) was -+ * invoked for the last time, while there is now one -+ * such entity. -+ * -+ * If the above event happens, then the scheduling of -+ * such entity in CLASS_IDLE is postponed until the -+ * service of the sd->next_in_service entity -+ * finishes. In fact, when the latter is expired, -+ * bfq_lookup_next_entity(sd) gets called again, -+ * exactly to update sd->next_in_service. -+ */ -+ -+ /* Make next_in_service entity become in_service_entity */ -+ entity = sd->next_in_service; -+ sd->in_service_entity = entity; -+ -+ /* -+ * If entity is no longer a candidate for next -+ * service, then it must be extracted from its active -+ * tree, so as to make sure that it won't be -+ * considered when computing next_in_service. See the -+ * comments on the function -+ * bfq_no_longer_next_in_service() for details. 
-+ */ -+ if (bfq_no_longer_next_in_service(entity)) -+ bfq_active_extract(bfq_entity_service_tree(entity), -+ entity); -+ -+ /* -+ * Even if entity is not to be extracted according to -+ * the above check, a descendant entity may get -+ * extracted in one of the next iterations of this -+ * loop. Such an event could cause a change in -+ * next_in_service for the level of the descendant -+ * entity, and thus possibly back to this level. -+ * -+ * However, we cannot perform the resulting needed -+ * update of next_in_service for this level before the -+ * end of the whole loop, because, to know which is -+ * the correct next-to-serve candidate entity for each -+ * level, we need first to find the leaf entity to set -+ * in service. In fact, only after we know which is -+ * the next-to-serve leaf entity, we can discover -+ * whether the parent entity of the leaf entity -+ * becomes the next-to-serve, and so on. -+ */ -+ -+ /* Log some information */ -+ bfqq = bfq_entity_to_bfqq(entity); -+ if (bfqq) -+ bfq_log_bfqq(bfqd, bfqq, -+ "this queue, finish %llu", -+ (((entity->finish>>10)*1000)>>10)>>2); -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ else { -+ struct bfq_group *bfqg = -+ container_of(entity, struct bfq_group, entity); -+ -+ bfq_log_bfqg(bfqd, bfqg, -+ "this entity, finish %llu", -+ (((entity->finish>>10)*1000)>>10)>>2); -+ } -+#endif -+ -+ } -+ -+ BUG_ON(!entity); -+ bfqq = bfq_entity_to_bfqq(entity); -+ BUG_ON(!bfqq); -+ -+ /* -+ * We can finally update all next-to-serve entities along the -+ * path from the leaf entity just set in service to the root. -+ */ -+ for_each_entity(entity) { -+ struct bfq_sched_data *sd = entity->sched_data; -+ -+ if (!bfq_update_next_in_service(sd, NULL, false)) -+ break; -+ } -+ -+ return bfqq; -+} -+ -+static void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd) -+{ -+ struct bfq_queue *in_serv_bfqq = bfqd->in_service_queue; -+ struct bfq_entity *in_serv_entity = &in_serv_bfqq->entity; -+ struct bfq_entity *entity = in_serv_entity; -+ -+#ifndef BFQ_MQ -+ if (bfqd->in_service_bic) { -+ put_io_context(bfqd->in_service_bic->icq.ioc); -+ bfqd->in_service_bic = NULL; -+ } -+#endif -+ -+ bfq_clear_bfqq_wait_request(in_serv_bfqq); -+ hrtimer_try_to_cancel(&bfqd->idle_slice_timer); -+ bfqd->in_service_queue = NULL; -+ -+ /* -+ * When this function is called, all in-service entities have -+ * been properly deactivated or requeued, so we can safely -+ * execute the final step: reset in_service_entity along the -+ * path from entity to the root. -+ */ -+ for_each_entity(entity) -+ entity->sched_data->in_service_entity = NULL; -+ -+ /* -+ * in_serv_entity is no longer in service, so, if it is in no -+ * service tree either, then release the service reference to -+ * the queue it represents (taken with bfq_get_entity). 
-+ */ -+ if (!in_serv_entity->on_st) -+ bfq_put_queue(in_serv_bfqq); -+} -+ -+static void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, -+ bool ins_into_idle_tree, bool expiration) -+{ -+ struct bfq_entity *entity = &bfqq->entity; -+ -+ bfq_deactivate_entity(entity, ins_into_idle_tree, expiration); -+} -+ -+static void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) -+{ -+ struct bfq_entity *entity = &bfqq->entity; -+ struct bfq_service_tree *st = bfq_entity_service_tree(entity); -+ -+ BUG_ON(bfqq == bfqd->in_service_queue); -+ BUG_ON(entity->tree != &st->active && entity->tree != &st->idle && -+ entity->on_st); -+ -+ bfq_activate_requeue_entity(entity, bfq_bfqq_non_blocking_wait_rq(bfqq), -+ false, false); -+ bfq_clear_bfqq_non_blocking_wait_rq(bfqq); -+} -+ -+static void bfq_requeue_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, -+ bool expiration) -+{ -+ struct bfq_entity *entity = &bfqq->entity; -+ -+ bfq_activate_requeue_entity(entity, false, -+ bfqq == bfqd->in_service_queue, expiration); -+} -+ -+static void bfqg_stats_update_dequeue(struct bfq_group *bfqg); -+ -+/* -+ * Called when the bfqq no longer has requests pending, remove it from -+ * the service tree. As a special case, it can be invoked during an -+ * expiration. -+ */ -+static void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq, -+ bool expiration) -+{ -+ BUG_ON(!bfq_bfqq_busy(bfqq)); -+ BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list)); -+ -+ bfq_log_bfqq(bfqd, bfqq, "del from busy"); -+ -+ bfq_clear_bfqq_busy(bfqq); -+ -+ BUG_ON(bfq_tot_busy_queues(bfqd) == 0); -+ bfqd->busy_queues[bfqq->ioprio_class - 1]--; -+ -+ if (bfqq->wr_coeff > 1) { -+ bfqd->wr_busy_queues--; -+ BUG_ON(bfqd->wr_busy_queues < 0); -+ } -+ -+ bfqg_stats_update_dequeue(bfqq_group(bfqq)); -+ -+ BUG_ON(bfqq->entity.budget < 0); -+ -+ bfq_deactivate_bfqq(bfqd, bfqq, true, expiration); -+ if (!bfqq->dispatched) -+ bfq_weights_tree_remove(bfqd, bfqq); -+} -+ -+/* -+ * Called when an inactive queue receives a new request. -+ */ -+static void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq) -+{ -+ BUG_ON(bfq_bfqq_busy(bfqq)); -+ BUG_ON(bfqq == bfqd->in_service_queue); -+ -+ bfq_log_bfqq(bfqd, bfqq, "add to busy"); -+ -+ bfq_activate_bfqq(bfqd, bfqq); -+ -+ bfq_mark_bfqq_busy(bfqq); -+ bfqd->busy_queues[bfqq->ioprio_class - 1]++; -+ -+ if (!bfqq->dispatched) -+ if (bfqq->wr_coeff == 1) -+ bfq_weights_tree_add(bfqd, bfqq, -+ &bfqd->queue_weights_tree); -+ -+ if (bfqq->wr_coeff > 1) { -+ bfqd->wr_busy_queues++; -+ BUG_ON(bfqd->wr_busy_queues > bfq_tot_busy_queues(bfqd)); -+ } -+ -+} -diff --git a/block/bfq-sq-iosched.c b/block/bfq-sq-iosched.c -new file mode 100644 -index 000000000000..6da94eef0cf1 ---- /dev/null -+++ b/block/bfq-sq-iosched.c -@@ -0,0 +1,5957 @@ -+/* -+ * Budget Fair Queueing (BFQ) I/O scheduler. -+ * -+ * Based on ideas and code from CFQ: -+ * Copyright (C) 2003 Jens Axboe -+ * -+ * Copyright (C) 2008 Fabio Checconi -+ * Paolo Valente -+ * -+ * Copyright (C) 2015 Paolo Valente -+ * -+ * Copyright (C) 2017 Paolo Valente -+ * -+ * Licensed under the GPL-2 as detailed in the accompanying COPYING.BFQ -+ * file. -+ * -+ * BFQ is a proportional-share I/O scheduler, with some extra -+ * low-latency capabilities. BFQ also supports full hierarchical -+ * scheduling through cgroups. Next paragraphs provide an introduction -+ * on BFQ inner workings. Details on BFQ benefits and usage can be -+ * found in Documentation/block/bfq-iosched.txt. 
-+ * -+ * BFQ is a proportional-share storage-I/O scheduling algorithm based -+ * on the slice-by-slice service scheme of CFQ. But BFQ assigns -+ * budgets, measured in number of sectors, to processes instead of -+ * time slices. The device is not granted to the in-service process -+ * for a given time slice, but until it has exhausted its assigned -+ * budget. This change from the time to the service domain enables BFQ -+ * to distribute the device throughput among processes as desired, -+ * without any distortion due to throughput fluctuations, or to device -+ * internal queueing. BFQ uses an ad hoc internal scheduler, called -+ * B-WF2Q+, to schedule processes according to their budgets. More -+ * precisely, BFQ schedules queues associated with processes. Thanks to -+ * the accurate policy of B-WF2Q+, BFQ can afford to assign high -+ * budgets to I/O-bound processes issuing sequential requests (to -+ * boost the throughput), and yet guarantee a low latency to -+ * interactive and soft real-time applications. -+ * -+ * In particular, BFQ schedules I/O so as to achieve the latter goal-- -+ * low latency for interactive and soft real-time applications--if the -+ * low_latency parameter is set (default configuration). To this -+ * purpose, BFQ constantly tries to detect whether the I/O requests in -+ * a bfq_queue come from an interactive or a soft real-time -+ * application. For brevity, in these cases, the queue is said to be -+ * interactive or soft real-time. In both cases, BFQ privileges the -+ * service of the queue, over that of non-interactive and -+ * non-soft-real-time queues. This privileging is performed, mainly, -+ * by raising the weight of the queue. So, for brevity, we call just -+ * weight-raising periods the time periods during which a queue is -+ * privileged, because deemed interactive or soft real-time. -+ * -+ * The detection of soft real-time queues/applications is described in -+ * detail in the comments on the function -+ * bfq_bfqq_softrt_next_start. On the other hand, the detection of an -+ * interactive queue works as follows: a queue is deemed interactive -+ * if it is constantly non empty only for a limited time interval, -+ * after which it does become empty. The queue may be deemed -+ * interactive again (for a limited time), if it restarts being -+ * constantly non empty, provided that this happens only after the -+ * queue has remained empty for a given minimum idle time. -+ * -+ * By default, BFQ computes automatically the above maximum time -+ * interval, i.e., the time interval after which a constantly -+ * non-empty queue stops being deemed interactive. Since a queue is -+ * weight-raised while it is deemed interactive, this maximum time -+ * interval happens to coincide with the (maximum) duration of the -+ * weight-raising for interactive queues. -+ * -+ * NOTE: if the main or only goal, with a given device, is to achieve -+ * the maximum-possible throughput at all times, then do switch off -+ * all low-latency heuristics for that device, by setting low_latency -+ * to 0. -+ * -+ * BFQ is described in [1], where also a reference to the initial, -+ * more theoretical paper on BFQ can be found. The interested reader -+ * can find in the latter paper full details on the main algorithm, as -+ * well as formulas of the guarantees and formal proofs of all the -+ * properties. 
With respect to the version of BFQ presented in these -+ * papers, this implementation adds a few more heuristics, such as the -+ * one that guarantees a low latency to soft real-time applications, -+ * and a hierarchical extension based on H-WF2Q+. -+ * -+ * B-WF2Q+ is based on WF2Q+, that is described in [2], together with -+ * H-WF2Q+, while the augmented tree used to implement B-WF2Q+ with O(log N) -+ * complexity derives from the one introduced with EEVDF in [3]. -+ * -+ * [1] P. Valente, A. Avanzini, "Evolution of the BFQ Storage I/O -+ * Scheduler", Proceedings of the First Workshop on Mobile System -+ * Technologies (MST-2015), May 2015. -+ * http://algogroup.unimore.it/people/paolo/disk_sched/mst-2015.pdf -+ * -+ * http://algogroup.unimo.it/people/paolo/disk_sched/bf1-v1-suite-results.pdf -+ * -+ * [2] Jon C.R. Bennett and H. Zhang, ``Hierarchical Packet Fair Queueing -+ * Algorithms,'' IEEE/ACM Transactions on Networking, 5(5):675-689, -+ * Oct 1997. -+ * -+ * http://www.cs.cmu.edu/~hzhang/papers/TON-97-Oct.ps.gz -+ * -+ * [3] I. Stoica and H. Abdel-Wahab, ``Earliest Eligible Virtual Deadline -+ * First: A Flexible and Accurate Mechanism for Proportional Share -+ * Resource Allocation,'' technical report. -+ * -+ * http://www.cs.berkeley.edu/~istoica/papers/eevdf-tr-95.pdf -+ */ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include "blk.h" -+#include "bfq.h" -+#include "blk-wbt.h" -+ -+/* Expiration time of sync (0) and async (1) requests, in ns. */ -+static const u64 bfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 }; -+ -+/* Maximum backwards seek, in KiB. */ -+static const int bfq_back_max = (16 * 1024); -+ -+/* Penalty of a backwards seek, in number of sectors. */ -+static const int bfq_back_penalty = 2; -+ -+/* Idling period duration, in ns. */ -+static u32 bfq_slice_idle = (NSEC_PER_SEC / 125); -+ -+/* Minimum number of assigned budgets for which stats are safe to compute. */ -+static const int bfq_stats_min_budgets = 194; -+ -+/* Default maximum budget values, in sectors and number of requests. */ -+static const int bfq_default_max_budget = (16 * 1024); -+ -+/* -+ * When a sync request is dispatched, the queue that contains that -+ * request, and all the ancestor entities of that queue, are charged -+ * with the number of sectors of the request. In constrast, if the -+ * request is async, then the queue and its ancestor entities are -+ * charged with the number of sectors of the request, multiplied by -+ * the factor below. This throttles the bandwidth for async I/O, -+ * w.r.t. to sync I/O, and it is done to counter the tendency of async -+ * writes to steal I/O throughput to reads. -+ * -+ * The current value of this parameter is the result of a tuning with -+ * several hardware and software configurations. We tried to find the -+ * lowest value for which writes do not cause noticeable problems to -+ * reads. In fact, the lower this parameter, the stabler I/O control, -+ * in the following respect. The lower this parameter is, the less -+ * the bandwidth enjoyed by a group decreases -+ * - when the group does writes, w.r.t. to when it does reads; -+ * - when other groups do reads, w.r.t. to when they do writes. -+ */ -+static const int bfq_async_charge_factor = 3; -+ -+/* Default timeout values, in jiffies, approximating CFQ defaults. */ -+static const int bfq_timeout = (HZ / 8); -+ -+/* -+ * Time limit for merging (see comments in bfq_setup_cooperator). 
Set -+ * to the slowest value that, in our tests, proved to be effective in -+ * removing false positives, while not causing true positives to miss -+ * queue merging. -+ * -+ * As can be deduced from the low time limit below, queue merging, if -+ * successful, happens at the very beggining of the I/O of the involved -+ * cooperating processes, as a consequence of the arrival of the very -+ * first requests from each cooperator. After that, there is very -+ * little chance to find cooperators. -+ */ -+static const unsigned long bfq_merge_time_limit = HZ/10; -+ -+#define MAX_LENGTH_REASON_NAME 25 -+ -+static const char reason_name[][MAX_LENGTH_REASON_NAME] = {"TOO_IDLE", -+"BUDGET_TIMEOUT", "BUDGET_EXHAUSTED", "NO_MORE_REQUESTS", -+"PREEMPTED"}; -+ -+static struct kmem_cache *bfq_pool; -+ -+/* Below this threshold (in ns), we consider thinktime immediate. */ -+#define BFQ_MIN_TT (2 * NSEC_PER_MSEC) -+ -+/* hw_tag detection: parallel requests threshold and min samples needed. */ -+#define BFQ_HW_QUEUE_THRESHOLD 3 -+#define BFQ_HW_QUEUE_SAMPLES 32 -+ -+#define BFQQ_SEEK_THR (sector_t)(8 * 100) -+#define BFQQ_SECT_THR_NONROT (sector_t)(2 * 32) -+#define BFQ_RQ_SEEKY(bfqd, last_pos, rq) \ -+ (get_sdist(last_pos, rq) > \ -+ BFQQ_SEEK_THR && \ -+ (!blk_queue_nonrot(bfqd->queue) || \ -+ blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT)) -+#define BFQQ_CLOSE_THR (sector_t)(8 * 1024) -+#define BFQQ_SEEKY(bfqq) (hweight32(bfqq->seek_history) > 19) -+ -+/* Min number of samples required to perform peak-rate update */ -+#define BFQ_RATE_MIN_SAMPLES 32 -+/* Min observation time interval required to perform a peak-rate update (ns) */ -+#define BFQ_RATE_MIN_INTERVAL (300*NSEC_PER_MSEC) -+/* Target observation time interval for a peak-rate update (ns) */ -+#define BFQ_RATE_REF_INTERVAL NSEC_PER_SEC -+ -+/* -+ * Shift used for peak-rate fixed precision calculations. -+ * With -+ * - the current shift: 16 positions -+ * - the current type used to store rate: u32 -+ * - the current unit of measure for rate: [sectors/usec], or, more precisely, -+ * [(sectors/usec) / 2^BFQ_RATE_SHIFT] to take into account the shift, -+ * the range of rates that can be stored is -+ * [1 / 2^BFQ_RATE_SHIFT, 2^(32 - BFQ_RATE_SHIFT)] sectors/usec = -+ * [1 / 2^16, 2^16] sectors/usec = [15e-6, 65536] sectors/usec = -+ * [15, 65G] sectors/sec -+ * Which, assuming a sector size of 512B, corresponds to a range of -+ * [7.5K, 33T] B/sec -+ */ -+#define BFQ_RATE_SHIFT 16 -+ -+/* -+ * When configured for computing the duration of the weight-raising -+ * for interactive queues automatically (see the comments at the -+ * beginning of this file), BFQ does it using the following formula: -+ * duration = (ref_rate / r) * ref_wr_duration, -+ * where r is the peak rate of the device, and ref_rate and -+ * ref_wr_duration are two reference parameters. In particular, -+ * ref_rate is the peak rate of the reference storage device (see -+ * below), and ref_wr_duration is about the maximum time needed, with -+ * BFQ and while reading two files in parallel, to load typical large -+ * applications on the reference device (see the comments on -+ * max_service_from_wr below, for more details on how ref_wr_duration -+ * is obtained). In practice, the slower/faster the device at hand -+ * is, the more/less it takes to load applications with respect to the -+ * reference device. Accordingly, the longer/shorter BFQ grants -+ * weight raising to interactive applications. 
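To get a concrete feel for the formula above, here is a stand-alone sketch; it is not part of the patch, the rates and the reference duration are invented for the example, and they are expressed in MB/s and seconds instead of the shifted sectors/usec units used internally. It also applies the [3, 25] s clamp that bfq_wr_duration() below uses:

#include <stdio.h>

static double wr_duration(double peak_rate, double ref_rate, double ref_wr_dur)
{
	/* duration = (ref_rate / r) * ref_wr_duration, clamped to [3, 25] s */
	double dur = ref_rate / peak_rate * ref_wr_dur;

	if (dur < 3.0)
		dur = 3.0;
	if (dur > 25.0)
		dur = 25.0;
	return dur;
}

int main(void)
{
	const double ref_rate = 120.0;	/* assumed reference device, MB/s */
	const double ref_wr_dur = 8.0;	/* assumed reference duration, s */

	printf("slow HDD, 30 MB/s:  %.1f s of weight raising\n",
	       wr_duration(30.0, ref_rate, ref_wr_dur));
	printf("fast SSD, 480 MB/s: %.1f s of weight raising\n",
	       wr_duration(480.0, ref_rate, ref_wr_dur));
	return 0;
}

The slower the device, the longer interactive applications stay privileged, exactly as described above; the real code performs the same computation in jiffies and in the fixed-point rate units introduced with BFQ_RATE_SHIFT.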
-+ * -+ * BFQ uses two different reference pairs (ref_rate, ref_wr_duration), -+ * depending on whether the device is rotational or non-rotational. -+ * -+ * In the following definitions, ref_rate[0] and ref_wr_duration[0] -+ * are the reference values for a rotational device, whereas -+ * ref_rate[1] and ref_wr_duration[1] are the reference values for a -+ * non-rotational device. The reference rates are not the actual peak -+ * rates of the devices used as a reference, but slightly lower -+ * values. The reason for using slightly lower values is that the -+ * peak-rate estimator tends to yield slightly lower values than the -+ * actual peak rate (it can yield the actual peak rate only if there -+ * is only one process doing I/O, and the process does sequential -+ * I/O). -+ * -+ * The reference peak rates are measured in sectors/usec, left-shifted -+ * by BFQ_RATE_SHIFT. -+ */ -+static int ref_rate[2] = {14000, 33000}; -+/* -+ * To improve readability, a conversion function is used to initialize -+ * the following array, which entails that the array can be -+ * initialized only in a function. -+ */ -+static int ref_wr_duration[2]; -+ -+/* -+ * BFQ uses the above-detailed, time-based weight-raising mechanism to -+ * privilege interactive tasks. This mechanism is vulnerable to the -+ * following false positives: I/O-bound applications that will go on -+ * doing I/O for much longer than the duration of weight -+ * raising. These applications have basically no benefit from being -+ * weight-raised at the beginning of their I/O. On the opposite end, -+ * while being weight-raised, these applications -+ * a) unjustly steal throughput to applications that may actually need -+ * low latency; -+ * b) make BFQ uselessly perform device idling; device idling results -+ * in loss of device throughput with most flash-based storage, and may -+ * increase latencies when used purposelessly. -+ * -+ * BFQ tries to reduce these problems, by adopting the following -+ * countermeasure. To introduce this countermeasure, we need first to -+ * finish explaining how the duration of weight-raising for -+ * interactive tasks is computed. -+ * -+ * For a bfq_queue deemed as interactive, the duration of weight -+ * raising is dynamically adjusted, as a function of the estimated -+ * peak rate of the device, so as to be equal to the time needed to -+ * execute the 'largest' interactive task we benchmarked so far. By -+ * largest task, we mean the task for which each involved process has -+ * to do more I/O than for any of the other tasks we benchmarked. This -+ * reference interactive task is the start-up of LibreOffice Writer, -+ * and in this task each process/bfq_queue needs to have at most ~110K -+ * sectors transfered. -+ * -+ * This last piece of information enables BFQ to reduce the actual -+ * duration of weight-raising for at least one class of I/O-bound -+ * applications: those doing sequential or quasi-sequential I/O. An -+ * example is file copy. In fact, once started, the main I/O-bound -+ * processes of these applications usually consume the above 110K -+ * sectors in much less time than the processes of an application that -+ * is starting, because these I/O-bound processes will greedily devote -+ * almost all their CPU cycles only to their target, -+ * throughput-friendly I/O operations. This is even more true if BFQ -+ * happens to be underestimating the device peak rate, and thus -+ * overestimating the duration of weight raising. 
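A quick back-of-the-envelope check of why this matters for sequential readers (a stand-alone sketch, not part of the patch; the transfer rate is invented for the example, and 512 B sectors are assumed as in the comment on BFQ_RATE_SHIFT above):

#include <stdio.h>

int main(void)
{
	const double sectors = 110000.0;	/* ~110K sectors of service */
	const double sector_bytes = 512.0;
	const double seq_rate_mbs = 100.0;	/* hypothetical sequential reader */

	printf("~%.0f MB transferred in about %.2f s\n",
	       sectors * sector_bytes / 1e6,
	       sectors * sector_bytes / (seq_rate_mbs * 1e6));
	return 0;
}

At that pace a file copy burns through the reference amount of service in well under a second, long before a multi-second weight-raising period would expire on its own.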
But, according to -+ * our measurements, once transferred 110K sectors, these processes -+ * have no right to be weight-raised any longer. -+ * -+ * Basing on the last consideration, BFQ ends weight-raising for a -+ * bfq_queue if the latter happens to have received an amount of -+ * service at least equal to the following constant. The constant is -+ * set to slightly more than 110K, to have a minimum safety margin. -+ * -+ * This early ending of weight-raising reduces the amount of time -+ * during which interactive false positives cause the two problems -+ * described at the beginning of these comments. -+ */ -+static const unsigned long max_service_from_wr = 120000; -+ -+#define BFQ_SERVICE_TREE_INIT ((struct bfq_service_tree) \ -+ { RB_ROOT, RB_ROOT, NULL, NULL, 0, 0 }) -+ -+#define RQ_BIC(rq) icq_to_bic((rq)->elv.priv[0]) -+#define RQ_BFQQ(rq) ((rq)->elv.priv[1]) -+ -+static void bfq_schedule_dispatch(struct bfq_data *bfqd); -+ -+#include "bfq-ioc.c" -+#include "bfq-sched.c" -+#include "bfq-cgroup-included.c" -+ -+#define bfq_class_idle(bfqq) ((bfqq)->ioprio_class == IOPRIO_CLASS_IDLE) -+#define bfq_class_rt(bfqq) ((bfqq)->ioprio_class == IOPRIO_CLASS_RT) -+ -+#define bfq_sample_valid(samples) ((samples) > 80) -+ -+/* -+ * Scheduler run of queue, if there are requests pending and no one in the -+ * driver that will restart queueing. -+ */ -+static void bfq_schedule_dispatch(struct bfq_data *bfqd) -+{ -+ if (bfqd->queued != 0) { -+ bfq_log(bfqd, ""); -+ kblockd_schedule_work(&bfqd->unplug_work); -+ } -+} -+ -+/* -+ * Lifted from AS - choose which of rq1 and rq2 that is best served now. -+ * We choose the request that is closesr to the head right now. Distance -+ * behind the head is penalized and only allowed to a certain extent. -+ */ -+static struct request *bfq_choose_req(struct bfq_data *bfqd, -+ struct request *rq1, -+ struct request *rq2, -+ sector_t last) -+{ -+ sector_t s1, s2, d1 = 0, d2 = 0; -+ unsigned long back_max; -+#define BFQ_RQ1_WRAP 0x01 /* request 1 wraps */ -+#define BFQ_RQ2_WRAP 0x02 /* request 2 wraps */ -+ unsigned int wrap = 0; /* bit mask: requests behind the disk head? */ -+ -+ if (!rq1 || rq1 == rq2) -+ return rq2; -+ if (!rq2) -+ return rq1; -+ -+ if (rq_is_sync(rq1) && !rq_is_sync(rq2)) -+ return rq1; -+ else if (rq_is_sync(rq2) && !rq_is_sync(rq1)) -+ return rq2; -+ if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META)) -+ return rq1; -+ else if ((rq2->cmd_flags & REQ_META) && !(rq1->cmd_flags & REQ_META)) -+ return rq2; -+ -+ s1 = blk_rq_pos(rq1); -+ s2 = blk_rq_pos(rq2); -+ -+ /* -+ * By definition, 1KiB is 2 sectors. -+ */ -+ back_max = bfqd->bfq_back_max * 2; -+ -+ /* -+ * Strict one way elevator _except_ in the case where we allow -+ * short backward seeks which are biased as twice the cost of a -+ * similar forward seek. -+ */ -+ if (s1 >= last) -+ d1 = s1 - last; -+ else if (s1 + back_max >= last) -+ d1 = (last - s1) * bfqd->bfq_back_penalty; -+ else -+ wrap |= BFQ_RQ1_WRAP; -+ -+ if (s2 >= last) -+ d2 = s2 - last; -+ else if (s2 + back_max >= last) -+ d2 = (last - s2) * bfqd->bfq_back_penalty; -+ else -+ wrap |= BFQ_RQ2_WRAP; -+ -+ /* Found required data */ -+ -+ /* -+ * By doing switch() on the bit mask "wrap" we avoid having to -+ * check two variables for all permutations: --> faster! 
-+ */ -+ switch (wrap) { -+ case 0: /* common case for CFQ: rq1 and rq2 not wrapped */ -+ if (d1 < d2) -+ return rq1; -+ else if (d2 < d1) -+ return rq2; -+ -+ if (s1 >= s2) -+ return rq1; -+ else -+ return rq2; -+ -+ case BFQ_RQ2_WRAP: -+ return rq1; -+ case BFQ_RQ1_WRAP: -+ return rq2; -+ case (BFQ_RQ1_WRAP|BFQ_RQ2_WRAP): /* both rqs wrapped */ -+ default: -+ /* -+ * Since both rqs are wrapped, -+ * start with the one that's further behind head -+ * (--> only *one* back seek required), -+ * since back seek takes more time than forward. -+ */ -+ if (s1 <= s2) -+ return rq1; -+ else -+ return rq2; -+ } -+} -+ -+static struct bfq_queue * -+bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root, -+ sector_t sector, struct rb_node **ret_parent, -+ struct rb_node ***rb_link) -+{ -+ struct rb_node **p, *parent; -+ struct bfq_queue *bfqq = NULL; -+ -+ parent = NULL; -+ p = &root->rb_node; -+ while (*p) { -+ struct rb_node **n; -+ -+ parent = *p; -+ bfqq = rb_entry(parent, struct bfq_queue, pos_node); -+ -+ /* -+ * Sort strictly based on sector. Smallest to the left, -+ * largest to the right. -+ */ -+ if (sector > blk_rq_pos(bfqq->next_rq)) -+ n = &(*p)->rb_right; -+ else if (sector < blk_rq_pos(bfqq->next_rq)) -+ n = &(*p)->rb_left; -+ else -+ break; -+ p = n; -+ bfqq = NULL; -+ } -+ -+ *ret_parent = parent; -+ if (rb_link) -+ *rb_link = p; -+ -+ bfq_log(bfqd, "%llu: returning %d", -+ (unsigned long long) sector, -+ bfqq ? bfqq->pid : 0); -+ -+ return bfqq; -+} -+ -+static bool bfq_too_late_for_merging(struct bfq_queue *bfqq) -+{ -+ return bfqq->service_from_backlogged > 0 && -+ time_is_before_jiffies(bfqq->first_IO_time + -+ bfq_merge_time_limit); -+} -+ -+static void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq) -+{ -+ struct rb_node **p, *parent; -+ struct bfq_queue *__bfqq; -+ -+ if (bfqq->pos_root) { -+ rb_erase(&bfqq->pos_node, bfqq->pos_root); -+ bfqq->pos_root = NULL; -+ } -+ -+ /* -+ * bfqq cannot be merged any longer (see comments in -+ * bfq_setup_cooperator): no point in adding bfqq into the -+ * position tree. -+ */ -+ if (bfq_too_late_for_merging(bfqq)) -+ return; -+ -+ if (bfq_class_idle(bfqq)) -+ return; -+ if (!bfqq->next_rq) -+ return; -+ -+ bfqq->pos_root = &bfq_bfqq_to_bfqg(bfqq)->rq_pos_tree; -+ __bfqq = bfq_rq_pos_tree_lookup(bfqd, bfqq->pos_root, -+ blk_rq_pos(bfqq->next_rq), &parent, &p); -+ if (!__bfqq) { -+ rb_link_node(&bfqq->pos_node, parent, p); -+ rb_insert_color(&bfqq->pos_node, bfqq->pos_root); -+ } else -+ bfqq->pos_root = NULL; -+} -+ -+/* -+ * The following function returns true if every queue must receive the -+ * same share of the throughput (this condition is used when deciding -+ * whether idling may be disabled, see the comments in the function -+ * bfq_better_to_idle()). -+ * -+ * Such a scenario occurs when: -+ * 1) all active queues have the same weight, -+ * 2) all active queues belong to the same I/O-priority class, -+ * 3) all active groups at the same level in the groups tree have the same -+ * weight, -+ * 4) all active groups at the same level in the groups tree have the same -+ * number of children. -+ * -+ * Unfortunately, keeping the necessary state for evaluating exactly -+ * the last two symmetry sub-conditions above would be quite complex -+ * and time consuming. 
Therefore this function evaluates, instead, -+ * only the following stronger three sub-conditions, for which it is -+ * much easier to maintain the needed state: -+ * 1) all active queues have the same weight, -+ * 2) all active queues belong to the same I/O-priority class, -+ * 3) there are no active groups. -+ * In particular, the last condition is always true if hierarchical -+ * support or the cgroups interface are not enabled, thus no state -+ * needs to be maintained in this case. -+ */ -+static bool bfq_symmetric_scenario(struct bfq_data *bfqd) -+{ -+ /* -+ * For queue weights to differ, queue_weights_tree must contain -+ * at least two nodes. -+ */ -+ bool varied_queue_weights = !RB_EMPTY_ROOT(&bfqd->queue_weights_tree) && -+ (bfqd->queue_weights_tree.rb_node->rb_left || -+ bfqd->queue_weights_tree.rb_node->rb_right); -+ -+ bool multiple_classes_busy = -+ (bfqd->busy_queues[0] && bfqd->busy_queues[1]) || -+ (bfqd->busy_queues[0] && bfqd->busy_queues[2]) || -+ (bfqd->busy_queues[1] && bfqd->busy_queues[2]); -+ -+ bfq_log(bfqd, "varied_queue_weights %d mul_classes %d", -+ varied_queue_weights, multiple_classes_busy); -+ -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ bfq_log(bfqd, "num_groups_with_pending_reqs %u", -+ bfqd->num_groups_with_pending_reqs); -+#endif -+ -+ return !(varied_queue_weights || multiple_classes_busy -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ || bfqd->num_groups_with_pending_reqs > 0 -+#endif -+ ); -+} -+ -+/* -+ * If the weight-counter tree passed as input contains no counter for -+ * the weight of the input queue, then add that counter; otherwise just -+ * increment the existing counter. -+ * -+ * Note that weight-counter trees contain few nodes in mostly symmetric -+ * scenarios. For example, if all queues have the same weight, then the -+ * weight-counter tree for the queues may contain at most one node. -+ * This holds even if low_latency is on, because weight-raised queues -+ * are not inserted in the tree. -+ * In most scenarios, the rate at which nodes are created/destroyed -+ * should be low too. -+ */ -+static void bfq_weights_tree_add(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq, -+ struct rb_root *root) -+{ -+ struct bfq_entity *entity = &bfqq->entity; -+ struct rb_node **new = &(root->rb_node), *parent = NULL; -+ -+ /* -+ * Do not insert if the queue is already associated with a -+ * counter, which happens if: -+ * 1) a request arrival has caused the queue to become both -+ * non-weight-raised, and hence change its weight, and -+ * backlogged; in this respect, each of the two events -+ * causes an invocation of this function, -+ * 2) this is the invocation of this function caused by the -+ * second event. This second invocation is actually useless, -+ * and we handle this fact by exiting immediately. More -+ * efficient or clearer solutions might possibly be adopted. -+ */ -+ if (bfqq->weight_counter) -+ return; -+ -+ while (*new) { -+ struct bfq_weight_counter *__counter = container_of(*new, -+ struct bfq_weight_counter, -+ weights_node); -+ parent = *new; -+ -+ if (entity->weight == __counter->weight) { -+ bfqq->weight_counter = __counter; -+ goto inc_counter; -+ } -+ if (entity->weight < __counter->weight) -+ new = &((*new)->rb_left); -+ else -+ new = &((*new)->rb_right); -+ } -+ -+ bfqq->weight_counter = kzalloc(sizeof(struct bfq_weight_counter), -+ GFP_ATOMIC); -+ -+ /* -+ * In the unlucky event of an allocation failure, we just -+ * exit. 
This will cause the weight of queue to not be -+ * considered in bfq_symmetric_scenario, which, in its turn, -+ * causes the scenario to be deemed wrongly symmetric in case -+ * bfqq's weight would have been the only weight making the -+ * scenario asymmetric. On the bright side, no unbalance will -+ * however occur when bfqq becomes inactive again (the -+ * invocation of this function is triggered by an activation -+ * of queue). In fact, bfq_weights_tree_remove does nothing -+ * if !bfqq->weight_counter. -+ */ -+ if (unlikely(!bfqq->weight_counter)) -+ return; -+ -+ bfqq->weight_counter->weight = entity->weight; -+ rb_link_node(&bfqq->weight_counter->weights_node, parent, new); -+ rb_insert_color(&bfqq->weight_counter->weights_node, root); -+ -+inc_counter: -+ bfqq->weight_counter->num_active++; -+ bfqq->ref++; -+ -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "refs %d weight %d symmetric %d", -+ bfqq->ref, -+ entity->weight, -+ bfq_symmetric_scenario(bfqd)); -+} -+ -+/* -+ * Decrement the weight counter associated with the queue, and, if the -+ * counter reaches 0, remove the counter from the tree. -+ * See the comments to the function bfq_weights_tree_add() for considerations -+ * about overhead. -+ */ -+static void __bfq_weights_tree_remove(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq, -+ struct rb_root *root) -+{ -+ struct bfq_entity *entity = &bfqq->entity; -+ -+ if (!bfqq->weight_counter) -+ return; -+ -+ BUG_ON(RB_EMPTY_ROOT(root)); -+ BUG_ON(bfqq->weight_counter->weight != entity->weight); -+ -+ BUG_ON(!bfqq->weight_counter->num_active); -+ bfqq->weight_counter->num_active--; -+ -+ if (bfqq->weight_counter->num_active > 0) -+ goto reset_entity_pointer; -+ -+ rb_erase(&bfqq->weight_counter->weights_node, root); -+ kfree(bfqq->weight_counter); -+ -+reset_entity_pointer: -+ bfqq->weight_counter = NULL; -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "refs %d weight %d symmetric %d", -+ bfqq->ref, -+ entity->weight, -+ bfq_symmetric_scenario(bfqd)); -+ bfq_put_queue(bfqq); -+} -+ -+/* -+ * Invoke __bfq_weights_tree_remove on bfqq and decrement the number -+ * of active groups for each queue's inactive parent entity. -+ */ -+static void bfq_weights_tree_remove(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq) -+{ -+ struct bfq_entity *entity = bfqq->entity.parent; -+ -+ for_each_entity(entity) { -+ struct bfq_sched_data *sd = entity->my_sched_data; -+ -+ BUG_ON(entity->sched_data == NULL); /* -+ * It would mean -+ * that this is -+ * the root group. -+ */ -+ -+ if (sd->next_in_service || sd->in_service_entity) { -+ BUG_ON(!entity->in_groups_with_pending_reqs); -+ /* -+ * entity is still active, because either -+ * next_in_service or in_service_entity is not -+ * NULL (see the comments on the definition of -+ * next_in_service for details on why -+ * in_service_entity must be checked too). -+ * -+ * As a consequence, its parent entities are -+ * active as well, and thus this loop must -+ * stop here. -+ */ -+ break; -+ } -+ -+ BUG_ON(!bfqd->num_groups_with_pending_reqs && -+ entity->in_groups_with_pending_reqs); -+ /* -+ * The decrement of num_groups_with_pending_reqs is -+ * not performed immediately upon the deactivation of -+ * entity, but it is delayed to when it also happens -+ * that the first leaf descendant bfqq of entity gets -+ * all its pending requests completed. The following -+ * instructions perform this delayed decrement, if -+ * needed. See the comments on -+ * num_groups_with_pending_reqs for details. 
-+ */ -+ if (entity->in_groups_with_pending_reqs) { -+ entity->in_groups_with_pending_reqs = false; -+ bfqd->num_groups_with_pending_reqs--; -+ } -+ bfq_log_bfqq(bfqd, bfqq, "num_groups_with_pending_reqs %u", -+ bfqd->num_groups_with_pending_reqs); -+ } -+ -+ /* -+ * Next function is invoked last, because it causes bfqq to be -+ * freed if the following holds: bfqq is not in service and -+ * has no dispatched request. DO NOT use bfqq after the next -+ * function invocation. -+ */ -+ __bfq_weights_tree_remove(bfqd, bfqq, -+ &bfqd->queue_weights_tree); -+} -+ -+/* -+ * Return expired entry, or NULL to just start from scratch in rbtree. -+ */ -+static struct request *bfq_check_fifo(struct bfq_queue *bfqq, -+ struct request *last) -+{ -+ struct request *rq; -+ -+ if (bfq_bfqq_fifo_expire(bfqq)) -+ return NULL; -+ -+ bfq_mark_bfqq_fifo_expire(bfqq); -+ -+ rq = rq_entry_fifo(bfqq->fifo.next); -+ -+ if (rq == last || ktime_get_ns() < rq->fifo_time) -+ return NULL; -+ -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "returned %p", rq); -+ BUG_ON(RB_EMPTY_NODE(&rq->rb_node)); -+ return rq; -+} -+ -+static struct request *bfq_find_next_rq(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq, -+ struct request *last) -+{ -+ struct rb_node *rbnext = rb_next(&last->rb_node); -+ struct rb_node *rbprev = rb_prev(&last->rb_node); -+ struct request *next, *prev = NULL; -+ -+ BUG_ON(list_empty(&bfqq->fifo)); -+ -+ /* Follow expired path, else get first next available. */ -+ next = bfq_check_fifo(bfqq, last); -+ if (next) { -+ BUG_ON(next == last); -+ return next; -+ } -+ -+ BUG_ON(RB_EMPTY_NODE(&last->rb_node)); -+ -+ if (rbprev) -+ prev = rb_entry_rq(rbprev); -+ -+ if (rbnext) -+ next = rb_entry_rq(rbnext); -+ else { -+ rbnext = rb_first(&bfqq->sort_list); -+ if (rbnext && rbnext != &last->rb_node) -+ next = rb_entry_rq(rbnext); -+ } -+ -+ return bfq_choose_req(bfqd, next, prev, blk_rq_pos(last)); -+} -+ -+/* see the definition of bfq_async_charge_factor for details */ -+static unsigned long bfq_serv_to_charge(struct request *rq, -+ struct bfq_queue *bfqq) -+{ -+ if (bfq_bfqq_sync(bfqq) || bfqq->wr_coeff > 1 || -+ !bfq_symmetric_scenario(bfqq->bfqd)) -+ return blk_rq_sectors(rq); -+ -+ return blk_rq_sectors(rq) * bfq_async_charge_factor; -+} -+ -+/** -+ * bfq_updated_next_req - update the queue after a new next_rq selection. -+ * @bfqd: the device data the queue belongs to. -+ * @bfqq: the queue to update. -+ * -+ * If the first request of a queue changes we make sure that the queue -+ * has enough budget to serve at least its first request (if the -+ * request has grown). We do this because if the queue has not enough -+ * budget for its first request, it has to go through two dispatch -+ * rounds to actually get it dispatched. -+ */ -+static void bfq_updated_next_req(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq) -+{ -+ struct bfq_entity *entity = &bfqq->entity; -+ struct bfq_service_tree *st = bfq_entity_service_tree(entity); -+ struct request *next_rq = bfqq->next_rq; -+ unsigned long new_budget; -+ -+ if (!next_rq) -+ return; -+ -+ if (bfqq == bfqd->in_service_queue) -+ /* -+ * In order not to break guarantees, budgets cannot be -+ * changed after an entity has been selected. 
-+ */ -+ return; -+ -+ BUG_ON(entity->tree != &st->active); -+ BUG_ON(entity == entity->sched_data->in_service_entity); -+ -+ new_budget = max_t(unsigned long, -+ max_t(unsigned long, bfqq->max_budget, -+ bfq_serv_to_charge(next_rq, bfqq)), -+ entity->service); -+ if (entity->budget != new_budget) { -+ entity->budget = new_budget; -+ bfq_log_bfqq(bfqd, bfqq, "new budget %lu", -+ new_budget); -+ bfq_requeue_bfqq(bfqd, bfqq, false); -+ } -+} -+ -+static unsigned int bfq_wr_duration(struct bfq_data *bfqd) -+{ -+ u64 dur; -+ -+ if (bfqd->bfq_wr_max_time > 0) -+ return bfqd->bfq_wr_max_time; -+ -+ dur = bfqd->rate_dur_prod; -+ do_div(dur, bfqd->peak_rate); -+ -+ /* -+ * Limit duration between 3 and 25 seconds. The upper limit -+ * has been conservatively set after the following worst case: -+ * on a QEMU/KVM virtual machine -+ * - running in a slow PC -+ * - with a virtual disk stacked on a slow low-end 5400rpm HDD -+ * - serving a heavy I/O workload, such as the sequential reading -+ * of several files -+ * mplayer took 23 seconds to start, if constantly weight-raised. -+ * -+ * As for higher values than that accomodating the above bad -+ * scenario, tests show that higher values would often yield -+ * the opposite of the desired result, i.e., would worsen -+ * responsiveness by allowing non-interactive applications to -+ * preserve weight raising for too long. -+ * -+ * On the other end, lower values than 3 seconds make it -+ * difficult for most interactive tasks to complete their jobs -+ * before weight-raising finishes. -+ */ -+ return clamp_val(dur, msecs_to_jiffies(3000), msecs_to_jiffies(25000)); -+} -+ -+/* switch back from soft real-time to interactive weight raising */ -+static void switch_back_to_interactive_wr(struct bfq_queue *bfqq, -+ struct bfq_data *bfqd) -+{ -+ bfqq->wr_coeff = bfqd->bfq_wr_coeff; -+ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); -+ bfqq->last_wr_start_finish = bfqq->wr_start_at_switch_to_srt; -+} -+ -+static void -+bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd, -+ struct bfq_io_cq *bic, bool bfq_already_existing) -+{ -+ unsigned int old_wr_coeff; -+ bool busy = bfq_already_existing && bfq_bfqq_busy(bfqq); -+ -+ if (bic->saved_has_short_ttime) -+ bfq_mark_bfqq_has_short_ttime(bfqq); -+ else -+ bfq_clear_bfqq_has_short_ttime(bfqq); -+ -+ if (bic->saved_IO_bound) -+ bfq_mark_bfqq_IO_bound(bfqq); -+ else -+ bfq_clear_bfqq_IO_bound(bfqq); -+ -+ if (unlikely(busy)) -+ old_wr_coeff = bfqq->wr_coeff; -+ -+ bfqq->wr_coeff = bic->saved_wr_coeff; -+ bfqq->wr_start_at_switch_to_srt = bic->saved_wr_start_at_switch_to_srt; -+ BUG_ON(time_is_after_jiffies(bfqq->wr_start_at_switch_to_srt)); -+ bfqq->last_wr_start_finish = bic->saved_last_wr_start_finish; -+ bfqq->wr_cur_max_time = bic->saved_wr_cur_max_time; -+ BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish)); -+ -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "bic %p wr_coeff %d start_finish %lu max_time %lu", -+ bic, bfqq->wr_coeff, bfqq->last_wr_start_finish, -+ bfqq->wr_cur_max_time); -+ -+ if (bfqq->wr_coeff > 1 && (bfq_bfqq_in_large_burst(bfqq) || -+ time_is_before_jiffies(bfqq->last_wr_start_finish + -+ bfqq->wr_cur_max_time))) { -+ if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time && -+ !bfq_bfqq_in_large_burst(bfqq) && -+ time_is_after_eq_jiffies(bfqq->wr_start_at_switch_to_srt + -+ bfq_wr_duration(bfqd))) { -+ switch_back_to_interactive_wr(bfqq, bfqd); -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "switching back to interactive"); -+ } else { -+ bfqq->wr_coeff = 1; -+ bfq_log_bfqq(bfqq->bfqd, bfqq, 
-+ "switching off wr (%lu + %lu < %lu)", -+ bfqq->last_wr_start_finish, bfqq->wr_cur_max_time, -+ jiffies); -+ } -+ } -+ -+ /* make sure weight will be updated, however we got here */ -+ bfqq->entity.prio_changed = 1; -+ -+ if (likely(!busy)) -+ return; -+ -+ if (old_wr_coeff == 1 && bfqq->wr_coeff > 1) { -+ bfqd->wr_busy_queues++; -+ BUG_ON(bfqd->wr_busy_queues > bfq_tot_busy_queues(bfqd)); -+ } else if (old_wr_coeff > 1 && bfqq->wr_coeff == 1) { -+ bfqd->wr_busy_queues--; -+ BUG_ON(bfqd->wr_busy_queues < 0); -+ } -+} -+ -+static int bfqq_process_refs(struct bfq_queue *bfqq) -+{ -+ int process_refs, io_refs; -+ -+ lockdep_assert_held(bfqq->bfqd->queue->queue_lock); -+ -+ io_refs = bfqq->allocated[READ] + bfqq->allocated[WRITE]; -+ process_refs = bfqq->ref - io_refs - bfqq->entity.on_st - -+ (bfqq->weight_counter != NULL); -+ BUG_ON(process_refs < 0); -+ return process_refs; -+} -+ -+/* Empty burst list and add just bfqq (see comments to bfq_handle_burst) */ -+static void bfq_reset_burst_list(struct bfq_data *bfqd, struct bfq_queue *bfqq) -+{ -+ struct bfq_queue *item; -+ struct hlist_node *n; -+ -+ hlist_for_each_entry_safe(item, n, &bfqd->burst_list, burst_list_node) -+ hlist_del_init(&item->burst_list_node); -+ hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list); -+ bfqd->burst_size = 1; -+ bfqd->burst_parent_entity = bfqq->entity.parent; -+} -+ -+/* Add bfqq to the list of queues in current burst (see bfq_handle_burst) */ -+static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq) -+{ -+ /* Increment burst size to take into account also bfqq */ -+ bfqd->burst_size++; -+ -+ bfq_log_bfqq(bfqd, bfqq, "%d", bfqd->burst_size); -+ -+ BUG_ON(bfqd->burst_size > bfqd->bfq_large_burst_thresh); -+ -+ if (bfqd->burst_size == bfqd->bfq_large_burst_thresh) { -+ struct bfq_queue *pos, *bfqq_item; -+ struct hlist_node *n; -+ -+ /* -+ * Enough queues have been activated shortly after each -+ * other to consider this burst as large. -+ */ -+ bfqd->large_burst = true; -+ bfq_log_bfqq(bfqd, bfqq, "large burst started"); -+ -+ /* -+ * We can now mark all queues in the burst list as -+ * belonging to a large burst. -+ */ -+ hlist_for_each_entry(bfqq_item, &bfqd->burst_list, -+ burst_list_node) { -+ bfq_mark_bfqq_in_large_burst(bfqq_item); -+ bfq_log_bfqq(bfqd, bfqq_item, "marked in large burst"); -+ } -+ bfq_mark_bfqq_in_large_burst(bfqq); -+ bfq_log_bfqq(bfqd, bfqq, "marked in large burst"); -+ -+ /* -+ * From now on, and until the current burst finishes, any -+ * new queue being activated shortly after the last queue -+ * was inserted in the burst can be immediately marked as -+ * belonging to a large burst. So the burst list is not -+ * needed any more. Remove it. -+ */ -+ hlist_for_each_entry_safe(pos, n, &bfqd->burst_list, -+ burst_list_node) -+ hlist_del_init(&pos->burst_list_node); -+ } else /* -+ * Burst not yet large: add bfqq to the burst list. Do -+ * not increment the ref counter for bfqq, because bfqq -+ * is removed from the burst list before freeing bfqq -+ * in put_queue. -+ */ -+ hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list); -+} -+ -+/* -+ * If many queues belonging to the same group happen to be created -+ * shortly after each other, then the processes associated with these -+ * queues have typically a common goal. In particular, bursts of queue -+ * creations are usually caused by services or applications that spawn -+ * many parallel threads/processes. Examples are systemd during boot, -+ * or git grep. 
To help these processes get their job done as soon as -+ * possible, it is usually better to not grant either weight-raising -+ * or device idling to their queues. -+ * -+ * In this comment we describe, firstly, the reasons why this fact -+ * holds, and, secondly, the next function, which implements the main -+ * steps needed to properly mark these queues so that they can then be -+ * treated in a different way. -+ * -+ * The above services or applications benefit mostly from a high -+ * throughput: the quicker the requests of the activated queues are -+ * cumulatively served, the sooner the target job of these queues gets -+ * completed. As a consequence, weight-raising any of these queues, -+ * which also implies idling the device for it, is almost always -+ * counterproductive. In most cases it just lowers throughput. -+ * -+ * On the other hand, a burst of queue creations may be caused also by -+ * the start of an application that does not consist of a lot of -+ * parallel I/O-bound threads. In fact, with a complex application, -+ * several short processes may need to be executed to start-up the -+ * application. In this respect, to start an application as quickly as -+ * possible, the best thing to do is in any case to privilege the I/O -+ * related to the application with respect to all other -+ * I/O. Therefore, the best strategy to start as quickly as possible -+ * an application that causes a burst of queue creations is to -+ * weight-raise all the queues created during the burst. This is the -+ * exact opposite of the best strategy for the other type of bursts. -+ * -+ * In the end, to take the best action for each of the two cases, the -+ * two types of bursts need to be distinguished. Fortunately, this -+ * seems relatively easy, by looking at the sizes of the bursts. In -+ * particular, we found a threshold such that only bursts with a -+ * larger size than that threshold are apparently caused by -+ * services or commands such as systemd or git grep. For brevity, -+ * hereafter we call just 'large' these bursts. BFQ *does not* -+ * weight-raise queues whose creation occurs in a large burst. In -+ * addition, for each of these queues BFQ performs or does not perform -+ * idling depending on which choice boosts the throughput more. The -+ * exact choice depends on the device and request pattern at -+ * hand. -+ * -+ * Unfortunately, false positives may occur while an interactive task -+ * is starting (e.g., an application is being started). The -+ * consequence is that the queues associated with the task do not -+ * enjoy weight raising as expected. Fortunately these false positives -+ * are very rare. They typically occur if some service happens to -+ * start doing I/O exactly when the interactive task starts. -+ * -+ * Turning back to the next function, it implements all the steps -+ * needed to detect the occurrence of a large burst and to properly -+ * mark all the queues belonging to it (so that they can then be -+ * treated in a different way). This goal is achieved by maintaining a -+ * "burst list" that holds, temporarily, the queues that belong to the -+ * burst in progress. The list is then used to mark these queues as -+ * belonging to a large burst if the burst does become large. The main -+ * steps are the following. -+ * -+ * . when the very first queue is created, the queue is inserted into the -+ * list (as it could be the first queue in a possible burst) -+ * -+ * . 
if the current burst has not yet become large, and a queue Q that does -+ * not yet belong to the burst is activated shortly after the last time -+ * at which a new queue entered the burst list, then the function appends -+ * Q to the burst list -+ * -+ * . if, as a consequence of the previous step, the burst size reaches -+ * the large-burst threshold, then -+ * -+ * . all the queues in the burst list are marked as belonging to a -+ * large burst -+ * -+ * . the burst list is deleted; in fact, the burst list already served -+ * its purpose (keeping temporarily track of the queues in a burst, -+ * so as to be able to mark them as belonging to a large burst in the -+ * previous sub-step), and now is not needed any more -+ * -+ * . the device enters a large-burst mode -+ * -+ * . if a queue Q that does not belong to the burst is created while -+ * the device is in large-burst mode and shortly after the last time -+ * at which a queue either entered the burst list or was marked as -+ * belonging to the current large burst, then Q is immediately marked -+ * as belonging to a large burst. -+ * -+ * . if a queue Q that does not belong to the burst is created a while -+ * later, i.e., not shortly after, than the last time at which a queue -+ * either entered the burst list or was marked as belonging to the -+ * current large burst, then the current burst is deemed as finished and: -+ * -+ * . the large-burst mode is reset if set -+ * -+ * . the burst list is emptied -+ * -+ * . Q is inserted in the burst list, as Q may be the first queue -+ * in a possible new burst (then the burst list contains just Q -+ * after this step). -+ */ -+static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq) -+{ -+ /* -+ * If bfqq is already in the burst list or is part of a large -+ * burst, or finally has just been split, then there is -+ * nothing else to do. -+ */ -+ if (!hlist_unhashed(&bfqq->burst_list_node) || -+ bfq_bfqq_in_large_burst(bfqq) || -+ time_is_after_eq_jiffies(bfqq->split_time + -+ msecs_to_jiffies(10))) -+ return; -+ -+ /* -+ * If bfqq's creation happens late enough, or bfqq belongs to -+ * a different group than the burst group, then the current -+ * burst is finished, and related data structures must be -+ * reset. -+ * -+ * In this respect, consider the special case where bfqq is -+ * the very first queue created after BFQ is selected for this -+ * device. In this case, last_ins_in_burst and -+ * burst_parent_entity are not yet significant when we get -+ * here. But it is easy to verify that, whether or not the -+ * following condition is true, bfqq will end up being -+ * inserted into the burst list. In particular the list will -+ * happen to contain only bfqq. And this is exactly what has -+ * to happen, as bfqq may be the first queue of the first -+ * burst. -+ */ -+ if (time_is_before_jiffies(bfqd->last_ins_in_burst + -+ bfqd->bfq_burst_interval) || -+ bfqq->entity.parent != bfqd->burst_parent_entity) { -+ bfqd->large_burst = false; -+ bfq_reset_burst_list(bfqd, bfqq); -+ bfq_log_bfqq(bfqd, bfqq, -+ "late activation or different group"); -+ goto end; -+ } -+ -+ /* -+ * If we get here, then bfqq is being activated shortly after the -+ * last queue. So, if the current burst is also large, we can mark -+ * bfqq as belonging to this large burst immediately. 
-+ */ -+ if (bfqd->large_burst) { -+ bfq_log_bfqq(bfqd, bfqq, "marked in burst"); -+ bfq_mark_bfqq_in_large_burst(bfqq); -+ goto end; -+ } -+ -+ /* -+ * If we get here, then a large-burst state has not yet been -+ * reached, but bfqq is being activated shortly after the last -+ * queue. Then we add bfqq to the burst. -+ */ -+ bfq_add_to_burst(bfqd, bfqq); -+end: -+ /* -+ * At this point, bfqq either has been added to the current -+ * burst or has caused the current burst to terminate and a -+ * possible new burst to start. In particular, in the second -+ * case, bfqq has become the first queue in the possible new -+ * burst. In both cases last_ins_in_burst needs to be moved -+ * forward. -+ */ -+ bfqd->last_ins_in_burst = jiffies; -+ -+} -+ -+static int bfq_bfqq_budget_left(struct bfq_queue *bfqq) -+{ -+ struct bfq_entity *entity = &bfqq->entity; -+ -+ if (entity->budget < entity->service) { -+ pr_crit("budget %d service %d\n", -+ entity->budget, entity->service); -+ BUG(); -+ } -+ return entity->budget - entity->service; -+} -+ -+/* -+ * If enough samples have been computed, return the current max budget -+ * stored in bfqd, which is dynamically updated according to the -+ * estimated disk peak rate; otherwise return the default max budget -+ */ -+static int bfq_max_budget(struct bfq_data *bfqd) -+{ -+ if (bfqd->budgets_assigned < bfq_stats_min_budgets) -+ return bfq_default_max_budget; -+ else -+ return bfqd->bfq_max_budget; -+} -+ -+/* -+ * Return min budget, which is a fraction of the current or default -+ * max budget (trying with 1/32) -+ */ -+static int bfq_min_budget(struct bfq_data *bfqd) -+{ -+ if (bfqd->budgets_assigned < bfq_stats_min_budgets) -+ return bfq_default_max_budget / 32; -+ else -+ return bfqd->bfq_max_budget / 32; -+} -+ -+static void bfq_bfqq_expire(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq, -+ bool compensate, -+ enum bfqq_expiration reason); -+ -+/* -+ * The next function, invoked after the input queue bfqq switches from -+ * idle to busy, updates the budget of bfqq. The function also tells -+ * whether the in-service queue should be expired, by returning -+ * true. The purpose of expiring the in-service queue is to give bfqq -+ * the chance to possibly preempt the in-service queue, and the reason -+ * for preempting the in-service queue is to achieve one of the two -+ * goals below. -+ * -+ * 1. Guarantee to bfqq its reserved bandwidth even if bfqq has -+ * expired because it has remained idle. In particular, bfqq may have -+ * expired for one of the following two reasons: -+ * -+ * - BFQ_BFQQ_NO_MORE_REQUEST bfqq did not enjoy any device idling and -+ * did not make it to issue a new request before its last request -+ * was served; -+ * -+ * - BFQ_BFQQ_TOO_IDLE bfqq did enjoy device idling, but did not issue -+ * a new request before the expiration of the idling-time. -+ * -+ * Even if bfqq has expired for one of the above reasons, the process -+ * associated with the queue may be however issuing requests greedily, -+ * and thus be sensitive to the bandwidth it receives (bfqq may have -+ * remained idle for other reasons: CPU high load, bfqq not enjoying -+ * idling, I/O throttling somewhere in the path from the process to -+ * the I/O scheduler, ...). But if, after every expiration for one of -+ * the above two reasons, bfqq has to wait for the service of at least -+ * one full budget of another queue before being served again, then -+ * bfqq is likely to get a much lower bandwidth or resource time than -+ * its reserved ones. 
To address this issue, two countermeasures need -+ * to be taken. -+ * -+ * First, the budget and the timestamps of bfqq need to be updated in -+ * a special way on bfqq reactivation: they need to be updated as if -+ * bfqq did not remain idle and did not expire. In fact, if they are -+ * computed as if bfqq expired and remained idle until reactivation, -+ * then the process associated with bfqq is treated as if, instead of -+ * being greedy, it stopped issuing requests when bfqq remained idle, -+ * and restarts issuing requests only on this reactivation. In other -+ * words, the scheduler does not help the process recover the "service -+ * hole" between bfqq expiration and reactivation. As a consequence, -+ * the process receives a lower bandwidth than its reserved one. In -+ * contrast, to recover this hole, the budget must be updated as if -+ * bfqq was not expired at all before this reactivation, i.e., it must -+ * be set to the value of the remaining budget when bfqq was -+ * expired. Along the same line, timestamps need to be assigned the -+ * value they had the last time bfqq was selected for service, i.e., -+ * before last expiration. Thus timestamps need to be back-shifted -+ * with respect to their normal computation (see [1] for more details -+ * on this tricky aspect). -+ * -+ * Secondly, to allow the process to recover the hole, the in-service -+ * queue must be expired too, to give bfqq the chance to preempt it -+ * immediately. In fact, if bfqq has to wait for a full budget of the -+ * in-service queue to be completed, then it may become impossible to -+ * let the process recover the hole, even if the back-shifted -+ * timestamps of bfqq are lower than those of the in-service queue. If -+ * this happens for most or all of the holes, then the process may not -+ * receive its reserved bandwidth. In this respect, it is worth noting -+ * that, being the service of outstanding requests unpreemptible, a -+ * little fraction of the holes may however be unrecoverable, thereby -+ * causing a little loss of bandwidth. -+ * -+ * The last important point is detecting whether bfqq does need this -+ * bandwidth recovery. In this respect, the next function deems the -+ * process associated with bfqq greedy, and thus allows it to recover -+ * the hole, if: 1) the process is waiting for the arrival of a new -+ * request (which implies that bfqq expired for one of the above two -+ * reasons), and 2) such a request has arrived soon. The first -+ * condition is controlled through the flag non_blocking_wait_rq, -+ * while the second through the flag arrived_in_time. If both -+ * conditions hold, then the function computes the budget in the -+ * above-described special way, and signals that the in-service queue -+ * should be expired. Timestamp back-shifting is done later in -+ * __bfq_activate_entity. -+ * -+ * 2. Reduce latency. Even if timestamps are not backshifted to let -+ * the process associated with bfqq recover a service hole, bfqq may -+ * however happen to have, after being (re)activated, a lower finish -+ * timestamp than the in-service queue. That is, the next budget of -+ * bfqq may have to be completed before the one of the in-service -+ * queue. If this is the case, then preempting the in-service queue -+ * allows this goal to be achieved, apart from the unpreemptible, -+ * outstanding requests mentioned above. 
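The "lower finish timestamp" condition mentioned for goal 2 can be pictured with a textbook WF2Q-style finish time F = S + budget/weight. The sketch below is not BFQ's actual timestamp code; it only shows, with invented numbers, why a newly activated queue with a small budget and a high weight may deserve to be served before the in-service one:

#include <stdio.h>

struct toy_entity {
	double start;	/* virtual start time */
	double budget;	/* sectors to serve with the current budget */
	double weight;
};

static double finish(const struct toy_entity *e)
{
	/* WF2Q-like virtual finish time: F = S + budget / weight */
	return e->start + e->budget / e->weight;
}

int main(void)
{
	struct toy_entity in_service = { .start = 0.0, .budget = 8192.0, .weight = 100.0 };
	struct toy_entity newcomer   = { .start = 0.0, .budget =  512.0, .weight = 300.0 };

	printf("in-service F = %.2f, newcomer F = %.2f -> %s\n",
	       finish(&in_service), finish(&newcomer),
	       finish(&newcomer) < finish(&in_service) ?
	       "newcomer finishes first, preemption pays off" :
	       "no preemption needed");
	return 0;
}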
-+ * -+ * Unfortunately, regardless of which of the above two goals one wants -+ * to achieve, service trees need first to be updated to know whether -+ * the in-service queue must be preempted. To have service trees -+ * correctly updated, the in-service queue must be expired and -+ * rescheduled, and bfqq must be scheduled too. This is one of the -+ * most costly operations (in future versions, the scheduling -+ * mechanism may be re-designed in such a way to make it possible to -+ * know whether preemption is needed without needing to update service -+ * trees). In addition, queue preemptions almost always cause random -+ * I/O, and thus loss of throughput. Because of these facts, the next -+ * function adopts the following simple scheme to avoid both costly -+ * operations and too frequent preemptions: it requests the expiration -+ * of the in-service queue (unconditionally) only for queues that need -+ * to recover a hole, or that either are weight-raised or deserve to -+ * be weight-raised. -+ */ -+static bool bfq_bfqq_update_budg_for_activation(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq, -+ bool arrived_in_time, -+ bool wr_or_deserves_wr) -+{ -+ struct bfq_entity *entity = &bfqq->entity; -+ -+ /* -+ * In the next compound condition, we check also whether there -+ * is some budget left, because otherwise there is no point in -+ * trying to go on serving bfqq with this same budget: bfqq -+ * would be expired immediately after being selected for -+ * service. This would only cause useless overhead. -+ */ -+ if (bfq_bfqq_non_blocking_wait_rq(bfqq) && arrived_in_time && -+ bfq_bfqq_budget_left(bfqq) > 0) { -+ /* -+ * We do not clear the flag non_blocking_wait_rq here, as -+ * the latter is used in bfq_activate_bfqq to signal -+ * that timestamps need to be back-shifted (and is -+ * cleared right after). -+ */ -+ -+ /* -+ * In next assignment we rely on that either -+ * entity->service or entity->budget are not updated -+ * on expiration if bfqq is empty (see -+ * __bfq_bfqq_recalc_budget). Thus both quantities -+ * remain unchanged after such an expiration, and the -+ * following statement therefore assigns to -+ * entity->budget the remaining budget on such an -+ * expiration. -+ */ -+ BUG_ON(bfqq->max_budget < 0); -+ entity->budget = min_t(unsigned long, -+ bfq_bfqq_budget_left(bfqq), -+ bfqq->max_budget); -+ -+ BUG_ON(entity->budget < 0); -+ -+ /* -+ * At this point, we have used entity->service to get -+ * the budget left (needed for updating -+ * entity->budget). Thus we finally can, and have to, -+ * reset entity->service. The latter must be reset -+ * because bfqq would otherwise be charged again for -+ * the service it has received during its previous -+ * service slot(s). -+ */ -+ entity->service = 0; -+ -+ return true; -+ } -+ -+ /* -+ * We can finally complete expiration, by setting service to 0. -+ */ -+ entity->service = 0; -+ BUG_ON(bfqq->max_budget < 0); -+ entity->budget = max_t(unsigned long, bfqq->max_budget, -+ bfq_serv_to_charge(bfqq->next_rq, bfqq)); -+ BUG_ON(entity->budget < 0); -+ -+ bfq_clear_bfqq_non_blocking_wait_rq(bfqq); -+ return wr_or_deserves_wr; -+} -+ -+/* -+ * Return the farthest past time instant according to jiffies -+ * macros. 
-+ */ -+static unsigned long bfq_smallest_from_now(void) -+{ -+ return jiffies - MAX_JIFFY_OFFSET; -+} -+ -+static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq, -+ unsigned int old_wr_coeff, -+ bool wr_or_deserves_wr, -+ bool interactive, -+ bool in_burst, -+ bool soft_rt) -+{ -+ if (old_wr_coeff == 1 && wr_or_deserves_wr) { -+ /* start a weight-raising period */ -+ if (interactive) { -+ bfqq->service_from_wr = 0; -+ bfqq->wr_coeff = bfqd->bfq_wr_coeff; -+ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); -+ } else { -+ /* -+ * No interactive weight raising in progress -+ * here: assign minus infinity to -+ * wr_start_at_switch_to_srt, to make sure -+ * that, at the end of the soft-real-time -+ * weight raising periods that is starting -+ * now, no interactive weight-raising period -+ * may be wrongly considered as still in -+ * progress (and thus actually started by -+ * mistake). -+ */ -+ bfqq->wr_start_at_switch_to_srt = -+ bfq_smallest_from_now(); -+ bfqq->wr_coeff = bfqd->bfq_wr_coeff * -+ BFQ_SOFTRT_WEIGHT_FACTOR; -+ bfqq->wr_cur_max_time = -+ bfqd->bfq_wr_rt_max_time; -+ } -+ /* -+ * If needed, further reduce budget to make sure it is -+ * close to bfqq's backlog, so as to reduce the -+ * scheduling-error component due to a too large -+ * budget. Do not care about throughput consequences, -+ * but only about latency. Finally, do not assign a -+ * too small budget either, to avoid increasing -+ * latency by causing too frequent expirations. -+ */ -+ bfqq->entity.budget = min_t(unsigned long, -+ bfqq->entity.budget, -+ 2 * bfq_min_budget(bfqd)); -+ -+ bfq_log_bfqq(bfqd, bfqq, -+ "wrais starting at %lu, rais_max_time %u", -+ jiffies, -+ jiffies_to_msecs(bfqq->wr_cur_max_time)); -+ } else if (old_wr_coeff > 1) { -+ if (interactive) { /* update wr coeff and duration */ -+ bfqq->wr_coeff = bfqd->bfq_wr_coeff; -+ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); -+ } else if (in_burst) { -+ bfqq->wr_coeff = 1; -+ bfq_log_bfqq(bfqd, bfqq, -+ "wrais ending at %lu, rais_max_time %u", -+ jiffies, -+ jiffies_to_msecs(bfqq-> -+ wr_cur_max_time)); -+ } else if (soft_rt) { -+ /* -+ * The application is now or still meeting the -+ * requirements for being deemed soft rt. We -+ * can then correctly and safely (re)charge -+ * the weight-raising duration for the -+ * application with the weight-raising -+ * duration for soft rt applications. -+ * -+ * In particular, doing this recharge now, i.e., -+ * before the weight-raising period for the -+ * application finishes, reduces the probability -+ * of the following negative scenario: -+ * 1) the weight of a soft rt application is -+ * raised at startup (as for any newly -+ * created application), -+ * 2) since the application is not interactive, -+ * at a certain time weight-raising is -+ * stopped for the application, -+ * 3) at that time the application happens to -+ * still have pending requests, and hence -+ * is destined to not have a chance to be -+ * deemed soft rt before these requests are -+ * completed (see the comments to the -+ * function bfq_bfqq_softrt_next_start() -+ * for details on soft rt detection), -+ * 4) these pending requests experience a high -+ * latency because the application is not -+ * weight-raised while they are pending. 
-+ */ -+ if (bfqq->wr_cur_max_time != -+ bfqd->bfq_wr_rt_max_time) { -+ bfqq->wr_start_at_switch_to_srt = -+ bfqq->last_wr_start_finish; -+ BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish)); -+ -+ bfqq->wr_cur_max_time = -+ bfqd->bfq_wr_rt_max_time; -+ bfqq->wr_coeff = bfqd->bfq_wr_coeff * -+ BFQ_SOFTRT_WEIGHT_FACTOR; -+ bfq_log_bfqq(bfqd, bfqq, -+ "switching to soft_rt wr"); -+ } else -+ bfq_log_bfqq(bfqd, bfqq, -+ "moving forward soft_rt wr duration"); -+ bfqq->last_wr_start_finish = jiffies; -+ } -+ } -+} -+ -+static bool bfq_bfqq_idle_for_long_time(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq) -+{ -+ return bfqq->dispatched == 0 && -+ time_is_before_jiffies( -+ bfqq->budget_timeout + -+ bfqd->bfq_wr_min_idle_time); -+} -+ -+static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq, -+ int old_wr_coeff, -+ struct request *rq, -+ bool *interactive) -+{ -+ bool soft_rt, in_burst, wr_or_deserves_wr, -+ bfqq_wants_to_preempt, -+ idle_for_long_time = bfq_bfqq_idle_for_long_time(bfqd, bfqq), -+ /* -+ * See the comments on -+ * bfq_bfqq_update_budg_for_activation for -+ * details on the usage of the next variable. -+ */ -+ arrived_in_time = ktime_get_ns() <= -+ RQ_BIC(rq)->ttime.last_end_request + -+ bfqd->bfq_slice_idle * 3; -+ -+ bfq_log_bfqq(bfqd, bfqq, -+ "bfq_add_request non-busy: " -+ "jiffies %lu, in_time %d, idle_long %d busyw %d " -+ "wr_coeff %u", -+ jiffies, arrived_in_time, -+ idle_for_long_time, -+ bfq_bfqq_non_blocking_wait_rq(bfqq), -+ old_wr_coeff); -+ -+ BUG_ON(bfqq->entity.budget < bfqq->entity.service); -+ -+ BUG_ON(bfqq == bfqd->in_service_queue); -+ bfqg_stats_update_io_add(bfqq_group(RQ_BFQQ(rq)), bfqq, rq->cmd_flags); -+ -+ /* -+ * bfqq deserves to be weight-raised if: -+ * - it is sync, -+ * - it does not belong to a large burst, -+ * - it has been idle for enough time or is soft real-time, -+ * - is linked to a bfq_io_cq (it is not shared in any sense) -+ */ -+ in_burst = bfq_bfqq_in_large_burst(bfqq); -+ soft_rt = bfqd->bfq_wr_max_softrt_rate > 0 && -+ !in_burst && -+ time_is_before_jiffies(bfqq->soft_rt_next_start) && -+ bfqq->dispatched == 0; -+ *interactive = -+ !in_burst && -+ idle_for_long_time; -+ wr_or_deserves_wr = bfqd->low_latency && -+ (bfqq->wr_coeff > 1 || -+ (bfq_bfqq_sync(bfqq) && -+ bfqq->bic && (*interactive || soft_rt))); -+ -+ bfq_log_bfqq(bfqd, bfqq, -+ "bfq_add_request: " -+ "in_burst %d, " -+ "soft_rt %d (next %lu), inter %d, bic %p", -+ bfq_bfqq_in_large_burst(bfqq), soft_rt, -+ bfqq->soft_rt_next_start, -+ *interactive, -+ bfqq->bic); -+ -+ /* -+ * Using the last flag, update budget and check whether bfqq -+ * may want to preempt the in-service queue. -+ */ -+ bfqq_wants_to_preempt = -+ bfq_bfqq_update_budg_for_activation(bfqd, bfqq, -+ arrived_in_time, -+ wr_or_deserves_wr); -+ -+ /* -+ * If bfqq happened to be activated in a burst, but has been -+ * idle for much more than an interactive queue, then we -+ * assume that, in the overall I/O initiated in the burst, the -+ * I/O associated with bfqq is finished. So bfqq does not need -+ * to be treated as a queue belonging to a burst -+ * anymore. Accordingly, we reset bfqq's in_large_burst flag -+ * if set, and remove bfqq from the burst list if it's -+ * there. We do not decrement burst_size, because the fact -+ * that bfqq does not need to belong to the burst list any -+ * more does not invalidate the fact that bfqq was created in -+ * a burst. 
-+ */ -+ if (likely(!bfq_bfqq_just_created(bfqq)) && -+ idle_for_long_time && -+ time_is_before_jiffies( -+ bfqq->budget_timeout + -+ msecs_to_jiffies(10000))) { -+ hlist_del_init(&bfqq->burst_list_node); -+ bfq_clear_bfqq_in_large_burst(bfqq); -+ } -+ -+ bfq_clear_bfqq_just_created(bfqq); -+ -+ if (!bfq_bfqq_IO_bound(bfqq)) { -+ if (arrived_in_time) { -+ bfqq->requests_within_timer++; -+ if (bfqq->requests_within_timer >= -+ bfqd->bfq_requests_within_timer) -+ bfq_mark_bfqq_IO_bound(bfqq); -+ } else -+ bfqq->requests_within_timer = 0; -+ bfq_log_bfqq(bfqd, bfqq, "requests in time %d", -+ bfqq->requests_within_timer); -+ } -+ -+ if (bfqd->low_latency) { -+ if (unlikely(time_is_after_jiffies(bfqq->split_time))) -+ /* wraparound */ -+ bfqq->split_time = -+ jiffies - bfqd->bfq_wr_min_idle_time - 1; -+ -+ if (time_is_before_jiffies(bfqq->split_time + -+ bfqd->bfq_wr_min_idle_time)) { -+ bfq_update_bfqq_wr_on_rq_arrival(bfqd, bfqq, -+ old_wr_coeff, -+ wr_or_deserves_wr, -+ *interactive, -+ in_burst, -+ soft_rt); -+ -+ if (old_wr_coeff != bfqq->wr_coeff) -+ bfqq->entity.prio_changed = 1; -+ } -+ } -+ -+ bfqq->last_idle_bklogged = jiffies; -+ bfqq->service_from_backlogged = 0; -+ bfq_clear_bfqq_softrt_update(bfqq); -+ -+ bfq_add_bfqq_busy(bfqd, bfqq); -+ -+ /* -+ * Expire in-service queue only if preemption may be needed -+ * for guarantees. In this respect, the function -+ * next_queue_may_preempt just checks a simple, necessary -+ * condition, and not a sufficient condition based on -+ * timestamps. In fact, for the latter condition to be -+ * evaluated, timestamps would need first to be updated, and -+ * this operation is quite costly (see the comments on the -+ * function bfq_bfqq_update_budg_for_activation). -+ */ -+ if (bfqd->in_service_queue && bfqq_wants_to_preempt && -+ bfqd->in_service_queue->wr_coeff < bfqq->wr_coeff && -+ next_queue_may_preempt(bfqd)) { -+ struct bfq_queue *in_serv = -+ bfqd->in_service_queue; -+ BUG_ON(in_serv == bfqq); -+ -+ bfq_bfqq_expire(bfqd, bfqd->in_service_queue, -+ false, BFQ_BFQQ_PREEMPTED); -+ } -+} -+ -+static void bfq_add_request(struct request *rq) -+{ -+ struct bfq_queue *bfqq = RQ_BFQQ(rq); -+ struct bfq_data *bfqd = bfqq->bfqd; -+ struct request *next_rq, *prev; -+ unsigned int old_wr_coeff = bfqq->wr_coeff; -+ bool interactive = false; -+ -+ bfq_log_bfqq(bfqd, bfqq, "size %u %s", -+ blk_rq_sectors(rq), rq_is_sync(rq) ? "S" : "A"); -+ -+ if (bfqq->wr_coeff > 1) /* queue is being weight-raised */ -+ bfq_log_bfqq(bfqd, bfqq, -+ "raising period dur %u/%u msec, old coeff %u, w %d(%d)", -+ jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish), -+ jiffies_to_msecs(bfqq->wr_cur_max_time), -+ bfqq->wr_coeff, -+ bfqq->entity.weight, bfqq->entity.orig_weight); -+ -+ bfqq->queued[rq_is_sync(rq)]++; -+ bfqd->queued++; -+ -+ elv_rb_add(&bfqq->sort_list, rq); -+ -+ /* -+ * Check if this request is a better next-to-serve candidate. -+ */ -+ prev = bfqq->next_rq; -+ next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position); -+ BUG_ON(!next_rq); -+ bfqq->next_rq = next_rq; -+ -+ /* -+ * Adjust priority tree position, if next_rq changes. -+ */ -+ if (prev != bfqq->next_rq) -+ bfq_pos_tree_add_move(bfqd, bfqq); -+ -+ if (!bfq_bfqq_busy(bfqq)) /* switching to busy ... 
*/ -+ bfq_bfqq_handle_idle_busy_switch(bfqd, bfqq, old_wr_coeff, -+ rq, &interactive); -+ else { -+ if (bfqd->low_latency && old_wr_coeff == 1 && !rq_is_sync(rq) && -+ time_is_before_jiffies( -+ bfqq->last_wr_start_finish + -+ bfqd->bfq_wr_min_inter_arr_async)) { -+ bfqq->wr_coeff = bfqd->bfq_wr_coeff; -+ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); -+ -+ bfqd->wr_busy_queues++; -+ BUG_ON(bfqd->wr_busy_queues > bfq_tot_busy_queues(bfqd)); -+ bfqq->entity.prio_changed = 1; -+ bfq_log_bfqq(bfqd, bfqq, -+ "non-idle wrais starting, " -+ "wr_max_time %u wr_busy %d", -+ jiffies_to_msecs(bfqq->wr_cur_max_time), -+ bfqd->wr_busy_queues); -+ } -+ if (prev != bfqq->next_rq) -+ bfq_updated_next_req(bfqd, bfqq); -+ } -+ -+ /* -+ * Assign jiffies to last_wr_start_finish in the following -+ * cases: -+ * -+ * . if bfqq is not going to be weight-raised, because, for -+ * non weight-raised queues, last_wr_start_finish stores the -+ * arrival time of the last request; as of now, this piece -+ * of information is used only for deciding whether to -+ * weight-raise async queues -+ * -+ * . if bfqq is not weight-raised, because, if bfqq is now -+ * switching to weight-raised, then last_wr_start_finish -+ * stores the time when weight-raising starts -+ * -+ * . if bfqq is interactive, because, regardless of whether -+ * bfqq is currently weight-raised, the weight-raising -+ * period must start or restart (this case is considered -+ * separately because it is not detected by the above -+ * conditions, if bfqq is already weight-raised) -+ * -+ * last_wr_start_finish has to be updated also if bfqq is soft -+ * real-time, because the weight-raising period is constantly -+ * restarted on idle-to-busy transitions for these queues, but -+ * this is already done in bfq_bfqq_handle_idle_busy_switch if -+ * needed. -+ */ -+ if (bfqd->low_latency && -+ (old_wr_coeff == 1 || bfqq->wr_coeff == 1 || interactive)) -+ bfqq->last_wr_start_finish = jiffies; -+} -+ -+static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd, -+ struct bio *bio) -+{ -+ struct task_struct *tsk = current; -+ struct bfq_io_cq *bic; -+ struct bfq_queue *bfqq; -+ -+ bic = bfq_bic_lookup(bfqd, tsk->io_context); -+ if (!bic) -+ return NULL; -+ -+ bfqq = bic_to_bfqq(bic, op_is_sync(bio->bi_opf)); -+ if (bfqq) -+ return elv_rb_find(&bfqq->sort_list, bio_end_sector(bio)); -+ -+ return NULL; -+} -+ -+static sector_t get_sdist(sector_t last_pos, struct request *rq) -+{ -+ sector_t sdist = 0; -+ -+ if (last_pos) { -+ if (last_pos < blk_rq_pos(rq)) -+ sdist = blk_rq_pos(rq) - last_pos; -+ else -+ sdist = last_pos - blk_rq_pos(rq); -+ } -+ -+ return sdist; -+} -+ -+static void bfq_activate_request(struct request_queue *q, struct request *rq) -+{ -+ struct bfq_data *bfqd = q->elevator->elevator_data; -+ bfqd->rq_in_driver++; -+} -+ -+static void bfq_deactivate_request(struct request_queue *q, struct request *rq) -+{ -+ struct bfq_data *bfqd = q->elevator->elevator_data; -+ -+ BUG_ON(bfqd->rq_in_driver == 0); -+ bfqd->rq_in_driver--; -+} -+ -+static void bfq_remove_request(struct request *rq) -+{ -+ struct bfq_queue *bfqq = RQ_BFQQ(rq); -+ struct bfq_data *bfqd = bfqq->bfqd; -+ const int sync = rq_is_sync(rq); -+ -+ /* -+ * NOTE: -+ * (bfqq->entity.service > bfqq->entity.budget) may hold here, -+ * in case of forced dispatches. 
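get_sdist() above just measures how far a new request lands from the last served position, a distance the scheduler later uses when classifying a queue as seeky. A trivial stand-alone equivalent, not part of the patch:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

/* Distance, in sectors, between the last served position and the start of
 * a new request; 0 when no previous position is known yet. */
static sector_t sector_distance(sector_t last_pos, sector_t rq_pos)
{
        if (!last_pos)
                return 0;
        return last_pos < rq_pos ? rq_pos - last_pos : last_pos - rq_pos;
}

int main(void)
{
        printf("%llu\n", (unsigned long long)sector_distance(1000, 1024));
        printf("%llu\n", (unsigned long long)sector_distance(2048, 1024));
        return 0;
}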
-+ */ -+ -+ if (bfqq->next_rq == rq) { -+ bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq); -+ bfq_updated_next_req(bfqd, bfqq); -+ } -+ -+ if (rq->queuelist.prev != &rq->queuelist) -+ list_del_init(&rq->queuelist); -+ BUG_ON(bfqq->queued[sync] == 0); -+ bfqq->queued[sync]--; -+ bfqd->queued--; -+ elv_rb_del(&bfqq->sort_list, rq); -+ -+ if (RB_EMPTY_ROOT(&bfqq->sort_list)) { -+ bfqq->next_rq = NULL; -+ -+ BUG_ON(bfqq->entity.budget < 0); -+ -+ if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->in_service_queue) { -+ BUG_ON(bfqq->ref < 2); /* referred by rq and on tree */ -+ bfq_del_bfqq_busy(bfqd, bfqq, false); -+ /* -+ * bfqq emptied. In normal operation, when -+ * bfqq is empty, bfqq->entity.service and -+ * bfqq->entity.budget must contain, -+ * respectively, the service received and the -+ * budget used last time bfqq emptied. These -+ * facts do not hold in this case, as at least -+ * this last removal occurred while bfqq is -+ * not in service. To avoid inconsistencies, -+ * reset both bfqq->entity.service and -+ * bfqq->entity.budget, if bfqq has still a -+ * process that may issue I/O requests to it. -+ */ -+ bfqq->entity.budget = bfqq->entity.service = 0; -+ } -+ -+ /* -+ * Remove queue from request-position tree as it is empty. -+ */ -+ if (bfqq->pos_root) { -+ rb_erase(&bfqq->pos_node, bfqq->pos_root); -+ bfqq->pos_root = NULL; -+ } -+ } else { -+ BUG_ON(!bfqq->next_rq); -+ bfq_pos_tree_add_move(bfqd, bfqq); -+ } -+ -+ if (rq->cmd_flags & REQ_META) { -+ BUG_ON(bfqq->meta_pending == 0); -+ bfqq->meta_pending--; -+ } -+ bfqg_stats_update_io_remove(bfqq_group(bfqq), rq->cmd_flags); -+} -+ -+static enum elv_merge bfq_merge(struct request_queue *q, struct request **req, -+ struct bio *bio) -+{ -+ struct bfq_data *bfqd = q->elevator->elevator_data; -+ struct request *__rq; -+ -+ __rq = bfq_find_rq_fmerge(bfqd, bio); -+ if (__rq && elv_bio_merge_ok(__rq, bio)) { -+ *req = __rq; -+ return ELEVATOR_FRONT_MERGE; -+ } -+ -+ return ELEVATOR_NO_MERGE; -+} -+ -+static void bfq_merged_request(struct request_queue *q, struct request *req, -+ enum elv_merge type) -+{ -+ if (type == ELEVATOR_FRONT_MERGE && -+ rb_prev(&req->rb_node) && -+ blk_rq_pos(req) < -+ blk_rq_pos(container_of(rb_prev(&req->rb_node), -+ struct request, rb_node))) { -+ struct bfq_queue *bfqq = RQ_BFQQ(req); -+ struct bfq_data *bfqd = bfqq->bfqd; -+ struct request *prev, *next_rq; -+ -+ /* Reposition request in its sort_list */ -+ elv_rb_del(&bfqq->sort_list, req); -+ elv_rb_add(&bfqq->sort_list, req); -+ /* Choose next request to be served for bfqq */ -+ prev = bfqq->next_rq; -+ next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req, -+ bfqd->last_position); -+ BUG_ON(!next_rq); -+ bfqq->next_rq = next_rq; -+ /* -+ * If next_rq changes, update both the queue's budget to -+ * fit the new request and the queue's position in its -+ * rq_pos_tree. -+ */ -+ if (prev != bfqq->next_rq) { -+ bfq_updated_next_req(bfqd, bfqq); -+ bfq_pos_tree_add_move(bfqd, bfqq); -+ } -+ } -+} -+ -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+static void bfq_bio_merged(struct request_queue *q, struct request *req, -+ struct bio *bio) -+{ -+ bfqg_stats_update_io_merged(bfqq_group(RQ_BFQQ(req)), bio->bi_opf); -+} -+#endif -+ -+static void bfq_merged_requests(struct request_queue *q, struct request *rq, -+ struct request *next) -+{ -+ struct bfq_queue *bfqq = RQ_BFQQ(rq), *next_bfqq = RQ_BFQQ(next); -+ -+ /* -+ * If next and rq belong to the same bfq_queue and next is older -+ * than rq, then reposition rq in the fifo (by substituting next -+ * with rq). 
Otherwise, if next and rq belong to different -+ * bfq_queues, never reposition rq: in fact, we would have to -+ * reposition it with respect to next's position in its own fifo, -+ * which would most certainly be too expensive with respect to -+ * the benefits. -+ */ -+ if (bfqq == next_bfqq && -+ !list_empty(&rq->queuelist) && !list_empty(&next->queuelist) && -+ next->fifo_time < rq->fifo_time) { -+ list_del_init(&rq->queuelist); -+ list_replace_init(&next->queuelist, &rq->queuelist); -+ rq->fifo_time = next->fifo_time; -+ } -+ -+ if (bfqq->next_rq == next) -+ bfqq->next_rq = rq; -+ -+ bfq_remove_request(next); -+ bfqg_stats_update_io_merged(bfqq_group(bfqq), next->cmd_flags); -+} -+ -+/* Must be called with bfqq != NULL */ -+static void bfq_bfqq_end_wr(struct bfq_queue *bfqq) -+{ -+ BUG_ON(!bfqq); -+ -+ if (bfq_bfqq_busy(bfqq)) { -+ bfqq->bfqd->wr_busy_queues--; -+ BUG_ON(bfqq->bfqd->wr_busy_queues < 0); -+ } -+ bfqq->wr_coeff = 1; -+ bfqq->wr_cur_max_time = 0; -+ bfqq->last_wr_start_finish = jiffies; -+ /* -+ * Trigger a weight change on the next invocation of -+ * __bfq_entity_update_weight_prio. -+ */ -+ bfqq->entity.prio_changed = 1; -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "wrais ending at %lu, rais_max_time %u", -+ bfqq->last_wr_start_finish, -+ jiffies_to_msecs(bfqq->wr_cur_max_time)); -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "wr_busy %d", -+ bfqq->bfqd->wr_busy_queues); -+} -+ -+static void bfq_end_wr_async_queues(struct bfq_data *bfqd, -+ struct bfq_group *bfqg) -+{ -+ int i, j; -+ -+ for (i = 0; i < 2; i++) -+ for (j = 0; j < IOPRIO_BE_NR; j++) -+ if (bfqg->async_bfqq[i][j]) -+ bfq_bfqq_end_wr(bfqg->async_bfqq[i][j]); -+ if (bfqg->async_idle_bfqq) -+ bfq_bfqq_end_wr(bfqg->async_idle_bfqq); -+} -+ -+static void bfq_end_wr(struct bfq_data *bfqd) -+{ -+ struct bfq_queue *bfqq; -+ -+ spin_lock_irq(bfqd->queue->queue_lock); -+ -+ list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list) -+ bfq_bfqq_end_wr(bfqq); -+ list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list) -+ bfq_bfqq_end_wr(bfqq); -+ bfq_end_wr_async(bfqd); -+ -+ spin_unlock_irq(bfqd->queue->queue_lock); -+} -+ -+static sector_t bfq_io_struct_pos(void *io_struct, bool request) -+{ -+ if (request) -+ return blk_rq_pos(io_struct); -+ else -+ return ((struct bio *)io_struct)->bi_iter.bi_sector; -+} -+ -+static int bfq_rq_close_to_sector(void *io_struct, bool request, -+ sector_t sector) -+{ -+ return abs(bfq_io_struct_pos(io_struct, request) - sector) <= -+ BFQQ_CLOSE_THR; -+} -+ -+static struct bfq_queue *bfqq_find_close(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq, -+ sector_t sector) -+{ -+ struct rb_root *root = &bfq_bfqq_to_bfqg(bfqq)->rq_pos_tree; -+ struct rb_node *parent, *node; -+ struct bfq_queue *__bfqq; -+ -+ if (RB_EMPTY_ROOT(root)) -+ return NULL; -+ -+ /* -+ * First, if we find a request starting at the end of the last -+ * request, choose it. -+ */ -+ __bfqq = bfq_rq_pos_tree_lookup(bfqd, root, sector, &parent, NULL); -+ if (__bfqq) -+ return __bfqq; -+ -+ /* -+ * If the exact sector wasn't found, the parent of the NULL leaf -+ * will contain the closest sector (rq_pos_tree sorted by -+ * next_request position). 
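The FIFO handling in bfq_merged_requests() above boils down to letting the surviving request adopt the earlier expiration time of the request merged into it, so the merge never delays service. A stand-alone sketch of just that rule (the real code also moves rq into next's position in the FIFO list); it is not part of the patch:

#include <stdbool.h>
#include <stdio.h>

struct req {
        int queue_id;             /* which bfq_queue the request belongs to */
        unsigned long fifo_time;  /* expiration time used for FIFO ordering */
        bool queued;              /* still on the FIFO list */
};

/* If both requests sit in the same queue's FIFO and 'next' is older,
 * 'rq' inherits next's expiration time; 'next' is then dropped. */
static void merged_requests(struct req *rq, struct req *next)
{
        if (rq->queue_id == next->queue_id &&
            rq->queued && next->queued &&
            next->fifo_time < rq->fifo_time)
                rq->fifo_time = next->fifo_time;

        next->queued = false;
}

int main(void)
{
        struct req rq   = { .queue_id = 1, .fifo_time = 200, .queued = true };
        struct req next = { .queue_id = 1, .fifo_time = 150, .queued = true };

        merged_requests(&rq, &next);
        printf("rq fifo_time: %lu\n", rq.fifo_time); /* 150 */
        return 0;
}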
-+ */ -+ __bfqq = rb_entry(parent, struct bfq_queue, pos_node); -+ if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector)) -+ return __bfqq; -+ -+ if (blk_rq_pos(__bfqq->next_rq) < sector) -+ node = rb_next(&__bfqq->pos_node); -+ else -+ node = rb_prev(&__bfqq->pos_node); -+ if (!node) -+ return NULL; -+ -+ __bfqq = rb_entry(node, struct bfq_queue, pos_node); -+ if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector)) -+ return __bfqq; -+ -+ return NULL; -+} -+ -+static struct bfq_queue *bfq_find_close_cooperator(struct bfq_data *bfqd, -+ struct bfq_queue *cur_bfqq, -+ sector_t sector) -+{ -+ struct bfq_queue *bfqq; -+ -+ /* -+ * We shall notice if some of the queues are cooperating, -+ * e.g., working closely on the same area of the device. In -+ * that case, we can group them together and: 1) don't waste -+ * time idling, and 2) serve the union of their requests in -+ * the best possible order for throughput. -+ */ -+ bfqq = bfqq_find_close(bfqd, cur_bfqq, sector); -+ if (!bfqq || bfqq == cur_bfqq) -+ return NULL; -+ -+ return bfqq; -+} -+ -+static struct bfq_queue * -+bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq) -+{ -+ int process_refs, new_process_refs; -+ struct bfq_queue *__bfqq; -+ -+ /* -+ * If there are no process references on the new_bfqq, then it is -+ * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain -+ * may have dropped their last reference (not just their last process -+ * reference). -+ */ -+ if (!bfqq_process_refs(new_bfqq)) -+ return NULL; -+ -+ /* Avoid a circular list and skip interim queue merges. */ -+ while ((__bfqq = new_bfqq->new_bfqq)) { -+ if (__bfqq == bfqq) -+ return NULL; -+ new_bfqq = __bfqq; -+ } -+ -+ process_refs = bfqq_process_refs(bfqq); -+ new_process_refs = bfqq_process_refs(new_bfqq); -+ /* -+ * If the process for the bfqq has gone away, there is no -+ * sense in merging the queues. -+ */ -+ if (process_refs == 0 || new_process_refs == 0) -+ return NULL; -+ -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d", -+ new_bfqq->pid); -+ -+ /* -+ * Merging is just a redirection: the requests of the process -+ * owning one of the two queues are redirected to the other queue. -+ * The latter queue, in its turn, is set as shared if this is the -+ * first time that the requests of some process are redirected to -+ * it. -+ * -+ * We redirect bfqq to new_bfqq and not the opposite, because we -+ * are in the context of the process owning bfqq, hence we have -+ * the io_cq of this process. So we can immediately configure this -+ * io_cq to redirect the requests of the process to new_bfqq. -+ * -+ * NOTE, even if new_bfqq coincides with the in-service queue, the -+ * io_cq of new_bfqq is not available, because, if the in-service -+ * queue is shared, bfqd->in_service_bic may not point to the -+ * io_cq of the in-service queue. -+ * Redirecting the requests of the process owning bfqq to the -+ * currently in-service queue is in any case the best option, as -+ * we feed the in-service queue with new requests close to the -+ * last request served and, by doing so, hopefully increase the -+ * throughput. 
-+ */ -+ bfqq->new_bfqq = new_bfqq; -+ new_bfqq->ref += process_refs; -+ return new_bfqq; -+} -+ -+static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq, -+ struct bfq_queue *new_bfqq) -+{ -+ if (bfq_too_late_for_merging(new_bfqq)) { -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "too late for bfq%d to be merged", -+ new_bfqq->pid); -+ return false; -+ } -+ -+ if (bfq_class_idle(bfqq) || bfq_class_idle(new_bfqq) || -+ (bfqq->ioprio_class != new_bfqq->ioprio_class)) -+ return false; -+ -+ /* -+ * If either of the queues has already been detected as seeky, -+ * then merging it with the other queue is unlikely to lead to -+ * sequential I/O. -+ */ -+ if (BFQQ_SEEKY(bfqq) || BFQQ_SEEKY(new_bfqq)) -+ return false; -+ -+ /* -+ * Interleaved I/O is known to be done by (some) applications -+ * only for reads, so it does not make sense to merge async -+ * queues. -+ */ -+ if (!bfq_bfqq_sync(bfqq) || !bfq_bfqq_sync(new_bfqq)) -+ return false; -+ -+ return true; -+} -+ -+/* -+ * Attempt to schedule a merge of bfqq with the currently in-service -+ * queue or with a close queue among the scheduled queues. Return -+ * NULL if no merge was scheduled, a pointer to the shared bfq_queue -+ * structure otherwise. -+ * -+ * The OOM queue is not allowed to participate to cooperation: in fact, since -+ * the requests temporarily redirected to the OOM queue could be redirected -+ * again to dedicated queues at any time, the state needed to correctly -+ * handle merging with the OOM queue would be quite complex and expensive -+ * to maintain. Besides, in such a critical condition as an out of memory, -+ * the benefits of queue merging may be little relevant, or even negligible. -+ * -+ * WARNING: queue merging may impair fairness among non-weight raised -+ * queues, for at least two reasons: 1) the original weight of a -+ * merged queue may change during the merged state, 2) even being the -+ * weight the same, a merged queue may be bloated with many more -+ * requests than the ones produced by its originally-associated -+ * process. -+ */ -+static struct bfq_queue * -+bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq, -+ void *io_struct, bool request) -+{ -+ struct bfq_queue *in_service_bfqq, *new_bfqq; -+ -+ /* -+ * Prevent bfqq from being merged if it has been created too -+ * long ago. The idea is that true cooperating processes, and -+ * thus their associated bfq_queues, are supposed to be -+ * created shortly after each other. This is the case, e.g., -+ * for KVM/QEMU and dump I/O threads. Basing on this -+ * assumption, the following filtering greatly reduces the -+ * probability that two non-cooperating processes, which just -+ * happen to do close I/O for some short time interval, have -+ * their queues merged by mistake. -+ */ -+ if (bfq_too_late_for_merging(bfqq)) { -+ bfq_log_bfqq(bfqd, bfqq, -+ "would have looked for coop, but too late"); -+ return NULL; -+ } -+ -+ if (bfqq->new_bfqq) -+ return bfqq->new_bfqq; -+ -+ if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq)) -+ return NULL; -+ -+ /* If there is only one backlogged queue, don't search. 
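bfq_setup_merge() above is essentially a reference-counting exercise: follow the redirection chain to its end, refuse cycles, and only schedule the merge when both ends still have process references. A simplified stand-alone model, not part of the patch; struct and field names are illustrative.

#include <stddef.h>
#include <stdio.h>

struct queue {
        int id;
        int process_refs;        /* references held by processes */
        int ref;                 /* total reference count */
        struct queue *new_queue; /* queue this one was redirected to, if any */
};

/* Walk the redirection links to the final target, give up on circular
 * chains or dead queues, then record the merge and transfer the process
 * references of 'q' to the target. */
static struct queue *setup_merge(struct queue *q, struct queue *target)
{
        struct queue *hop;

        if (!target->process_refs)
                return NULL;

        while ((hop = target->new_queue)) {
                if (hop == q)            /* circular chain: do not merge */
                        return NULL;
                target = hop;
        }

        if (!q->process_refs || !target->process_refs)
                return NULL;

        q->new_queue = target;
        target->ref += q->process_refs;
        return target;
}

int main(void)
{
        struct queue a = { .id = 1, .process_refs = 1, .ref = 1 };
        struct queue b = { .id = 2, .process_refs = 1, .ref = 1 };
        struct queue *t = setup_merge(&a, &b);

        printf("merge target: %d, target refs: %d\n", t ? t->id : -1, b.ref);
        return 0;
}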
*/ -+ if (bfq_tot_busy_queues(bfqd) == 1) -+ return NULL; -+ -+ in_service_bfqq = bfqd->in_service_queue; -+ -+ if (in_service_bfqq && in_service_bfqq != bfqq && -+ likely(in_service_bfqq != &bfqd->oom_bfqq) && -+ bfq_rq_close_to_sector(io_struct, request, bfqd->in_serv_last_pos) && -+ bfqq->entity.parent == in_service_bfqq->entity.parent && -+ bfq_may_be_close_cooperator(bfqq, in_service_bfqq)) { -+ new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq); -+ if (new_bfqq) -+ return new_bfqq; -+ } -+ /* -+ * Check whether there is a cooperator among currently scheduled -+ * queues. The only thing we need is that the bio/request is not -+ * NULL, as we need it to establish whether a cooperator exists. -+ */ -+ new_bfqq = bfq_find_close_cooperator(bfqd, bfqq, -+ bfq_io_struct_pos(io_struct, request)); -+ -+ BUG_ON(new_bfqq && bfqq->entity.parent != new_bfqq->entity.parent); -+ -+ if (new_bfqq && likely(new_bfqq != &bfqd->oom_bfqq) && -+ bfq_may_be_close_cooperator(bfqq, new_bfqq)) -+ return bfq_setup_merge(bfqq, new_bfqq); -+ -+ return NULL; -+} -+ -+static void bfq_bfqq_save_state(struct bfq_queue *bfqq) -+{ -+ struct bfq_io_cq *bic = bfqq->bic; -+ -+ /* -+ * If !bfqq->bic, the queue is already shared or its requests -+ * have already been redirected to a shared queue; both idle window -+ * and weight raising state have already been saved. Do nothing. -+ */ -+ if (!bic) -+ return; -+ -+ bic->saved_has_short_ttime = bfq_bfqq_has_short_ttime(bfqq); -+ bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq); -+ bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq); -+ bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node); -+ if (unlikely(bfq_bfqq_just_created(bfqq) && -+ !bfq_bfqq_in_large_burst(bfqq) && -+ bfqq->bfqd->low_latency)) { -+ /* -+ * bfqq being merged ritgh after being created: bfqq -+ * would have deserved interactive weight raising, but -+ * did not make it to be set in a weight-raised state, -+ * because of this early merge. Store directly the -+ * weight-raising state that would have been assigned -+ * to bfqq, so that to avoid that bfqq unjustly fails -+ * to enjoy weight raising if split soon. -+ */ -+ bic->saved_wr_coeff = bfqq->bfqd->bfq_wr_coeff; -+ bic->saved_wr_cur_max_time = bfq_wr_duration(bfqq->bfqd); -+ bic->saved_last_wr_start_finish = jiffies; -+ } else { -+ bic->saved_wr_coeff = bfqq->wr_coeff; -+ bic->saved_wr_start_at_switch_to_srt = -+ bfqq->wr_start_at_switch_to_srt; -+ bic->saved_last_wr_start_finish = bfqq->last_wr_start_finish; -+ bic->saved_wr_cur_max_time = bfqq->wr_cur_max_time; -+ } -+ BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish)); -+} -+ -+static void bfq_get_bic_reference(struct bfq_queue *bfqq) -+{ -+ /* -+ * If bfqq->bic has a non-NULL value, the bic to which it belongs -+ * is about to begin using a shared bfq_queue. -+ */ -+ if (bfqq->bic) -+ atomic_long_inc(&bfqq->bic->icq.ioc->refcount); -+} -+ -+static void -+bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic, -+ struct bfq_queue *bfqq, struct bfq_queue *new_bfqq) -+{ -+ bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu", -+ (unsigned long) new_bfqq->pid); -+ /* Save weight raising and idle window of the merged queues */ -+ bfq_bfqq_save_state(bfqq); -+ bfq_bfqq_save_state(new_bfqq); -+ if (bfq_bfqq_IO_bound(bfqq)) -+ bfq_mark_bfqq_IO_bound(new_bfqq); -+ bfq_clear_bfqq_IO_bound(bfqq); -+ -+ /* -+ * If bfqq is weight-raised, then let new_bfqq inherit -+ * weight-raising. 
To reduce false positives, neglect the case -+ * where bfqq has just been created, but has not yet made it -+ * to be weight-raised (which may happen because EQM may merge -+ * bfqq even before bfq_add_request is executed for the first -+ * time for bfqq). Handling this case would however be very -+ * easy, thanks to the flag just_created. -+ */ -+ if (new_bfqq->wr_coeff == 1 && bfqq->wr_coeff > 1) { -+ new_bfqq->wr_coeff = bfqq->wr_coeff; -+ new_bfqq->wr_cur_max_time = bfqq->wr_cur_max_time; -+ new_bfqq->last_wr_start_finish = bfqq->last_wr_start_finish; -+ new_bfqq->wr_start_at_switch_to_srt = -+ bfqq->wr_start_at_switch_to_srt; -+ if (bfq_bfqq_busy(new_bfqq)) { -+ bfqd->wr_busy_queues++; -+ BUG_ON(bfqd->wr_busy_queues > -+ bfq_tot_busy_queues(bfqd)); -+ } -+ -+ new_bfqq->entity.prio_changed = 1; -+ bfq_log_bfqq(bfqd, new_bfqq, -+ "wr start after merge with %d, rais_max_time %u", -+ bfqq->pid, -+ jiffies_to_msecs(bfqq->wr_cur_max_time)); -+ } -+ -+ if (bfqq->wr_coeff > 1) { /* bfqq has given its wr to new_bfqq */ -+ bfqq->wr_coeff = 1; -+ bfqq->entity.prio_changed = 1; -+ if (bfq_bfqq_busy(bfqq)) { -+ bfqd->wr_busy_queues--; -+ BUG_ON(bfqd->wr_busy_queues < 0); -+ } -+ -+ } -+ -+ bfq_log_bfqq(bfqd, new_bfqq, "wr_busy %d", -+ bfqd->wr_busy_queues); -+ -+ /* -+ * Grab a reference to the bic, to prevent it from being destroyed -+ * before being possibly touched by a bfq_split_bfqq(). -+ */ -+ bfq_get_bic_reference(bfqq); -+ bfq_get_bic_reference(new_bfqq); -+ /* -+ * Merge queues (that is, let bic redirect its requests to new_bfqq) -+ */ -+ bic_set_bfqq(bic, new_bfqq, 1); -+ bfq_mark_bfqq_coop(new_bfqq); -+ /* -+ * new_bfqq now belongs to at least two bics (it is a shared queue): -+ * set new_bfqq->bic to NULL. bfqq either: -+ * - does not belong to any bic any more, and hence bfqq->bic must -+ * be set to NULL, or -+ * - is a queue whose owning bics have already been redirected to a -+ * different queue, hence the queue is destined to not belong to -+ * any bic soon and bfqq->bic is already NULL (therefore the next -+ * assignment causes no harm). -+ */ -+ new_bfqq->bic = NULL; -+ bfqq->bic = NULL; -+ /* release process reference to bfqq */ -+ bfq_put_queue(bfqq); -+} -+ -+static int bfq_allow_bio_merge(struct request_queue *q, struct request *rq, -+ struct bio *bio) -+{ -+ struct bfq_data *bfqd = q->elevator->elevator_data; -+ bool is_sync = op_is_sync(bio->bi_opf); -+ struct bfq_io_cq *bic; -+ struct bfq_queue *bfqq, *new_bfqq; -+ -+ /* -+ * Disallow merge of a sync bio into an async request. -+ */ -+ if (is_sync && !rq_is_sync(rq)) -+ return false; -+ -+ /* -+ * Lookup the bfqq that this bio will be queued with. Allow -+ * merge only if rq is queued there. -+ * Queue lock is held here. -+ */ -+ bic = bfq_bic_lookup(bfqd, current->io_context); -+ if (!bic) -+ return false; -+ -+ bfqq = bic_to_bfqq(bic, is_sync); -+ /* -+ * We take advantage of this function to perform an early merge -+ * of the queues of possible cooperating processes. -+ */ -+ if (bfqq) { -+ new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false); -+ if (new_bfqq) { -+ bfq_merge_bfqqs(bfqd, bic, bfqq, new_bfqq); -+ /* -+ * If we get here, the bio will be queued in the -+ * shared queue, i.e., new_bfqq, so use new_bfqq -+ * to decide whether bio and rq can be merged. 
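The weight-raising hand-over in bfq_merge_bfqqs() above can be summarized in a few lines: the surviving queue inherits the raising state only if it is not raised already, and the source queue always falls back to a normal weight. A stand-alone sketch with illustrative numbers, not part of the patch:

#include <stdio.h>

struct wr_state {
        unsigned int coeff;         /* 1 means not weight-raised */
        unsigned long cur_max_time; /* residual duration of the raising period */
        unsigned long start;        /* when the current period started */
};

static void hand_over_weight_raising(struct wr_state *src, struct wr_state *dst)
{
        if (dst->coeff == 1 && src->coeff > 1) {
                dst->coeff = src->coeff;
                dst->cur_max_time = src->cur_max_time;
                dst->start = src->start;
        }
        if (src->coeff > 1)         /* src has given its raising away */
                src->coeff = 1;
}

int main(void)
{
        struct wr_state src = { .coeff = 30, .cur_max_time = 6000, .start = 100 };
        struct wr_state dst = { .coeff = 1 };

        hand_over_weight_raising(&src, &dst);
        printf("dst coeff %u, src coeff %u\n", dst.coeff, src.coeff);
        return 0;
}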
-+ */ -+ bfqq = new_bfqq; -+ } -+ } -+ -+ return bfqq == RQ_BFQQ(rq); -+} -+ -+static int bfq_allow_rq_merge(struct request_queue *q, struct request *rq, -+ struct request *next) -+{ -+ return RQ_BFQQ(rq) == RQ_BFQQ(next); -+} -+ -+/* -+ * Set the maximum time for the in-service queue to consume its -+ * budget. This prevents seeky processes from lowering the throughput. -+ * In practice, a time-slice service scheme is used with seeky -+ * processes. -+ */ -+static void bfq_set_budget_timeout(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq) -+{ -+ unsigned int timeout_coeff; -+ -+ if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time) -+ timeout_coeff = 1; -+ else -+ timeout_coeff = bfqq->entity.weight / bfqq->entity.orig_weight; -+ -+ bfqd->last_budget_start = ktime_get(); -+ -+ bfqq->budget_timeout = jiffies + -+ bfqd->bfq_timeout * timeout_coeff; -+ -+ bfq_log_bfqq(bfqd, bfqq, "%u", -+ jiffies_to_msecs(bfqd->bfq_timeout * timeout_coeff)); -+} -+ -+static void __bfq_set_in_service_queue(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq) -+{ -+ if (bfqq) { -+ bfqg_stats_update_avg_queue_size(bfqq_group(bfqq)); -+ bfq_mark_bfqq_must_alloc(bfqq); -+ bfq_clear_bfqq_fifo_expire(bfqq); -+ -+ bfqd->budgets_assigned = (bfqd->budgets_assigned*7 + 256) / 8; -+ -+ BUG_ON(bfqq == bfqd->in_service_queue); -+ BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list)); -+ -+ if (time_is_before_jiffies(bfqq->last_wr_start_finish) && -+ bfqq->wr_coeff > 1 && -+ bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time && -+ time_is_before_jiffies(bfqq->budget_timeout)) { -+ /* -+ * For soft real-time queues, move the start -+ * of the weight-raising period forward by the -+ * time the queue has not received any -+ * service. Otherwise, a relatively long -+ * service delay is likely to cause the -+ * weight-raising period of the queue to end, -+ * because of the short duration of the -+ * weight-raising period of a soft real-time -+ * queue. It is worth noting that this move -+ * is not so dangerous for the other queues, -+ * because soft real-time queues are not -+ * greedy. -+ * -+ * To not add a further variable, we use the -+ * overloaded field budget_timeout to -+ * determine for how long the queue has not -+ * received service, i.e., how much time has -+ * elapsed since the queue expired. However, -+ * this is a little imprecise, because -+ * budget_timeout is set to jiffies if bfqq -+ * not only expires, but also remains with no -+ * request. -+ */ -+ if (time_after(bfqq->budget_timeout, -+ bfqq->last_wr_start_finish)) -+ bfqq->last_wr_start_finish += -+ jiffies - bfqq->budget_timeout; -+ else -+ bfqq->last_wr_start_finish = jiffies; -+ -+ if (time_is_after_jiffies(bfqq->last_wr_start_finish)) { -+ pr_crit( -+ "BFQ WARNING:last %lu budget %lu jiffies %lu", -+ bfqq->last_wr_start_finish, -+ bfqq->budget_timeout, -+ jiffies); -+ pr_crit("diff %lu", jiffies - -+ max_t(unsigned long, -+ bfqq->last_wr_start_finish, -+ bfqq->budget_timeout)); -+ bfqq->last_wr_start_finish = jiffies; -+ } -+ } -+ -+ bfq_set_budget_timeout(bfqd, bfqq); -+ bfq_log_bfqq(bfqd, bfqq, -+ "cur-budget = %d prio_class %d", -+ bfqq->entity.budget, bfqq->ioprio_class); -+ } else -+ bfq_log(bfqd, "NULL"); -+ -+ bfqd->in_service_queue = bfqq; -+} -+ -+/* -+ * Get and set a new queue for service. 
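bfq_set_budget_timeout() above gives the in-service queue a slice whose length scales with how much its weight has been raised, except for soft real-time raising, where the factor stays 1. A stand-alone sketch, not part of the patch; the tick values are illustrative.

#include <stdio.h>

/* Slice length: base timeout multiplied by weight / original weight,
 * unless the queue is being raised as soft real-time. */
static unsigned long budget_timeout(unsigned long now,
                                    unsigned long base_timeout,
                                    unsigned int weight,
                                    unsigned int orig_weight,
                                    int soft_rt_raised)
{
        unsigned int coeff = soft_rt_raised ? 1 : weight / orig_weight;

        return now + base_timeout * coeff;
}

int main(void)
{
        /* weight raised 30x over a base timeout of 125 ticks */
        printf("%lu\n", budget_timeout(1000, 125, 30 * 100, 100, 0));
        return 0;
}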
-+ */ -+static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd) -+{ -+ struct bfq_queue *bfqq = bfq_get_next_queue(bfqd); -+ -+ __bfq_set_in_service_queue(bfqd, bfqq); -+ return bfqq; -+} -+ -+static void bfq_arm_slice_timer(struct bfq_data *bfqd) -+{ -+ struct bfq_queue *bfqq = bfqd->in_service_queue; -+ struct bfq_io_cq *bic; -+ u32 sl; -+ -+ BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list)); -+ -+ /* Processes have exited, don't wait. */ -+ bic = bfqd->in_service_bic; -+ if (!bic || atomic_read(&bic->icq.ioc->active_ref) == 0) -+ return; -+ -+ bfq_mark_bfqq_wait_request(bfqq); -+ -+ /* -+ * We don't want to idle for seeks, but we do want to allow -+ * fair distribution of slice time for a process doing back-to-back -+ * seeks. So allow a little bit of time for him to submit a new rq. -+ * -+ * To prevent processes with (partly) seeky workloads from -+ * being too ill-treated, grant them a small fraction of the -+ * assigned budget before reducing the waiting time to -+ * BFQ_MIN_TT. This happened to help reduce latency. -+ */ -+ sl = bfqd->bfq_slice_idle; -+ /* -+ * Unless the queue is being weight-raised or the scenario is -+ * asymmetric, grant only minimum idle time if the queue -+ * is seeky. A long idling is preserved for a weight-raised -+ * queue, or, more in general, in an asymemtric scenario, -+ * because a long idling is needed for guaranteeing to a queue -+ * its reserved share of the throughput (in particular, it is -+ * needed if the queue has a higher weight than some other -+ * queue). -+ */ -+ if (BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 && -+ bfq_symmetric_scenario(bfqd)) -+ sl = min_t(u32, sl, BFQ_MIN_TT); -+ -+ bfqd->last_idling_start = ktime_get(); -+ hrtimer_start(&bfqd->idle_slice_timer, ns_to_ktime(sl), -+ HRTIMER_MODE_REL); -+ bfqg_stats_set_start_idle_time(bfqq_group(bfqq)); -+ bfq_log(bfqd, "arm idle: %ld/%ld ms", -+ sl / NSEC_PER_MSEC, bfqd->bfq_slice_idle / NSEC_PER_MSEC); -+} -+ -+/* -+ * In autotuning mode, max_budget is dynamically recomputed as the -+ * amount of sectors transferred in timeout at the estimated peak -+ * rate. This enables BFQ to utilize a full timeslice with a full -+ * budget, even if the in-service queue is served at peak rate. And -+ * this maximises throughput with sequential workloads. -+ */ -+static unsigned long bfq_calc_max_budget(struct bfq_data *bfqd) -+{ -+ return (u64)bfqd->peak_rate * USEC_PER_MSEC * -+ jiffies_to_msecs(bfqd->bfq_timeout)>>BFQ_RATE_SHIFT; -+} -+ -+/* -+ * Update parameters related to throughput and responsiveness, as a -+ * function of the estimated peak rate. See comments on -+ * bfq_calc_max_budget(), and on the ref_wr_duration array. -+ */ -+static void update_thr_responsiveness_params(struct bfq_data *bfqd) -+{ -+ if (bfqd->bfq_user_max_budget == 0) { -+ bfqd->bfq_max_budget = -+ bfq_calc_max_budget(bfqd); -+ BUG_ON(bfqd->bfq_max_budget < 0); -+ bfq_log(bfqd, "new max_budget = %d", -+ bfqd->bfq_max_budget); -+ } -+} -+ -+static void bfq_reset_rate_computation(struct bfq_data *bfqd, struct request *rq) -+{ -+ if (rq != NULL) { /* new rq dispatch now, reset accordingly */ -+ bfqd->last_dispatch = bfqd->first_dispatch = ktime_get_ns() ; -+ bfqd->peak_rate_samples = 1; -+ bfqd->sequential_samples = 0; -+ bfqd->tot_sectors_dispatched = bfqd->last_rq_max_size = -+ blk_rq_sectors(rq); -+ } else /* no new rq dispatched, just reset the number of samples */ -+ bfqd->peak_rate_samples = 0; /* full re-init on next disp. 
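bfq_calc_max_budget() above is a unit conversion: sectors per microsecond (kept in fixed point) times the budget timeout in milliseconds gives the number of sectors the device can move in one timeslice at the estimated peak rate. A stand-alone sketch; the 16-bit fixed-point shift and the numbers in main() are assumptions for illustration only.

#include <stdint.h>
#include <stdio.h>

#define RATE_SHIFT 16  /* assumed fixed-point shift, standing in for BFQ_RATE_SHIFT */

/* max budget = peak_rate [sectors/usec << RATE_SHIFT] * 1000 usec/msec
 *              * timeout [msec], shifted back to plain sectors. */
static uint64_t calc_max_budget(uint64_t peak_rate_fp, unsigned int timeout_ms)
{
        return (peak_rate_fp * 1000ULL * timeout_ms) >> RATE_SHIFT;
}

int main(void)
{
        /* ~200 MB/s is roughly 0.4 sectors per microsecond */
        uint64_t rate_fp = (uint64_t)(0.4 * (1 << RATE_SHIFT));

        printf("max budget: %llu sectors\n",
               (unsigned long long)calc_max_budget(rate_fp, 125));
        return 0;
}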
*/ -+ -+ bfq_log(bfqd, -+ "at end, sample %u/%u tot_sects %llu", -+ bfqd->peak_rate_samples, bfqd->sequential_samples, -+ bfqd->tot_sectors_dispatched); -+} -+ -+static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq) -+{ -+ u32 rate, weight, divisor; -+ -+ /* -+ * For the convergence property to hold (see comments on -+ * bfq_update_peak_rate()) and for the assessment to be -+ * reliable, a minimum number of samples must be present, and -+ * a minimum amount of time must have elapsed. If not so, do -+ * not compute new rate. Just reset parameters, to get ready -+ * for a new evaluation attempt. -+ */ -+ if (bfqd->peak_rate_samples < BFQ_RATE_MIN_SAMPLES || -+ bfqd->delta_from_first < BFQ_RATE_MIN_INTERVAL) { -+ bfq_log(bfqd, -+ "only resetting, delta_first %lluus samples %d", -+ bfqd->delta_from_first>>10, bfqd->peak_rate_samples); -+ goto reset_computation; -+ } -+ -+ /* -+ * If a new request completion has occurred after last -+ * dispatch, then, to approximate the rate at which requests -+ * have been served by the device, it is more precise to -+ * extend the observation interval to the last completion. -+ */ -+ bfqd->delta_from_first = -+ max_t(u64, bfqd->delta_from_first, -+ bfqd->last_completion - bfqd->first_dispatch); -+ -+ BUG_ON(bfqd->delta_from_first == 0); -+ /* -+ * Rate computed in sects/usec, and not sects/nsec, for -+ * precision issues. -+ */ -+ rate = div64_ul(bfqd->tot_sectors_dispatched<delta_from_first, NSEC_PER_USEC)); -+ -+ bfq_log(bfqd, -+"tot_sects %llu delta_first %lluus rate %llu sects/s (%d)", -+ bfqd->tot_sectors_dispatched, bfqd->delta_from_first>>10, -+ ((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT), -+ rate > 20< 20M sectors/sec) -+ */ -+ if ((bfqd->sequential_samples < (3 * bfqd->peak_rate_samples)>>2 && -+ rate <= bfqd->peak_rate) || -+ rate > 20<peak_rate_samples, bfqd->sequential_samples, -+ ((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT), -+ ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT)); -+ goto reset_computation; -+ } else { -+ bfq_log(bfqd, -+ "do update, samples %u/%u rate/peak %llu/%llu", -+ bfqd->peak_rate_samples, bfqd->sequential_samples, -+ ((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT), -+ ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT)); -+ } -+ -+ /* -+ * We have to update the peak rate, at last! To this purpose, -+ * we use a low-pass filter. We compute the smoothing constant -+ * of the filter as a function of the 'weight' of the new -+ * measured rate. -+ * -+ * As can be seen in next formulas, we define this weight as a -+ * quantity proportional to how sequential the workload is, -+ * and to how long the observation time interval is. -+ * -+ * The weight runs from 0 to 8. The maximum value of the -+ * weight, 8, yields the minimum value for the smoothing -+ * constant. At this minimum value for the smoothing constant, -+ * the measured rate contributes for half of the next value of -+ * the estimated peak rate. -+ * -+ * So, the first step is to compute the weight as a function -+ * of how sequential the workload is. Note that the weight -+ * cannot reach 9, because bfqd->sequential_samples cannot -+ * become equal to bfqd->peak_rate_samples, which, in its -+ * turn, holds true because bfqd->sequential_samples is not -+ * incremented for the first sample. -+ */ -+ weight = (9 * bfqd->sequential_samples) / bfqd->peak_rate_samples; -+ -+ /* -+ * Second step: further refine the weight as a function of the -+ * duration of the observation interval. 
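The smoothing scheme described above (and completed just below) is a simple weighted low-pass filter: the weight of the new rate sample grows with how sequential the sampled I/O was and with how long the observation interval lasted, and the divisor derived from that weight decides how much of the old estimate survives. A stand-alone sketch with plain integers, not part of the patch:

#include <stdint.h>
#include <stdio.h>

static uint64_t update_peak_rate(uint64_t peak_rate, uint64_t new_rate,
                                 unsigned int seq_samples,
                                 unsigned int tot_samples,
                                 uint64_t interval, uint64_t ref_interval)
{
        /* first step: 0..8, proportional to how sequential the I/O was */
        unsigned int weight = 9 * seq_samples / tot_samples;
        unsigned int divisor;

        /* second step: scale the weight down for short observation intervals */
        weight = weight * interval / ref_interval;
        if (weight > 8)
                weight = 8;

        divisor = 10 - weight;  /* 10 for weight 0 ... 2 for weight 8 */

        /* peak = peak * (divisor - 1) / divisor + rate / divisor */
        return peak_rate * (divisor - 1) / divisor + new_rate / divisor;
}

int main(void)
{
        /* fully sequential sampling over a full reference interval:
         * the new sample contributes half of the updated estimate */
        printf("%llu\n", (unsigned long long)
               update_peak_rate(1000, 2000, 100, 100, 1, 1)); /* 1500 */
        return 0;
}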
-+ */ -+ weight = min_t(u32, 8, -+ div_u64(weight * bfqd->delta_from_first, -+ BFQ_RATE_REF_INTERVAL)); -+ -+ /* -+ * Divisor ranging from 10, for minimum weight, to 2, for -+ * maximum weight. -+ */ -+ divisor = 10 - weight; -+ BUG_ON(divisor == 0); -+ -+ /* -+ * Finally, update peak rate: -+ * -+ * peak_rate = peak_rate * (divisor-1) / divisor + rate / divisor -+ */ -+ bfqd->peak_rate *= divisor-1; -+ bfqd->peak_rate /= divisor; -+ rate /= divisor; /* smoothing constant alpha = 1/divisor */ -+ -+ bfq_log(bfqd, -+ "divisor %d tmp_peak_rate %llu tmp_rate %u", -+ divisor, -+ ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT), -+ (u32)((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT)); -+ -+ BUG_ON(bfqd->peak_rate == 0); -+ BUG_ON(bfqd->peak_rate > 20<peak_rate += rate; -+ -+ /* -+ * For a very slow device, bfqd->peak_rate can reach 0 (see -+ * the minimum representable values reported in the comments -+ * on BFQ_RATE_SHIFT). Push to 1 if this happens, to avoid -+ * divisions by zero where bfqd->peak_rate is used as a -+ * divisor. -+ */ -+ bfqd->peak_rate = max_t(u32, 1, bfqd->peak_rate); -+ -+ update_thr_responsiveness_params(bfqd); -+ BUG_ON(bfqd->peak_rate > 20<peak_rate_samples == 0) { /* first dispatch */ -+ bfq_log(bfqd, -+ "goto reset, samples %d", -+ bfqd->peak_rate_samples) ; -+ bfq_reset_rate_computation(bfqd, rq); -+ goto update_last_values; /* will add one sample */ -+ } -+ -+ /* -+ * Device idle for very long: the observation interval lasting -+ * up to this dispatch cannot be a valid observation interval -+ * for computing a new peak rate (similarly to the late- -+ * completion event in bfq_completed_request()). Go to -+ * update_rate_and_reset to have the following three steps -+ * taken: -+ * - close the observation interval at the last (previous) -+ * request dispatch or completion -+ * - compute rate, if possible, for that observation interval -+ * - start a new observation interval with this dispatch -+ */ -+ if (now_ns - bfqd->last_dispatch > 100*NSEC_PER_MSEC && -+ bfqd->rq_in_driver == 0) { -+ bfq_log(bfqd, -+"jumping to updating&resetting delta_last %lluus samples %d", -+ (now_ns - bfqd->last_dispatch)>>10, -+ bfqd->peak_rate_samples) ; -+ goto update_rate_and_reset; -+ } -+ -+ /* Update sampling information */ -+ bfqd->peak_rate_samples++; -+ -+ if ((bfqd->rq_in_driver > 0 || -+ now_ns - bfqd->last_completion < BFQ_MIN_TT) -+ && !BFQ_RQ_SEEKY(bfqd, bfqd->last_position, rq)) -+ bfqd->sequential_samples++; -+ -+ bfqd->tot_sectors_dispatched += blk_rq_sectors(rq); -+ -+ /* Reset max observed rq size every 32 dispatches */ -+ if (likely(bfqd->peak_rate_samples % 32)) -+ bfqd->last_rq_max_size = -+ max_t(u32, blk_rq_sectors(rq), bfqd->last_rq_max_size); -+ else -+ bfqd->last_rq_max_size = blk_rq_sectors(rq); -+ -+ bfqd->delta_from_first = now_ns - bfqd->first_dispatch; -+ -+ bfq_log(bfqd, -+ "added samples %u/%u tot_sects %llu delta_first %lluus", -+ bfqd->peak_rate_samples, bfqd->sequential_samples, -+ bfqd->tot_sectors_dispatched, -+ bfqd->delta_from_first>>10); -+ -+ /* Target observation interval not yet reached, go on sampling */ -+ if (bfqd->delta_from_first < BFQ_RATE_REF_INTERVAL) -+ goto update_last_values; -+ -+update_rate_and_reset: -+ bfq_update_rate_reset(bfqd, rq); -+update_last_values: -+ bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq); -+ if (RQ_BFQQ(rq) == bfqd->in_service_queue) -+ bfqd->in_serv_last_pos = bfqd->last_position; -+ bfqd->last_dispatch = now_ns; -+ -+ bfq_log(bfqd, -+ "delta_first %lluus last_pos %llu peak_rate %llu", -+ (now_ns - 
bfqd->first_dispatch)>>10, -+ (unsigned long long) bfqd->last_position, -+ ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT)); -+ bfq_log(bfqd, -+ "samples at end %d", bfqd->peak_rate_samples); -+} -+ -+/* -+ * Move request from internal lists to the dispatch list of the request queue -+ */ -+static void bfq_dispatch_insert(struct request_queue *q, struct request *rq) -+{ -+ struct bfq_queue *bfqq = RQ_BFQQ(rq); -+ -+ /* -+ * For consistency, the next instruction should have been executed -+ * after removing the request from the queue and dispatching it. -+ * We execute instead this instruction before bfq_remove_request() -+ * (and hence introduce a temporary inconsistency), for efficiency. -+ * In fact, in a forced_dispatch, this prevents two counters related -+ * to bfqq->dispatched to risk to be uselessly decremented if bfqq -+ * is not in service, and then to be incremented again after -+ * incrementing bfqq->dispatched. -+ */ -+ bfqq->dispatched++; -+ bfq_update_peak_rate(q->elevator->elevator_data, rq); -+ -+ bfq_remove_request(rq); -+ elv_dispatch_sort(q, rq); -+} -+ -+static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq) -+{ -+ BUG_ON(bfqq != bfqd->in_service_queue); -+ -+ /* -+ * If this bfqq is shared between multiple processes, check -+ * to make sure that those processes are still issuing I/Os -+ * within the mean seek distance. If not, it may be time to -+ * break the queues apart again. -+ */ -+ if (bfq_bfqq_coop(bfqq) && BFQQ_SEEKY(bfqq)) -+ bfq_mark_bfqq_split_coop(bfqq); -+ -+ if (RB_EMPTY_ROOT(&bfqq->sort_list)) { -+ if (bfqq->dispatched == 0) -+ /* -+ * Overloading budget_timeout field to store -+ * the time at which the queue remains with no -+ * backlog and no outstanding request; used by -+ * the weight-raising mechanism. -+ */ -+ bfqq->budget_timeout = jiffies; -+ -+ bfq_del_bfqq_busy(bfqd, bfqq, true); -+ } else { -+ bfq_requeue_bfqq(bfqd, bfqq, true); -+ /* -+ * Resort priority tree of potential close cooperators. -+ */ -+ bfq_pos_tree_add_move(bfqd, bfqq); -+ } -+ -+ /* -+ * All in-service entities must have been properly deactivated -+ * or requeued before executing the next function, which -+ * resets all in-service entites as no more in service. -+ */ -+ __bfq_bfqd_reset_in_service(bfqd); -+} -+ -+/** -+ * __bfq_bfqq_recalc_budget - try to adapt the budget to the @bfqq behavior. -+ * @bfqd: device data. -+ * @bfqq: queue to update. -+ * @reason: reason for expiration. -+ * -+ * Handle the feedback on @bfqq budget at queue expiration. -+ * See the body for detailed comments. -+ */ -+static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq, -+ enum bfqq_expiration reason) -+{ -+ struct request *next_rq; -+ int budget, min_budget; -+ -+ BUG_ON(bfqq != bfqd->in_service_queue); -+ -+ min_budget = bfq_min_budget(bfqd); -+ -+ if (bfqq->wr_coeff == 1) -+ budget = bfqq->max_budget; -+ else /* -+ * Use a constant, low budget for weight-raised queues, -+ * to help achieve a low latency. Keep it slightly higher -+ * than the minimum possible budget, to cause a little -+ * bit fewer expirations. 
-+ */ -+ budget = 2 * min_budget; -+ -+ bfq_log_bfqq(bfqd, bfqq, "last budg %d, budg left %d", -+ bfqq->entity.budget, bfq_bfqq_budget_left(bfqq)); -+ bfq_log_bfqq(bfqd, bfqq, "last max_budg %d, min budg %d", -+ budget, bfq_min_budget(bfqd)); -+ bfq_log_bfqq(bfqd, bfqq, "sync %d, seeky %d", -+ bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue)); -+ -+ if (bfq_bfqq_sync(bfqq) && bfqq->wr_coeff == 1) { -+ switch (reason) { -+ /* -+ * Caveat: in all the following cases we trade latency -+ * for throughput. -+ */ -+ case BFQ_BFQQ_TOO_IDLE: -+ /* -+ * This is the only case where we may reduce -+ * the budget: if there is no request of the -+ * process still waiting for completion, then -+ * we assume (tentatively) that the timer has -+ * expired because the batch of requests of -+ * the process could have been served with a -+ * smaller budget. Hence, betting that -+ * process will behave in the same way when it -+ * becomes backlogged again, we reduce its -+ * next budget. As long as we guess right, -+ * this budget cut reduces the latency -+ * experienced by the process. -+ * -+ * However, if there are still outstanding -+ * requests, then the process may have not yet -+ * issued its next request just because it is -+ * still waiting for the completion of some of -+ * the still outstanding ones. So in this -+ * subcase we do not reduce its budget, on the -+ * contrary we increase it to possibly boost -+ * the throughput, as discussed in the -+ * comments to the BUDGET_TIMEOUT case. -+ */ -+ if (bfqq->dispatched > 0) /* still outstanding reqs */ -+ budget = min(budget * 2, bfqd->bfq_max_budget); -+ else { -+ if (budget > 5 * min_budget) -+ budget -= 4 * min_budget; -+ else -+ budget = min_budget; -+ } -+ break; -+ case BFQ_BFQQ_BUDGET_TIMEOUT: -+ /* -+ * We double the budget here because it gives -+ * the chance to boost the throughput if this -+ * is not a seeky process (and has bumped into -+ * this timeout because of, e.g., ZBR). -+ */ -+ budget = min(budget * 2, bfqd->bfq_max_budget); -+ break; -+ case BFQ_BFQQ_BUDGET_EXHAUSTED: -+ /* -+ * The process still has backlog, and did not -+ * let either the budget timeout or the disk -+ * idling timeout expire. Hence it is not -+ * seeky, has a short thinktime and may be -+ * happy with a higher budget too. So -+ * definitely increase the budget of this good -+ * candidate to boost the disk throughput. -+ */ -+ budget = min(budget * 4, bfqd->bfq_max_budget); -+ break; -+ case BFQ_BFQQ_NO_MORE_REQUESTS: -+ /* -+ * For queues that expire for this reason, it -+ * is particularly important to keep the -+ * budget close to the actual service they -+ * need. Doing so reduces the timestamp -+ * misalignment problem described in the -+ * comments in the body of -+ * __bfq_activate_entity. In fact, suppose -+ * that a queue systematically expires for -+ * BFQ_BFQQ_NO_MORE_REQUESTS and presents a -+ * new request in time to enjoy timestamp -+ * back-shifting. The larger the budget of the -+ * queue is with respect to the service the -+ * queue actually requests in each service -+ * slot, the more times the queue can be -+ * reactivated with the same virtual finish -+ * time. It follows that, even if this finish -+ * time is pushed to the system virtual time -+ * to reduce the consequent timestamp -+ * misalignment, the queue unjustly enjoys for -+ * many re-activations a lower finish time -+ * than all newly activated queues. -+ * -+ * The service needed by bfqq is measured -+ * quite precisely by bfqq->entity.service. 
-+ * Since bfqq does not enjoy device idling, -+ * bfqq->entity.service is equal to the number -+ * of sectors that the process associated with -+ * bfqq requested to read/write before waiting -+ * for request completions, or blocking for -+ * other reasons. -+ */ -+ budget = max_t(int, bfqq->entity.service, min_budget); -+ break; -+ default: -+ return; -+ } -+ } else if (!bfq_bfqq_sync(bfqq)) -+ /* -+ * Async queues get always the maximum possible -+ * budget, as for them we do not care about latency -+ * (in addition, their ability to dispatch is limited -+ * by the charging factor). -+ */ -+ budget = bfqd->bfq_max_budget; -+ -+ bfqq->max_budget = budget; -+ -+ if (bfqd->budgets_assigned >= bfq_stats_min_budgets && -+ !bfqd->bfq_user_max_budget) -+ bfqq->max_budget = min(bfqq->max_budget, bfqd->bfq_max_budget); -+ -+ /* -+ * If there is still backlog, then assign a new budget, making -+ * sure that it is large enough for the next request. Since -+ * the finish time of bfqq must be kept in sync with the -+ * budget, be sure to call __bfq_bfqq_expire() *after* this -+ * update. -+ * -+ * If there is no backlog, then no need to update the budget; -+ * it will be updated on the arrival of a new request. -+ */ -+ next_rq = bfqq->next_rq; -+ if (next_rq) { -+ BUG_ON(reason == BFQ_BFQQ_TOO_IDLE || -+ reason == BFQ_BFQQ_NO_MORE_REQUESTS); -+ bfqq->entity.budget = max_t(unsigned long, bfqq->max_budget, -+ bfq_serv_to_charge(next_rq, bfqq)); -+ BUG_ON(!bfq_bfqq_busy(bfqq)); -+ BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list)); -+ } -+ -+ bfq_log_bfqq(bfqd, bfqq, "head sect: %u, new budget %d", -+ next_rq ? blk_rq_sectors(next_rq) : 0, -+ bfqq->entity.budget); -+} -+ -+/* -+ * Return true if the process associated with bfqq is "slow". The slow -+ * flag is used, in addition to the budget timeout, to reduce the -+ * amount of service provided to seeky processes, and thus reduce -+ * their chances to lower the throughput. More details in the comments -+ * on the function bfq_bfqq_expire(). -+ * -+ * An important observation is in order: as discussed in the comments -+ * on the function bfq_update_peak_rate(), with devices with internal -+ * queues, it is hard if ever possible to know when and for how long -+ * an I/O request is processed by the device (apart from the trivial -+ * I/O pattern where a new request is dispatched only after the -+ * previous one has been completed). This makes it hard to evaluate -+ * the real rate at which the I/O requests of each bfq_queue are -+ * served. In fact, for an I/O scheduler like BFQ, serving a -+ * bfq_queue means just dispatching its requests during its service -+ * slot (i.e., until the budget of the queue is exhausted, or the -+ * queue remains idle, or, finally, a timeout fires). But, during the -+ * service slot of a bfq_queue, around 100 ms at most, the device may -+ * be even still processing requests of bfq_queues served in previous -+ * service slots. On the opposite end, the requests of the in-service -+ * bfq_queue may be completed after the service slot of the queue -+ * finishes. -+ * -+ * Anyway, unless more sophisticated solutions are used -+ * (where possible), the sum of the sizes of the requests dispatched -+ * during the service slot of a bfq_queue is probably the only -+ * approximation available for the service received by the bfq_queue -+ * during its service slot. And this sum is the quantity used in this -+ * function to evaluate the I/O speed of a process. 
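The budget feedback above (applied to sync, non-weight-raised queues) is a small per-reason policy: shrink the budget of queues that went idle with nothing in flight, grow it for queues that kept the device busy, and track the measured service for queues that simply ran out of requests. A stand-alone restatement, not part of the patch:

#include <stdio.h>

enum expiration { TOO_IDLE, BUDGET_TIMEOUT, BUDGET_EXHAUSTED, NO_MORE_REQUESTS };

static int min_i(int a, int b) { return a < b ? a : b; }
static int max_i(int a, int b) { return a > b ? a : b; }

static int next_budget(enum expiration reason, int budget, int service,
                       int in_flight, int min_budget, int max_budget)
{
        switch (reason) {
        case TOO_IDLE:
                if (in_flight)          /* still waiting for completions */
                        return min_i(budget * 2, max_budget);
                return budget > 5 * min_budget ? budget - 4 * min_budget
                                               : min_budget;
        case BUDGET_TIMEOUT:
                return min_i(budget * 2, max_budget);
        case BUDGET_EXHAUSTED:
                return min_i(budget * 4, max_budget);
        case NO_MORE_REQUESTS:
                return max_i(service, min_budget);
        }
        return budget;
}

int main(void)
{
        printf("%d\n", next_budget(BUDGET_EXHAUSTED, 4096, 4096, 0, 512, 32768));
        printf("%d\n", next_budget(TOO_IDLE, 8192, 100, 0, 512, 32768));
        return 0;
}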
-+ */ -+static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq, -+ bool compensate, enum bfqq_expiration reason, -+ unsigned long *delta_ms) -+{ -+ ktime_t delta_ktime; -+ u32 delta_usecs; -+ bool slow = BFQQ_SEEKY(bfqq); /* if delta too short, use seekyness */ -+ -+ if (!bfq_bfqq_sync(bfqq)) -+ return false; -+ -+ if (compensate) -+ delta_ktime = bfqd->last_idling_start; -+ else -+ delta_ktime = ktime_get(); -+ delta_ktime = ktime_sub(delta_ktime, bfqd->last_budget_start); -+ delta_usecs = ktime_to_us(delta_ktime); -+ -+ /* don't use too short time intervals */ -+ if (delta_usecs < 1000) { -+ if (blk_queue_nonrot(bfqd->queue)) -+ /* -+ * give same worst-case guarantees as idling -+ * for seeky -+ */ -+ *delta_ms = BFQ_MIN_TT / NSEC_PER_MSEC; -+ else /* charge at least one seek */ -+ *delta_ms = bfq_slice_idle / NSEC_PER_MSEC; -+ -+ bfq_log(bfqd, "too short %u", delta_usecs); -+ -+ return slow; -+ } -+ -+ *delta_ms = delta_usecs / USEC_PER_MSEC; -+ -+ /* -+ * Use only long (> 20ms) intervals to filter out excessive -+ * spikes in service rate estimation. -+ */ -+ if (delta_usecs > 20000) { -+ /* -+ * Caveat for rotational devices: processes doing I/O -+ * in the slower disk zones tend to be slow(er) even -+ * if not seeky. In this respect, the estimated peak -+ * rate is likely to be an average over the disk -+ * surface. Accordingly, to not be too harsh with -+ * unlucky processes, a process is deemed slow only if -+ * its rate has been lower than half of the estimated -+ * peak rate. -+ */ -+ slow = bfqq->entity.service < bfqd->bfq_max_budget / 2; -+ bfq_log(bfqd, "relative rate %d/%d", -+ bfqq->entity.service, bfqd->bfq_max_budget); -+ } -+ -+ bfq_log_bfqq(bfqd, bfqq, "slow %d", slow); -+ -+ return slow; -+} -+ -+/* -+ * To be deemed as soft real-time, an application must meet two -+ * requirements. First, the application must not require an average -+ * bandwidth higher than the approximate bandwidth required to playback or -+ * record a compressed high-definition video. -+ * The next function is invoked on the completion of the last request of a -+ * batch, to compute the next-start time instant, soft_rt_next_start, such -+ * that, if the next request of the application does not arrive before -+ * soft_rt_next_start, then the above requirement on the bandwidth is met. -+ * -+ * The second requirement is that the request pattern of the application is -+ * isochronous, i.e., that, after issuing a request or a batch of requests, -+ * the application stops issuing new requests until all its pending requests -+ * have been completed. After that, the application may issue a new batch, -+ * and so on. -+ * For this reason the next function is invoked to compute -+ * soft_rt_next_start only for applications that meet this requirement, -+ * whereas soft_rt_next_start is set to infinity for applications that do -+ * not. -+ * -+ * Unfortunately, even a greedy (i.e., I/O-bound) application may -+ * happen to meet, occasionally or systematically, both the above -+ * bandwidth and isochrony requirements. This may happen at least in -+ * the following circumstances. First, if the CPU load is high. The -+ * application may stop issuing requests while the CPUs are busy -+ * serving other processes, then restart, then stop again for a while, -+ * and so on. 
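bfq_bfqq_is_slow() above reduces to a couple of thresholds: intervals shorter than a millisecond are not measurable, so the seekiness of the queue decides; intervals longer than 20 ms are compared against the budget, and the queue is slow if it consumed less than half of the maximum budget. A simplified stand-alone version (it leaves out the delta_ms output used later for time charging), not part of the patch:

#include <stdbool.h>
#include <stdio.h>

static bool queue_is_slow(bool sync, bool seeky, unsigned int delta_usecs,
                          int service, int max_budget)
{
        if (!sync)
                return false;            /* async queues are never deemed slow */
        if (delta_usecs < 1000)
                return seeky;            /* too short to measure: fall back */
        if (delta_usecs > 20000)
                return service < max_budget / 2;
        return seeky;                    /* middle ground: keep the seeky hint */
}

int main(void)
{
        printf("%d\n", queue_is_slow(true, false, 50000, 1000, 16384)); /* 1 */
        printf("%d\n", queue_is_slow(true, false, 50000, 9000, 16384)); /* 0 */
        return 0;
}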
The other circumstances are related to the storage -+ * device: the storage device is highly loaded or reaches a low-enough -+ * throughput with the I/O of the application (e.g., because the I/O -+ * is random and/or the device is slow). In all these cases, the -+ * I/O of the application may be simply slowed down enough to meet -+ * the bandwidth and isochrony requirements. To reduce the probability -+ * that greedy applications are deemed as soft real-time in these -+ * corner cases, a further rule is used in the computation of -+ * soft_rt_next_start: the return value of this function is forced to -+ * be higher than the maximum between the following two quantities. -+ * -+ * (a) Current time plus: (1) the maximum time for which the arrival -+ * of a request is waited for when a sync queue becomes idle, -+ * namely bfqd->bfq_slice_idle, and (2) a few extra jiffies. We -+ * postpone for a moment the reason for adding a few extra -+ * jiffies; we get back to it after next item (b). Lower-bounding -+ * the return value of this function with the current time plus -+ * bfqd->bfq_slice_idle tends to filter out greedy applications, -+ * because the latter issue their next request as soon as possible -+ * after the last one has been completed. In contrast, a soft -+ * real-time application spends some time processing data, after a -+ * batch of its requests has been completed. -+ * -+ * (b) Current value of bfqq->soft_rt_next_start. As pointed out -+ * above, greedy applications may happen to meet both the -+ * bandwidth and isochrony requirements under heavy CPU or -+ * storage-device load. In more detail, in these scenarios, these -+ * applications happen, only for limited time periods, to do I/O -+ * slowly enough to meet all the requirements described so far, -+ * including the filtering in above item (a). These slow-speed -+ * time intervals are usually interspersed between other time -+ * intervals during which these applications do I/O at a very high -+ * speed. Fortunately, exactly because of the high speed of the -+ * I/O in the high-speed intervals, the values returned by this -+ * function happen to be so high, near the end of any such -+ * high-speed interval, to be likely to fall *after* the end of -+ * the low-speed time interval that follows. These high values are -+ * stored in bfqq->soft_rt_next_start after each invocation of -+ * this function. As a consequence, if the last value of -+ * bfqq->soft_rt_next_start is constantly used to lower-bound the -+ * next value that this function may return, then, from the very -+ * beginning of a low-speed interval, bfqq->soft_rt_next_start is -+ * likely to be constantly kept so high that any I/O request -+ * issued during the low-speed interval is considered as arriving -+ * to soon for the application to be deemed as soft -+ * real-time. Then, in the high-speed interval that follows, the -+ * application will not be deemed as soft real-time, just because -+ * it will do I/O at a high speed. And so on. -+ * -+ * Getting back to the filtering in item (a), in the following two -+ * cases this filtering might be easily passed by a greedy -+ * application, if the reference quantity was just -+ * bfqd->bfq_slice_idle: -+ * 1) HZ is so low that the duration of a jiffy is comparable to or -+ * higher than bfqd->bfq_slice_idle. This happens, e.g., on slow -+ * devices with HZ=100. The time granularity may be so coarse -+ * that the approximation, in jiffies, of bfqd->bfq_slice_idle -+ * is rather lower than the exact value. 
-+ * 2) jiffies, instead of increasing at a constant rate, may stop increasing -+ * for a while, then suddenly 'jump' by several units to recover the lost -+ * increments. This seems to happen, e.g., inside virtual machines. -+ * To address this issue, in the filtering in (a) we do not use as a -+ * reference time interval just bfqd->bfq_slice_idle, but -+ * bfqd->bfq_slice_idle plus a few jiffies. In particular, we add the -+ * minimum number of jiffies for which the filter seems to be quite -+ * precise also in embedded systems and KVM/QEMU virtual machines. -+ */ -+static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq) -+{ -+ bfq_log_bfqq(bfqd, bfqq, -+"service_blkg %lu soft_rate %u sects/sec interval %u", -+ bfqq->service_from_backlogged, -+ bfqd->bfq_wr_max_softrt_rate, -+ jiffies_to_msecs(HZ * bfqq->service_from_backlogged / -+ bfqd->bfq_wr_max_softrt_rate)); -+ -+ return max3(bfqq->soft_rt_next_start, -+ bfqq->last_idle_bklogged + -+ HZ * bfqq->service_from_backlogged / -+ bfqd->bfq_wr_max_softrt_rate, -+ jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4); -+} -+ -+static bool bfq_bfqq_injectable(struct bfq_queue *bfqq) -+{ -+ return BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 && -+ blk_queue_nonrot(bfqq->bfqd->queue) && -+ bfqq->bfqd->hw_tag; -+} -+ -+/** -+ * bfq_bfqq_expire - expire a queue. -+ * @bfqd: device owning the queue. -+ * @bfqq: the queue to expire. -+ * @compensate: if true, compensate for the time spent idling. -+ * @reason: the reason causing the expiration. -+ * -+ * If the process associated with bfqq does slow I/O (e.g., because it -+ * issues random requests), we charge bfqq with the time it has been -+ * in service instead of the service it has received (see -+ * bfq_bfqq_charge_time for details on how this goal is achieved). As -+ * a consequence, bfqq will typically get higher timestamps upon -+ * reactivation, and hence it will be rescheduled as if it had -+ * received more service than what it has actually received. In the -+ * end, bfqq receives less service in proportion to how slowly its -+ * associated process consumes its budgets (and hence how seriously it -+ * tends to lower the throughput). In addition, this time-charging -+ * strategy guarantees time fairness among slow processes. In -+ * contrast, if the process associated with bfqq is not slow, we -+ * charge bfqq exactly with the service it has received. -+ * -+ * Charging time to the first type of queues and the exact service to -+ * the other has the effect of using the WF2Q+ policy to schedule the -+ * former on a timeslice basis, without violating service domain -+ * guarantees among the latter. -+ */ -+static void bfq_bfqq_expire(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq, -+ bool compensate, -+ enum bfqq_expiration reason) -+{ -+ bool slow; -+ unsigned long delta = 0; -+ struct bfq_entity *entity = &bfqq->entity; -+ int ref; -+ -+ BUG_ON(bfqq != bfqd->in_service_queue); -+ -+ /* -+ * Check whether the process is slow (see bfq_bfqq_is_slow). -+ */ -+ slow = bfq_bfqq_is_slow(bfqd, bfqq, compensate, reason, &delta); -+ -+ /* -+ * As above explained, charge slow (typically seeky) and -+ * timed-out queues with the time and not the service -+ * received, to favor sequential workloads. -+ * -+ * Processes doing I/O in the slower disk zones will tend to -+ * be slow(er) even if not seeky. Therefore, since the -+ * estimated peak rate is actually an average over the disk -+ * surface, these processes may timeout just for bad luck. 
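The max3() at the end of bfq_bfqq_softrt_next_start() above combines the three lower bounds discussed in the comment: the previous next-start value, the instant at which the service received since the queue became backlogged stops exceeding the allowed soft real-time rate, and "now plus the idling window plus a few ticks". A stand-alone sketch in plain ticks, not part of the patch; the HZ value and the numbers in main() are illustrative.

#include <stdio.h>

static unsigned long max3(unsigned long a, unsigned long b, unsigned long c)
{
        unsigned long m = a > b ? a : b;
        return m > c ? m : c;
}

static unsigned long softrt_next_start(unsigned long now,
                                       unsigned long prev_next_start,
                                       unsigned long last_idle_backlogged,
                                       unsigned long service_sectors,
                                       unsigned long max_softrt_rate,
                                       unsigned long idle_ticks)
{
        const unsigned long hz = 250;  /* ticks per second, illustrative */

        return max3(prev_next_start,
                    last_idle_backlogged +
                        hz * service_sectors / max_softrt_rate,
                    now + idle_ticks + 4);
}

int main(void)
{
        /* 7000 sectors served since the queue became backlogged, allowed
         * rate 7000 sectors/s: the bandwidth bound pushes the next start
         * one second past the last idle-to-backlogged transition */
        printf("%lu\n", softrt_next_start(1000, 0, 1000, 7000, 7000, 2));
        return 0;
}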
To -+ * avoid punishing them, do not charge time to processes that -+ * succeeded in consuming at least 2/3 of their budget. This -+ * allows BFQ to preserve enough elasticity to still perform -+ * bandwidth, and not time, distribution with little unlucky -+ * or quasi-sequential processes. -+ */ -+ if (bfqq->wr_coeff == 1 && -+ (slow || -+ (reason == BFQ_BFQQ_BUDGET_TIMEOUT && -+ bfq_bfqq_budget_left(bfqq) >= entity->budget / 3))) -+ bfq_bfqq_charge_time(bfqd, bfqq, delta); -+ -+ BUG_ON(bfqq->entity.budget < bfqq->entity.service); -+ -+ if (reason == BFQ_BFQQ_TOO_IDLE && -+ entity->service <= 2 * entity->budget / 10) -+ bfq_clear_bfqq_IO_bound(bfqq); -+ -+ if (bfqd->low_latency && bfqq->wr_coeff == 1) -+ bfqq->last_wr_start_finish = jiffies; -+ -+ if (bfqd->low_latency && bfqd->bfq_wr_max_softrt_rate > 0 && -+ RB_EMPTY_ROOT(&bfqq->sort_list)) { -+ /* -+ * If we get here, and there are no outstanding -+ * requests, then the request pattern is isochronous -+ * (see the comments on the function -+ * bfq_bfqq_softrt_next_start()). Thus we can compute -+ * soft_rt_next_start. And we do it, unless bfqq is in -+ * interactive weight raising. We do not do it in the -+ * latter subcase, for the following reason. bfqq may -+ * be conveying the I/O needed to load a soft -+ * real-time application. Such an application will -+ * actually exhibit a soft real-time I/O pattern after -+ * it finally starts doing its job. But, if -+ * soft_rt_next_start is computed here for an -+ * interactive bfqq, and bfqq had received a lot of -+ * service before remaining with no outstanding -+ * request (likely to happen on a fast device), then -+ * soft_rt_next_start would be assigned such a high -+ * value that, for a very long time, bfqq would be -+ * prevented from being possibly considered as soft -+ * real time. -+ * -+ * If, instead, the queue still has outstanding -+ * requests, then we have to wait for the completion -+ * of all the outstanding requests to discover whether -+ * the request pattern is actually isochronous. -+ */ -+ BUG_ON(bfq_tot_busy_queues(bfqd) < 1); -+ if (bfqq->dispatched == 0 && -+ bfqq->wr_coeff != bfqd->bfq_wr_coeff) { -+ bfqq->soft_rt_next_start = -+ bfq_bfqq_softrt_next_start(bfqd, bfqq); -+ bfq_log_bfqq(bfqd, bfqq, "new soft_rt_next %lu", -+ bfqq->soft_rt_next_start); -+ } else if (bfqq->dispatched > 0) { -+ /* -+ * Schedule an update of soft_rt_next_start to when -+ * the task may be discovered to be isochronous. -+ */ -+ bfq_mark_bfqq_softrt_update(bfqq); -+ } -+ } -+ -+ bfq_log_bfqq(bfqd, bfqq, -+ "expire (%s, slow %d, num_disp %d, short %d, weight %d, serv %d/%d)", -+ reason_name[reason], slow, bfqq->dispatched, -+ bfq_bfqq_has_short_ttime(bfqq), entity->weight, -+ entity->service, entity->budget); -+ -+ /* -+ * Increase, decrease or leave budget unchanged according to -+ * reason. 
-+ */ -+ BUG_ON(bfqq->entity.budget < bfqq->entity.service); -+ __bfq_bfqq_recalc_budget(bfqd, bfqq, reason); -+ BUG_ON(bfqq->next_rq == NULL && -+ bfqq->entity.budget < bfqq->entity.service); -+ ref = bfqq->ref; -+ __bfq_bfqq_expire(bfqd, bfqq); -+ -+ if (ref == 1) /* bfqq is gone, no more actions on it */ -+ return; -+ -+ BUG_ON(ref > 1 && -+ !bfq_bfqq_busy(bfqq) && reason == BFQ_BFQQ_BUDGET_EXHAUSTED && -+ !bfq_class_idle(bfqq)); -+ -+ bfqq->injected_service = 0; -+ -+ /* mark bfqq as waiting a request only if a bic still points to it */ -+ if (!bfq_bfqq_busy(bfqq) && -+ reason != BFQ_BFQQ_BUDGET_TIMEOUT && -+ reason != BFQ_BFQQ_BUDGET_EXHAUSTED) { -+ BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list)); -+ BUG_ON(bfqq->next_rq); -+ bfq_mark_bfqq_non_blocking_wait_rq(bfqq); -+ /* -+ * Not setting service to 0, because, if the next rq -+ * arrives in time, the queue will go on receiving -+ * service with this same budget (as if it never expired) -+ */ -+ } else { -+ entity->service = 0; -+ bfq_log_bfqq(bfqd, bfqq, "resetting service"); -+ } -+ -+ /* -+ * Reset the received-service counter for every parent entity. -+ * Differently from what happens with bfqq->entity.service, -+ * the resetting of this counter never needs to be postponed -+ * for parent entities. In fact, in case bfqq may have a -+ * chance to go on being served using the last, partially -+ * consumed budget, bfqq->entity.service needs to be kept, -+ * because if bfqq then actually goes on being served using -+ * the same budget, the last value of bfqq->entity.service is -+ * needed to properly decrement bfqq->entity.budget by the -+ * portion already consumed. In contrast, it is not necessary -+ * to keep entity->service for parent entities too, because -+ * the bubble up of the new value of bfqq->entity.budget will -+ * make sure that the budgets of parent entities are correct, -+ * even in case bfqq and thus parent entities go on receiving -+ * service with the same budget. -+ */ -+ entity = entity->parent; -+ for_each_entity(entity) -+ entity->service = 0; -+} -+ -+/* -+ * Budget timeout is not implemented through a dedicated timer, but -+ * just checked on request arrivals and completions, as well as on -+ * idle timer expirations. -+ */ -+static bool bfq_bfqq_budget_timeout(struct bfq_queue *bfqq) -+{ -+ return time_is_before_eq_jiffies(bfqq->budget_timeout); -+} -+ -+/* -+ * If we expire a queue that is actively waiting (i.e., with the -+ * device idled) for the arrival of a new request, then we may incur -+ * the timestamp misalignment problem described in the body of the -+ * function __bfq_activate_entity. Hence we return true only if this -+ * condition does not hold, or if the queue is slow enough to deserve -+ * only to be kicked off for preserving a high throughput. 
-+ */ -+static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq) -+{ -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "wait_request %d left %d timeout %d", -+ bfq_bfqq_wait_request(bfqq), -+ bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3, -+ bfq_bfqq_budget_timeout(bfqq)); -+ -+ return (!bfq_bfqq_wait_request(bfqq) || -+ bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3) -+ && -+ bfq_bfqq_budget_timeout(bfqq); -+} -+ -+static bool idling_boosts_thr_without_issues(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq) -+{ -+ bool rot_without_queueing = -+ !blk_queue_nonrot(bfqd->queue) && !bfqd->hw_tag, -+ bfqq_sequential_and_IO_bound, -+ idling_boosts_thr; -+ -+ bfqq_sequential_and_IO_bound = !BFQQ_SEEKY(bfqq) && -+ bfq_bfqq_IO_bound(bfqq) && bfq_bfqq_has_short_ttime(bfqq); -+ /* -+ * The next variable takes into account the cases where idling -+ * boosts the throughput. -+ * -+ * The value of the variable is computed considering, first, that -+ * idling is virtually always beneficial for the throughput if: -+ * (a) the device is not NCQ-capable and rotational, or -+ * (b) regardless of the presence of NCQ, the device is rotational and -+ * the request pattern for bfqq is I/O-bound and sequential, or -+ * (c) regardless of whether it is rotational, the device is -+ * not NCQ-capable and the request pattern for bfqq is -+ * I/O-bound and sequential. -+ * -+ * Secondly, and in contrast to the above item (b), idling an -+ * NCQ-capable flash-based device would not boost the -+ * throughput even with sequential I/O; rather it would lower -+ * the throughput in proportion to how fast the device -+ * is. Accordingly, the next variable is true if any of the -+ * above conditions (a), (b) or (c) is true, and, in -+ * particular, happens to be false if bfqd is an NCQ-capable -+ * flash-based device. -+ */ -+ idling_boosts_thr = rot_without_queueing || -+ ((!blk_queue_nonrot(bfqd->queue) || !bfqd->hw_tag) && -+ bfqq_sequential_and_IO_bound); -+ -+ bfq_log_bfqq(bfqd, bfqq, "idling_boosts_thr %d", idling_boosts_thr); -+ -+ /* -+ * The return value of this function is equal to that of -+ * idling_boosts_thr, unless a special case holds. In this -+ * special case, described below, idling may cause problems to -+ * weight-raised queues. -+ * -+ * When the request pool is saturated (e.g., in the presence -+ * of write hogs), if the processes associated with -+ * non-weight-raised queues ask for requests at a lower rate, -+ * then processes associated with weight-raised queues have a -+ * higher probability to get a request from the pool -+ * immediately (or at least soon) when they need one. Thus -+ * they have a higher probability to actually get a fraction -+ * of the device throughput proportional to their high -+ * weight. This is especially true with NCQ-capable drives, -+ * which enqueue several requests in advance, and further -+ * reorder internally-queued requests. -+ * -+ * For this reason, we force to false the return value if -+ * there are weight-raised busy queues. In this case, and if -+ * bfqq is not weight-raised, this guarantees that the device -+ * is not idled for bfqq (if, instead, bfqq is weight-raised, -+ * then idling will be guaranteed by another variable, see -+ * below). 
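/*
 * A minimal userspace sketch of the boolean logic described above,
 * folding the rotational/NCQ cases and the weight-raising override
 * into one helper. 'rotational' and 'has_ncq' stand in for
 * !blk_queue_nonrot() and hw_tag; all names and the sample cases are
 * illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

static bool idling_boosts_thr_sketch(bool rotational, bool has_ncq,
				     bool seq_and_io_bound, bool wr_busy)
{
	bool rot_without_queueing = rotational && !has_ncq;
	/*
	 * (a) rotational and not queueing, or (b)/(c) rotational or
	 * non-queueing combined with sequential, I/O-bound traffic.
	 */
	bool idling_boosts_thr = rot_without_queueing ||
		((rotational || !has_ncq) && seq_and_io_bound);

	/* forced to false while weight-raised queues are busy */
	return idling_boosts_thr && !wr_busy;
}

int main(void)
{
	printf("rotational disk, no NCQ, seeky app: %d\n",
	       idling_boosts_thr_sketch(true, false, false, false));
	printf("NCQ flash, sequential app:          %d\n",
	       idling_boosts_thr_sketch(false, true, true, false));
	return 0;
}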
Combined with the timestamping rules of BFQ (see -+ * [1] for details), this behavior causes bfqq, and hence any -+ * sync non-weight-raised queue, to get a lower number of -+ * requests served, and thus to ask for a lower number of -+ * requests from the request pool, before the busy -+ * weight-raised queues get served again. This often mitigates -+ * starvation problems in the presence of heavy write -+ * workloads and NCQ, thereby guaranteeing a higher -+ * application and system responsiveness in these hostile -+ * scenarios. -+ */ -+ return idling_boosts_thr && -+ bfqd->wr_busy_queues == 0; -+} -+ -+/* -+ * There is a case where idling must be performed not for -+ * throughput concerns, but to preserve service guarantees. -+ * -+ * To introduce this case, we can note that allowing the drive -+ * to enqueue more than one request at a time, and hence -+ * delegating de facto final scheduling decisions to the -+ * drive's internal scheduler, entails loss of control on the -+ * actual request service order. In particular, the critical -+ * situation is when requests from different processes happen -+ * to be present, at the same time, in the internal queue(s) -+ * of the drive. In such a situation, the drive, by deciding -+ * the service order of the internally-queued requests, does -+ * determine also the actual throughput distribution among -+ * these processes. But the drive typically has no notion or -+ * concern about per-process throughput distribution, and -+ * makes its decisions only on a per-request basis. Therefore, -+ * the service distribution enforced by the drive's internal -+ * scheduler is likely to coincide with the desired -+ * device-throughput distribution only in a completely -+ * symmetric scenario where: -+ * (i) each of these processes must get the same throughput as -+ * the others; -+ * (ii) the I/O of each process has the same properties, in -+ * terms of locality (sequential or random), direction -+ * (reads or writes), request sizes, greediness -+ * (from I/O-bound to sporadic), and so on. -+ * In fact, in such a scenario, the drive tends to treat -+ * the requests of each of these processes in about the same -+ * way as the requests of the others, and thus to provide -+ * each of these processes with about the same throughput -+ * (which is exactly the desired throughput distribution). In -+ * contrast, in any asymmetric scenario, device idling is -+ * certainly needed to guarantee that bfqq receives its -+ * assigned fraction of the device throughput (see [1] for -+ * details). -+ * The problem is that idling may significantly reduce -+ * throughput with certain combinations of types of I/O and -+ * devices. An important example is sync random I/O, on flash -+ * storage with command queueing. So, unless bfqq falls in the -+ * above cases where idling also boosts throughput, it would -+ * be important to check conditions (i) and (ii) accurately, -+ * so as to avoid idling when not strictly needed for service -+ * guarantees. -+ * -+ * Unfortunately, it is extremely difficult to thoroughly -+ * check condition (ii). And, in case there are active groups, -+ * it becomes very difficult to check condition (i) too. In -+ * fact, if there are active groups, then, for condition (i) -+ * to become false, it is enough that an active group contains -+ * more active processes or sub-groups than some other active -+ * group. 
More precisely, for condition (i) to hold because of -+ * such a group, it is not even necessary that the group is -+ * (still) active: it is sufficient that, even if the group -+ * has become inactive, some of its descendant processes still -+ * have some request already dispatched but still waiting for -+ * completion. In fact, requests have still to be guaranteed -+ * their share of the throughput even after being -+ * dispatched. In this respect, it is easy to show that, if a -+ * group frequently becomes inactive while still having -+ * in-flight requests, and if, when this happens, the group is -+ * not considered in the calculation of whether the scenario -+ * is asymmetric, then the group may fail to be guaranteed its -+ * fair share of the throughput (basically because idling may -+ * not be performed for the descendant processes of the group, -+ * but it had to be). We address this issue with the -+ * following bi-modal behavior, implemented in the function -+ * bfq_symmetric_scenario(). -+ * -+ * If there are groups with requests waiting for completion -+ * (as commented above, some of these groups may even be -+ * already inactive), then the scenario is tagged as -+ * asymmetric, conservatively, without checking any of the -+ * conditions (i) and (ii). So the device is idled for bfqq. -+ * This behavior matches also the fact that groups are created -+ * exactly if controlling I/O is a primary concern (to -+ * preserve bandwidth and latency guarantees). -+ * -+ * On the opposite end, if there are no groups with requests -+ * waiting for completion, then only condition (i) is actually -+ * controlled, i.e., provided that condition (i) holds, idling -+ * is not performed, regardless of whether condition (ii) -+ * holds. In other words, only if condition (i) does not hold, -+ * then idling is allowed, and the device tends to be -+ * prevented from queueing many requests, possibly of several -+ * processes. Since there are no groups with requests waiting -+ * for completion, then, to control condition (i) it is enough -+ * to check just whether all the queues with requests waiting -+ * for completion also have the same weight. -+ * -+ * Not checking condition (ii) evidently exposes bfqq to the -+ * risk of getting less throughput than its fair share. -+ * However, for queues with the same weight, a further -+ * mechanism, preemption, mitigates or even eliminates this -+ * problem. And it does so without consequences on overall -+ * throughput. This mechanism and its benefits are explained -+ * in the next three paragraphs. -+ * -+ * Even if a queue, say Q, is expired when it remains idle, Q -+ * can still preempt the new in-service queue if the next -+ * request of Q arrives soon (see the comments on -+ * bfq_bfqq_update_budg_for_activation). If all queues and -+ * groups have the same weight, this form of preemption, -+ * combined with the hole-recovery heuristic described in the -+ * comments on function bfq_bfqq_update_budg_for_activation, -+ * are enough to preserve a correct bandwidth distribution in -+ * the mid term, even without idling. In fact, even if not -+ * idling allows the internal queues of the device to contain -+ * many requests, and thus to reorder requests, we can rather -+ * safely assume that the internal scheduler still preserves a -+ * minimum of mid-term fairness. -+ * -+ * More precisely, this preemption-based, idleless approach -+ * provides fairness in terms of IOPS, and not sectors per -+ * second. This can be seen with a simple example. 
Suppose -+ * that there are two queues with the same weight, but that -+ * the first queue receives requests of 8 sectors, while the -+ * second queue receives requests of 1024 sectors. In -+ * addition, suppose that each of the two queues contains at -+ * most one request at a time, which implies that each queue -+ * always remains idle after it is served. Finally, after -+ * remaining idle, each queue receives very quickly a new -+ * request. It follows that the two queues are served -+ * alternatively, preempting each other if needed. This -+ * implies that, although both queues have the same weight, -+ * the queue with large requests receives a service that is -+ * 1024/8 times as high as the service received by the other -+ * queue. -+ * -+ * The motivation for using preemption instead of idling (for -+ * queues with the same weight) is that, by not idling, -+ * service guarantees are preserved (completely or at least in -+ * part) without minimally sacrificing throughput. And, if -+ * there is no active group, then the primary expectation for -+ * this device is probably a high throughput. -+ * -+ * We are now left only with explaining the additional -+ * compound condition that is checked below for deciding -+ * whether the scenario is asymmetric. To explain this -+ * compound condition, we need to add that the function -+ * bfq_symmetric_scenario checks the weights of only -+ * non-weight-raised queues, for efficiency reasons (see -+ * comments on bfq_weights_tree_add()). Then the fact that -+ * bfqq is weight-raised is checked explicitly here. More -+ * precisely, the compound condition below takes into account -+ * also the fact that, even if bfqq is being weight-raised, -+ * the scenario is still symmetric if all queues with requests -+ * waiting for completion happen to be -+ * weight-raised. Actually, we should be even more precise -+ * here, and differentiate between interactive weight raising -+ * and soft real-time weight raising. -+ * -+ * As a side note, it is worth considering that the above -+ * device-idling countermeasures may however fail in the -+ * following unlucky scenario: if idling is (correctly) -+ * disabled in a time period during which all symmetry -+ * sub-conditions hold, and hence the device is allowed to -+ * enqueue many requests, but at some later point in time some -+ * sub-condition stops to hold, then it may become impossible -+ * to let requests be served in the desired order until all -+ * the requests already queued in the device have been served. -+ */ -+static bool idling_needed_for_service_guarantees(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq) -+{ -+ bool asymmetric_scenario = (bfqq->wr_coeff > 1 && -+ bfqd->wr_busy_queues < -+ bfq_tot_busy_queues(bfqd)) || -+ !bfq_symmetric_scenario(bfqd); -+ -+ bfq_log_bfqq(bfqd, bfqq, -+ "wr_coeff %d wr_busy %d busy %d asymmetric %d", -+ bfqq->wr_coeff, -+ bfqd->wr_busy_queues, -+ bfq_tot_busy_queues(bfqd), -+ asymmetric_scenario); -+ -+ return asymmetric_scenario; -+} -+ -+/* -+ * For a queue that becomes empty, device idling is allowed only if -+ * this function returns true for that queue. As a consequence, since -+ * device idling plays a critical role for both throughput boosting -+ * and service guarantees, the return value of this function plays a -+ * critical role as well. 
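/*
 * The two-queue example above, worked out numerically under the same
 * assumptions (equal weights, one outstanding request per queue,
 * strictly alternating service). The request sizes match the text;
 * the round count is illustrative only.
 */
#include <stdio.h>

int main(void)
{
	const unsigned int rq_small = 8;     /* sectors per request, queue 1 */
	const unsigned int rq_large = 1024;  /* sectors per request, queue 2 */
	const unsigned int rounds = 1000;    /* alternations observed */

	/* same IOPS for both queues ... */
	printf("requests served: %u vs %u\n", rounds, rounds);
	/* ... but sector throughput differs by rq_large / rq_small = 128 */
	printf("sectors served:  %u vs %u (ratio %u)\n",
	       rounds * rq_small, rounds * rq_large, rq_large / rq_small);
	return 0;
}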
-+ * -+ * In a nutshell, this function returns true only if idling is -+ * beneficial for throughput or, even if detrimental for throughput, -+ * idling is however necessary to preserve service guarantees (low -+ * latency, desired throughput distribution, ...). In particular, on -+ * NCQ-capable devices, this function tries to return false, so as to -+ * help keep the drives' internal queues full, whenever this helps the -+ * device boost the throughput without causing any service-guarantee -+ * issue. -+ * -+ * Most of the issues taken into account to get the return value of -+ * this function are not trivial. We discuss these issues in the two -+ * functions providing the main pieces of information needed by this -+ * function. -+ */ -+static bool bfq_better_to_idle(struct bfq_queue *bfqq) -+{ -+ struct bfq_data *bfqd = bfqq->bfqd; -+ bool idling_boosts_thr_with_no_issue, idling_needed_for_service_guar; -+ -+ if (unlikely(bfqd->strict_guarantees)) -+ return true; -+ -+ /* -+ * Idling is performed only if slice_idle > 0. In addition, we -+ * do not idle if -+ * (a) bfqq is async -+ * (b) bfqq is in the idle io prio class: in this case we do -+ * not idle because we want to minimize the bandwidth that -+ * queues in this class can steal to higher-priority queues -+ */ -+ if (bfqd->bfq_slice_idle == 0 || !bfq_bfqq_sync(bfqq) || -+ bfq_class_idle(bfqq)) -+ return false; -+ -+ idling_boosts_thr_with_no_issue = -+ idling_boosts_thr_without_issues(bfqd, bfqq); -+ -+ idling_needed_for_service_guar = -+ idling_needed_for_service_guarantees(bfqd, bfqq); -+ -+ /* -+ * We have now the two components we need to compute the -+ * return value of the function, which is true only if idling -+ * either boosts the throughput (without issues), or is -+ * necessary to preserve service guarantees. -+ */ -+ bfq_log_bfqq(bfqd, bfqq, -+ "wr_busy %d boosts %d IO-bound %d guar %d", -+ bfqd->wr_busy_queues, -+ idling_boosts_thr_with_no_issue, -+ bfq_bfqq_IO_bound(bfqq), -+ idling_needed_for_service_guar); -+ -+ return idling_boosts_thr_with_no_issue || -+ idling_needed_for_service_guar; -+} -+ -+/* -+ * If the in-service queue is empty but the function bfq_better_to_idle -+ * returns true, then: -+ * 1) the queue must remain in service and cannot be expired, and -+ * 2) the device must be idled to wait for the possible arrival of a new -+ * request for the queue. -+ * See the comments on the function bfq_better_to_idle for the reasons -+ * why performing device idling is the best choice to boost the throughput -+ * and preserve service guarantees when bfq_better_to_idle itself -+ * returns true. -+ */ -+static bool bfq_bfqq_must_idle(struct bfq_queue *bfqq) -+{ -+ return RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_better_to_idle(bfqq); -+} -+ -+static struct bfq_queue *bfq_choose_bfqq_for_injection(struct bfq_data *bfqd) -+{ -+ struct bfq_queue *bfqq; -+ -+ /* -+ * A linear search; but, with a high probability, very few -+ * steps are needed to find a candidate queue, i.e., a queue -+ * with enough budget left for its next request. In fact: -+ * - BFQ dynamically updates the budget of every queue so as -+ * to accomodate the expected backlog of the queue; -+ * - if a queue gets all its requests dispatched as injected -+ * service, then the queue is removed from the active list -+ * (and re-added only if it gets new requests, but with -+ * enough budget for its new backlog). 
-+ */ -+ list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list) -+ if (!RB_EMPTY_ROOT(&bfqq->sort_list) && -+ bfq_serv_to_charge(bfqq->next_rq, bfqq) <= -+ bfq_bfqq_budget_left(bfqq)) { -+ bfq_log_bfqq(bfqd, bfqq, "returned this queue"); -+ return bfqq; -+ } -+ -+ bfq_log(bfqd, "no queue found"); -+ return NULL; -+} -+ -+/* -+ * Select a queue for service. If we have a current queue in service, -+ * check whether to continue servicing it, or retrieve and set a new one. -+ */ -+static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd) -+{ -+ struct bfq_queue *bfqq; -+ struct request *next_rq; -+ enum bfqq_expiration reason = BFQ_BFQQ_BUDGET_TIMEOUT; -+ -+ bfqq = bfqd->in_service_queue; -+ if (!bfqq) -+ goto new_queue; -+ -+ bfq_log_bfqq(bfqd, bfqq, "already in-service queue"); -+ -+ /* -+ * Do not expire bfqq for budget timeout if bfqq may be about -+ * to enjoy device idling. The reason why, in this case, we -+ * prevent bfqq from expiring is the same as in the comments -+ * on the case where bfq_bfqq_must_idle() returns true, in -+ * bfq_completed_request(). -+ */ -+ if (bfq_may_expire_for_budg_timeout(bfqq) && -+ !bfq_bfqq_must_idle(bfqq)) -+ goto expire; -+ -+check_queue: -+ /* -+ * This loop is rarely executed more than once. Even when it -+ * happens, it is much more convenient to re-execute this loop -+ * than to return NULL and trigger a new dispatch to get a -+ * request served. -+ */ -+ next_rq = bfqq->next_rq; -+ /* -+ * If bfqq has requests queued and it has enough budget left to -+ * serve them, keep the queue, otherwise expire it. -+ */ -+ if (next_rq) { -+ BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list)); -+ -+ if (bfq_serv_to_charge(next_rq, bfqq) > -+ bfq_bfqq_budget_left(bfqq)) { -+ /* -+ * Expire the queue for budget exhaustion, -+ * which makes sure that the next budget is -+ * enough to serve the next request, even if -+ * it comes from the fifo expired path. -+ */ -+ reason = BFQ_BFQQ_BUDGET_EXHAUSTED; -+ goto expire; -+ } else { -+ /* -+ * The idle timer may be pending because we may -+ * not disable disk idling even when a new request -+ * arrives. -+ */ -+ if (bfq_bfqq_wait_request(bfqq)) { -+ BUG_ON(!hrtimer_active(&bfqd->idle_slice_timer)); -+ /* -+ * If we get here: 1) at least a new request -+ * has arrived but we have not disabled the -+ * timer because the request was too small, -+ * 2) then the block layer has unplugged -+ * the device, causing the dispatch to be -+ * invoked. -+ * -+ * Since the device is unplugged, now the -+ * requests are probably large enough to -+ * provide a reasonable throughput. -+ * So we disable idling. -+ */ -+ bfq_clear_bfqq_wait_request(bfqq); -+ hrtimer_try_to_cancel(&bfqd->idle_slice_timer); -+ bfqg_stats_update_idle_time(bfqq_group(bfqq)); -+ } -+ goto keep_queue; -+ } -+ } -+ -+ /* -+ * No requests pending. However, if the in-service queue is idling -+ * for a new request, or has requests waiting for a completion and -+ * may idle after their completion, then keep it anyway. -+ * -+ * Yet, to boost throughput, inject service from other queues if -+ * possible. 
-+ */ -+ if (hrtimer_active(&bfqd->idle_slice_timer) || -+ (bfqq->dispatched != 0 && bfq_better_to_idle(bfqq))) { -+ if (bfq_bfqq_injectable(bfqq) && -+ bfqq->injected_service * bfqq->inject_coeff < -+ bfqq->entity.service * 10) { -+ bfq_log_bfqq(bfqd, bfqq, "looking for queue for injection"); -+ bfqq = bfq_choose_bfqq_for_injection(bfqd); -+ } else { -+ if (BFQQ_SEEKY(bfqq)) -+ bfq_log_bfqq(bfqd, bfqq, -+ "injection saturated %d * %d >= %d * 10", -+ bfqq->injected_service, bfqq->inject_coeff, -+ bfqq->entity.service); -+ bfqq = NULL; -+ } -+ goto keep_queue; -+ } -+ -+ reason = BFQ_BFQQ_NO_MORE_REQUESTS; -+expire: -+ bfq_bfqq_expire(bfqd, bfqq, false, reason); -+new_queue: -+ bfqq = bfq_set_in_service_queue(bfqd); -+ if (bfqq) { -+ bfq_log_bfqq(bfqd, bfqq, "checking new queue"); -+ goto check_queue; -+ } -+keep_queue: -+ if (bfqq) -+ bfq_log_bfqq(bfqd, bfqq, "returned this queue"); -+ else -+ bfq_log(bfqd, "no queue returned"); -+ -+ return bfqq; -+} -+ -+static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq) -+{ -+ struct bfq_entity *entity = &bfqq->entity; -+ -+ if (bfqq->wr_coeff > 1) { /* queue is being weight-raised */ -+ BUG_ON(bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time && -+ time_is_after_jiffies(bfqq->last_wr_start_finish)); -+ -+ bfq_log_bfqq(bfqd, bfqq, -+ "raising period dur %u/%u msec, old coeff %u, w %d(%d)", -+ jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish), -+ jiffies_to_msecs(bfqq->wr_cur_max_time), -+ bfqq->wr_coeff, -+ bfqq->entity.weight, bfqq->entity.orig_weight); -+ -+ BUG_ON(bfqq != bfqd->in_service_queue && entity->weight != -+ entity->orig_weight * bfqq->wr_coeff); -+ if (entity->prio_changed) -+ bfq_log_bfqq(bfqd, bfqq, "WARN: pending prio change"); -+ -+ /* -+ * If the queue was activated in a burst, or too much -+ * time has elapsed from the beginning of this -+ * weight-raising period, then end weight raising. -+ */ -+ if (bfq_bfqq_in_large_burst(bfqq)) -+ bfq_bfqq_end_wr(bfqq); -+ else if (time_is_before_jiffies(bfqq->last_wr_start_finish + -+ bfqq->wr_cur_max_time)) { -+ if (bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time || -+ time_is_before_jiffies(bfqq->wr_start_at_switch_to_srt + -+ bfq_wr_duration(bfqd))) -+ bfq_bfqq_end_wr(bfqq); -+ else { -+ switch_back_to_interactive_wr(bfqq, bfqd); -+ BUG_ON(time_is_after_jiffies( -+ bfqq->last_wr_start_finish)); -+ bfqq->entity.prio_changed = 1; -+ bfq_log_bfqq(bfqd, bfqq, -+ "back to interactive wr"); -+ } -+ } -+ if (bfqq->wr_coeff > 1 && -+ bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time && -+ bfqq->service_from_wr > max_service_from_wr) { -+ /* see comments on max_service_from_wr */ -+ bfq_bfqq_end_wr(bfqq); -+ bfq_log_bfqq(bfqd, bfqq, -+ "too much service"); -+ } -+ } -+ /* -+ * To improve latency (for this or other queues), immediately -+ * update weight both if it must be raised and if it must be -+ * lowered. Since, entity may be on some active tree here, and -+ * might have a pending change of its ioprio class, invoke -+ * next function with the last parameter unset (see the -+ * comments on the function). -+ */ -+ if ((entity->weight > entity->orig_weight) != (bfqq->wr_coeff > 1)) -+ __bfq_entity_update_weight_prio(bfq_entity_service_tree(entity), -+ entity, false); -+} -+ -+/* -+ * Dispatch one request from bfqq, moving it to the request queue -+ * dispatch list. 
-+ */ -+static int bfq_dispatch_request(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq) -+{ -+ int dispatched = 0; -+ struct request *rq = bfqq->next_rq; -+ unsigned long service_to_charge; -+ -+ BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list)); -+ BUG_ON(!rq); -+ service_to_charge = bfq_serv_to_charge(rq, bfqq); -+ -+ BUG_ON(service_to_charge > bfq_bfqq_budget_left(bfqq)); -+ -+ BUG_ON(bfqq->entity.budget < bfqq->entity.service); -+ -+ bfq_bfqq_served(bfqq, service_to_charge); -+ -+ BUG_ON(bfqq->entity.budget < bfqq->entity.service); -+ -+ bfq_dispatch_insert(bfqd->queue, rq); -+ -+ bfq_log_bfqq(bfqd, bfqq, -+ "dispatched %u sec req (%llu), budg left %d, new disp_nr %d", -+ blk_rq_sectors(rq), -+ (unsigned long long) blk_rq_pos(rq), -+ bfq_bfqq_budget_left(bfqq), -+ bfqq->dispatched); -+ -+ dispatched++; -+ -+ if (bfqq != bfqd->in_service_queue) { -+ if (likely(bfqd->in_service_queue)) { -+ bfqd->in_service_queue->injected_service += -+ bfq_serv_to_charge(rq, bfqq); -+ bfq_log_bfqq(bfqd, bfqd->in_service_queue, -+ "injected_service increased to %d", -+ bfqd->in_service_queue->injected_service); -+ } -+ return dispatched; -+ } -+ -+ /* -+ * If weight raising has to terminate for bfqq, then next -+ * function causes an immediate update of bfqq's weight, -+ * without waiting for next activation. As a consequence, on -+ * expiration, bfqq will be timestamped as if has never been -+ * weight-raised during this service slot, even if it has -+ * received part or even most of the service as a -+ * weight-raised queue. This inflates bfqq's timestamps, which -+ * is beneficial, as bfqq is then more willing to leave the -+ * device immediately to possible other weight-raised queues. -+ */ -+ bfq_update_wr_data(bfqd, bfqq); -+ -+ if (!bfqd->in_service_bic) { -+ atomic_long_inc(&RQ_BIC(rq)->icq.ioc->refcount); -+ bfqd->in_service_bic = RQ_BIC(rq); -+ BUG_ON(!bfqd->in_service_bic); -+ } -+ -+ if (bfq_tot_busy_queues(bfqd) > 1 && bfq_class_idle(bfqq)) -+ goto expire; -+ -+ return dispatched; -+ -+expire: -+ bfq_bfqq_expire(bfqd, bfqq, false, BFQ_BFQQ_BUDGET_EXHAUSTED); -+ return dispatched; -+} -+ -+static int __bfq_forced_dispatch_bfqq(struct bfq_queue *bfqq) -+{ -+ int dispatched = 0; -+ -+ while (bfqq->next_rq) { -+ bfq_dispatch_insert(bfqq->bfqd->queue, bfqq->next_rq); -+ dispatched++; -+ } -+ -+ BUG_ON(!list_empty(&bfqq->fifo)); -+ return dispatched; -+} -+ -+/* -+ * Drain our current requests. -+ * Used for barriers and when switching io schedulers on-the-fly. -+ */ -+static int bfq_forced_dispatch(struct bfq_data *bfqd) -+{ -+ struct bfq_queue *bfqq, *n; -+ struct bfq_service_tree *st; -+ int dispatched = 0; -+ -+ bfqq = bfqd->in_service_queue; -+ if (bfqq) -+ __bfq_bfqq_expire(bfqd, bfqq); -+ -+ /* -+ * Loop through classes, and be careful to leave the scheduler -+ * in a consistent state, as feedback mechanisms and vtime -+ * updates cannot be disabled during the process. 
-+ */ -+ list_for_each_entry_safe(bfqq, n, &bfqd->active_list, bfqq_list) { -+ st = bfq_entity_service_tree(&bfqq->entity); -+ -+ dispatched += __bfq_forced_dispatch_bfqq(bfqq); -+ -+ bfqq->max_budget = bfq_max_budget(bfqd); -+ bfq_forget_idle(st); -+ } -+ -+ BUG_ON(bfq_tot_busy_queues(bfqd) != 0); -+ -+ return dispatched; -+} -+ -+static int bfq_dispatch_requests(struct request_queue *q, int force) -+{ -+ struct bfq_data *bfqd = q->elevator->elevator_data; -+ struct bfq_queue *bfqq; -+ -+ bfq_log(bfqd, "%d busy queues", bfq_tot_busy_queues(bfqd)); -+ -+ if (bfq_tot_busy_queues(bfqd) == 0) -+ return 0; -+ -+ if (unlikely(force)) -+ return bfq_forced_dispatch(bfqd); -+ -+ /* -+ * Force device to serve one request at a time if -+ * strict_guarantees is true. Forcing this service scheme is -+ * currently the ONLY way to guarantee that the request -+ * service order enforced by the scheduler is respected by a -+ * queueing device. Otherwise the device is free even to make -+ * some unlucky request wait for as long as the device -+ * wishes. -+ * -+ * Of course, serving one request at at time may cause loss of -+ * throughput. -+ */ -+ if (bfqd->strict_guarantees && bfqd->rq_in_driver > 0) -+ return 0; -+ -+ bfqq = bfq_select_queue(bfqd); -+ if (!bfqq) -+ return 0; -+ -+ BUG_ON(bfqq == bfqd->in_service_queue && -+ bfqq->entity.budget < bfqq->entity.service); -+ -+ BUG_ON(bfqq == bfqd->in_service_queue && -+ bfq_bfqq_wait_request(bfqq)); -+ -+ if (!bfq_dispatch_request(bfqd, bfqq)) -+ return 0; -+ -+ bfq_log_bfqq(bfqd, bfqq, "%s request", -+ bfq_bfqq_sync(bfqq) ? "sync" : "async"); -+ -+ BUG_ON(bfqq->next_rq == NULL && -+ bfqq->entity.budget < bfqq->entity.service); -+ return 1; -+} -+ -+/* -+ * Task holds one reference to the queue, dropped when task exits. Each rq -+ * in-flight on this queue also holds a reference, dropped when rq is freed. -+ * -+ * Queue lock must be held here. Recall not to use bfqq after calling -+ * this function on it. -+ */ -+static void bfq_put_queue(struct bfq_queue *bfqq) -+{ -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ struct bfq_group *bfqg = bfqq_group(bfqq); -+#endif -+ -+ BUG_ON(bfqq->ref <= 0); -+ -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "%p %d", bfqq, bfqq->ref); -+ bfqq->ref--; -+ if (bfqq->ref) -+ return; -+ -+ BUG_ON(rb_first(&bfqq->sort_list)); -+ BUG_ON(bfqq->allocated[READ] + bfqq->allocated[WRITE] != 0); -+ BUG_ON(bfqq->entity.tree); -+ BUG_ON(bfq_bfqq_busy(bfqq)); -+ -+ if (!hlist_unhashed(&bfqq->burst_list_node)) { -+ hlist_del_init(&bfqq->burst_list_node); -+ /* -+ * Decrement also burst size after the removal, if the -+ * process associated with bfqq is exiting, and thus -+ * does not contribute to the burst any longer. This -+ * decrement helps filter out false positives of large -+ * bursts, when some short-lived process (often due to -+ * the execution of commands by some service) happens -+ * to start and exit while a complex application is -+ * starting, and thus spawning several processes that -+ * do I/O (and that *must not* be treated as a large -+ * burst, see comments on bfq_handle_burst). -+ * -+ * In particular, the decrement is performed only if: -+ * 1) bfqq is not a merged queue, because, if it is, -+ * then this free of bfqq is not triggered by the exit -+ * of the process bfqq is associated with, but exactly -+ * by the fact that bfqq has just been merged. -+ * 2) burst_size is greater than 0, to handle -+ * unbalanced decrements. 
Unbalanced decrements may -+ * happen in te following case: bfqq is inserted into -+ * the current burst list--without incrementing -+ * bust_size--because of a split, but the current -+ * burst list is not the burst list bfqq belonged to -+ * (see comments on the case of a split in -+ * bfq_set_request). -+ */ -+ if (bfqq->bic && bfqq->bfqd->burst_size > 0) -+ bfqq->bfqd->burst_size--; -+ } -+ -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "%p freed", bfqq); -+ -+ kmem_cache_free(bfq_pool, bfqq); -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ bfqg_put(bfqg); -+#endif -+} -+ -+static void bfq_put_cooperator(struct bfq_queue *bfqq) -+{ -+ struct bfq_queue *__bfqq, *next; -+ -+ /* -+ * If this queue was scheduled to merge with another queue, be -+ * sure to drop the reference taken on that queue (and others in -+ * the merge chain). See bfq_setup_merge and bfq_merge_bfqqs. -+ */ -+ __bfqq = bfqq->new_bfqq; -+ while (__bfqq) { -+ if (__bfqq == bfqq) -+ break; -+ next = __bfqq->new_bfqq; -+ bfq_put_queue(__bfqq); -+ __bfqq = next; -+ } -+} -+ -+static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) -+{ -+ if (bfqq == bfqd->in_service_queue) { -+ __bfq_bfqq_expire(bfqd, bfqq); -+ bfq_schedule_dispatch(bfqd); -+ } -+ -+ bfq_log_bfqq(bfqd, bfqq, "%p, %d", bfqq, bfqq->ref); -+ -+ bfq_put_cooperator(bfqq); -+ -+ bfq_put_queue(bfqq); /* release process reference */ -+} -+ -+static void bfq_init_icq(struct io_cq *icq) -+{ -+ icq_to_bic(icq)->ttime.last_end_request = ktime_get_ns() - (1ULL<<32); -+} -+ -+static void bfq_exit_icq(struct io_cq *icq) -+{ -+ struct bfq_io_cq *bic = icq_to_bic(icq); -+ struct bfq_data *bfqd = bic_to_bfqd(bic); -+ -+ if (bic_to_bfqq(bic, false)) { -+ bfq_exit_bfqq(bfqd, bic_to_bfqq(bic, false)); -+ bic_set_bfqq(bic, NULL, false); -+ } -+ -+ if (bic_to_bfqq(bic, true)) { -+ /* -+ * If the bic is using a shared queue, put the reference -+ * taken on the io_context when the bic started using a -+ * shared bfq_queue. -+ */ -+ if (bfq_bfqq_coop(bic_to_bfqq(bic, true))) -+ put_io_context(icq->ioc); -+ bfq_exit_bfqq(bfqd, bic_to_bfqq(bic, true)); -+ bic_set_bfqq(bic, NULL, true); -+ } -+} -+ -+/* -+ * Update the entity prio values; note that the new values will not -+ * be used until the next (re)activation. -+ */ -+static void bfq_set_next_ioprio_data(struct bfq_queue *bfqq, -+ struct bfq_io_cq *bic) -+{ -+ struct task_struct *tsk = current; -+ int ioprio_class; -+ -+ ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio); -+ switch (ioprio_class) { -+ default: -+ dev_err(bfqq->bfqd->queue->backing_dev_info->dev, -+ "bfq: bad prio class %d\n", ioprio_class); -+ case IOPRIO_CLASS_NONE: -+ /* -+ * No prio set, inherit CPU scheduling settings. 
-+ */ -+ bfqq->new_ioprio = task_nice_ioprio(tsk); -+ bfqq->new_ioprio_class = task_nice_ioclass(tsk); -+ break; -+ case IOPRIO_CLASS_RT: -+ bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio); -+ bfqq->new_ioprio_class = IOPRIO_CLASS_RT; -+ break; -+ case IOPRIO_CLASS_BE: -+ bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio); -+ bfqq->new_ioprio_class = IOPRIO_CLASS_BE; -+ break; -+ case IOPRIO_CLASS_IDLE: -+ bfqq->new_ioprio_class = IOPRIO_CLASS_IDLE; -+ bfqq->new_ioprio = 7; -+ break; -+ } -+ -+ if (bfqq->new_ioprio >= IOPRIO_BE_NR) { -+ pr_crit("bfq_set_next_ioprio_data: new_ioprio %d\n", -+ bfqq->new_ioprio); -+ BUG(); -+ } -+ -+ bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->new_ioprio); -+ bfqq->entity.prio_changed = 1; -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "bic_class %d prio %d class %d", -+ ioprio_class, bfqq->new_ioprio, bfqq->new_ioprio_class); -+} -+ -+static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio) -+{ -+ struct bfq_data *bfqd = bic_to_bfqd(bic); -+ struct bfq_queue *bfqq; -+ unsigned long uninitialized_var(flags); -+ int ioprio = bic->icq.ioc->ioprio; -+ -+ /* -+ * This condition may trigger on a newly created bic, be sure to -+ * drop the lock before returning. -+ */ -+ if (unlikely(!bfqd) || likely(bic->ioprio == ioprio)) -+ return; -+ -+ bic->ioprio = ioprio; -+ -+ bfqq = bic_to_bfqq(bic, false); -+ if (bfqq) { -+ /* release process reference on this queue */ -+ bfq_put_queue(bfqq); -+ bfqq = bfq_get_queue(bfqd, bio, BLK_RW_ASYNC, bic); -+ bic_set_bfqq(bic, bfqq, false); -+ bfq_log_bfqq(bfqd, bfqq, -+ "bfqq %p %d", -+ bfqq, bfqq->ref); -+ } -+ -+ bfqq = bic_to_bfqq(bic, true); -+ if (bfqq) -+ bfq_set_next_ioprio_data(bfqq, bic); -+} -+ -+static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, -+ struct bfq_io_cq *bic, pid_t pid, int is_sync) -+{ -+ RB_CLEAR_NODE(&bfqq->entity.rb_node); -+ INIT_LIST_HEAD(&bfqq->fifo); -+ INIT_HLIST_NODE(&bfqq->burst_list_node); -+ BUG_ON(!hlist_unhashed(&bfqq->burst_list_node)); -+ -+ bfqq->ref = 0; -+ bfqq->bfqd = bfqd; -+ -+ if (bic) -+ bfq_set_next_ioprio_data(bfqq, bic); -+ -+ if (is_sync) { -+ /* -+ * No need to mark as has_short_ttime if in -+ * idle_class, because no device idling is performed -+ * for queues in idle class -+ */ -+ if (!bfq_class_idle(bfqq)) -+ /* tentatively mark as has_short_ttime */ -+ bfq_mark_bfqq_has_short_ttime(bfqq); -+ bfq_mark_bfqq_sync(bfqq); -+ bfq_mark_bfqq_just_created(bfqq); -+ /* -+ * Aggressively inject a lot of service: up to 90%. -+ * This coefficient remains constant during bfqq life, -+ * but this behavior might be changed, after enough -+ * testing and tuning. -+ */ -+ bfqq->inject_coeff = 1; -+ } else -+ bfq_clear_bfqq_sync(bfqq); -+ bfq_mark_bfqq_IO_bound(bfqq); -+ -+ /* Tentative initial value to trade off between thr and lat */ -+ bfqq->max_budget = (2 * bfq_max_budget(bfqd)) / 3; -+ bfqq->pid = pid; -+ -+ bfqq->wr_coeff = 1; -+ bfqq->last_wr_start_finish = jiffies; -+ bfqq->wr_start_at_switch_to_srt = bfq_smallest_from_now(); -+ bfqq->budget_timeout = bfq_smallest_from_now(); -+ bfqq->split_time = bfq_smallest_from_now(); -+ -+ /* -+ * To not forget the possibly high bandwidth consumed by a -+ * process/queue in the recent past, -+ * bfq_bfqq_softrt_next_start() returns a value at least equal -+ * to the current value of bfqq->soft_rt_next_start (see -+ * comments on bfq_bfqq_softrt_next_start). Set -+ * soft_rt_next_start to now, to mean that bfqq has consumed -+ * no bandwidth so far. 
-+ */ -+ bfqq->soft_rt_next_start = jiffies; -+ -+ /* first request is almost certainly seeky */ -+ bfqq->seek_history = 1; -+} -+ -+static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd, -+ struct bfq_group *bfqg, -+ int ioprio_class, int ioprio) -+{ -+ switch (ioprio_class) { -+ case IOPRIO_CLASS_RT: -+ return &bfqg->async_bfqq[0][ioprio]; -+ case IOPRIO_CLASS_NONE: -+ ioprio = IOPRIO_NORM; -+ /* fall through */ -+ case IOPRIO_CLASS_BE: -+ return &bfqg->async_bfqq[1][ioprio]; -+ case IOPRIO_CLASS_IDLE: -+ return &bfqg->async_idle_bfqq; -+ default: -+ BUG(); -+ } -+} -+ -+static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd, -+ struct bio *bio, bool is_sync, -+ struct bfq_io_cq *bic) -+{ -+ const int ioprio = IOPRIO_PRIO_DATA(bic->ioprio); -+ const int ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio); -+ struct bfq_queue **async_bfqq = NULL; -+ struct bfq_queue *bfqq; -+ struct bfq_group *bfqg; -+ -+ rcu_read_lock(); -+ -+ bfqg = bfq_find_set_group(bfqd, bio_blkcg(bio)); -+ if (!bfqg) { -+ bfqq = &bfqd->oom_bfqq; -+ goto out; -+ } -+ -+ if (!is_sync) { -+ async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class, -+ ioprio); -+ bfqq = *async_bfqq; -+ if (bfqq) -+ goto out; -+ } -+ -+ bfqq = kmem_cache_alloc_node(bfq_pool, -+ GFP_NOWAIT | __GFP_ZERO | __GFP_NOWARN, -+ bfqd->queue->node); -+ -+ if (bfqq) { -+ bfq_init_bfqq(bfqd, bfqq, bic, current->pid, -+ is_sync); -+ bfq_init_entity(&bfqq->entity, bfqg); -+ bfq_log_bfqq(bfqd, bfqq, "allocated"); -+ } else { -+ bfqq = &bfqd->oom_bfqq; -+ bfq_log_bfqq(bfqd, bfqq, "using oom bfqq"); -+ goto out; -+ } -+ -+ /* -+ * Pin the queue now that it's allocated, scheduler exit will -+ * prune it. -+ */ -+ if (async_bfqq) { -+ bfqq->ref++; /* -+ * Extra group reference, w.r.t. sync -+ * queue. This extra reference is removed -+ * only if bfqq->bfqg disappears, to -+ * guarantee that this queue is not freed -+ * until its group goes away. -+ */ -+ bfq_log_bfqq(bfqd, bfqq, "bfqq not in async: %p, %d", -+ bfqq, bfqq->ref); -+ *async_bfqq = bfqq; -+ } -+ -+out: -+ bfqq->ref++; /* get a process reference to this queue */ -+ bfq_log_bfqq(bfqd, bfqq, "at end: %p, %d", bfqq, bfqq->ref); -+ rcu_read_unlock(); -+ return bfqq; -+} -+ -+static void bfq_update_io_thinktime(struct bfq_data *bfqd, -+ struct bfq_io_cq *bic) -+{ -+ struct bfq_ttime *ttime = &bic->ttime; -+ u64 elapsed = ktime_get_ns() - bic->ttime.last_end_request; -+ -+ elapsed = min_t(u64, elapsed, 2 * bfqd->bfq_slice_idle); -+ -+ ttime->ttime_samples = (7*bic->ttime.ttime_samples + 256) / 8; -+ ttime->ttime_total = div_u64(7*ttime->ttime_total + 256*elapsed, 8); -+ ttime->ttime_mean = div64_ul(ttime->ttime_total + 128, -+ ttime->ttime_samples); -+} -+ -+static void -+bfq_update_io_seektime(struct bfq_data *bfqd, struct bfq_queue *bfqq, -+ struct request *rq) -+{ -+ bfqq->seek_history <<= 1; -+ bfqq->seek_history |= BFQ_RQ_SEEKY(bfqd, bfqq->last_request_pos, rq); -+} -+ -+static void bfq_update_has_short_ttime(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq, -+ struct bfq_io_cq *bic) -+{ -+ bool has_short_ttime = true; -+ -+ /* -+ * No need to update has_short_ttime if bfqq is async or in -+ * idle io prio class, or if bfq_slice_idle is zero, because -+ * no device idling is performed for bfqq in this case. -+ */ -+ if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq) || -+ bfqd->bfq_slice_idle == 0) -+ return; -+ -+ /* Idle window just restored, statistics are meaningless. 
*/ -+ if (time_is_after_eq_jiffies(bfqq->split_time + -+ bfqd->bfq_wr_min_idle_time)) -+ return; -+ -+ /* Think time is infinite if no process is linked to -+ * bfqq. Otherwise check average think time to -+ * decide whether to mark as has_short_ttime -+ */ -+ if (atomic_read(&bic->icq.ioc->active_ref) == 0 || -+ (bfq_sample_valid(bic->ttime.ttime_samples) && -+ bic->ttime.ttime_mean > bfqd->bfq_slice_idle)) -+ has_short_ttime = false; -+ -+ bfq_log_bfqq(bfqd, bfqq, "has_short_ttime %d", -+ has_short_ttime); -+ -+ if (has_short_ttime) -+ bfq_mark_bfqq_has_short_ttime(bfqq); -+ else -+ bfq_clear_bfqq_has_short_ttime(bfqq); -+} -+ -+/* -+ * Called when a new fs request (rq) is added to bfqq. Check if there's -+ * something we should do about it. -+ */ -+static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq, -+ struct request *rq) -+{ -+ struct bfq_io_cq *bic = RQ_BIC(rq); -+ -+ if (rq->cmd_flags & REQ_META) -+ bfqq->meta_pending++; -+ -+ bfq_update_io_thinktime(bfqd, bic); -+ bfq_update_has_short_ttime(bfqd, bfqq, bic); -+ bfq_update_io_seektime(bfqd, bfqq, rq); -+ -+ bfq_log_bfqq(bfqd, bfqq, -+ "has_short_ttime=%d (seeky %d)", -+ bfq_bfqq_has_short_ttime(bfqq), BFQQ_SEEKY(bfqq)); -+ -+ bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq); -+ -+ if (bfqq == bfqd->in_service_queue && bfq_bfqq_wait_request(bfqq)) { -+ bool small_req = bfqq->queued[rq_is_sync(rq)] == 1 && -+ blk_rq_sectors(rq) < 32; -+ bool budget_timeout = bfq_bfqq_budget_timeout(bfqq); -+ -+ /* -+ * There is just this request queued: if -+ * - the request is small, and -+ * - we are idling to boost throughput, and -+ * - the queue is not to be expired, -+ * then just exit. -+ * -+ * In this way, if the device is being idled to wait -+ * for a new request from the in-service queue, we -+ * avoid unplugging the device and committing the -+ * device to serve just a small request. In contrast -+ * we wait for the block layer to decide when to -+ * unplug the device: hopefully, new requests will be -+ * merged to this one quickly, then the device will be -+ * unplugged and larger requests will be dispatched. -+ */ -+ if (small_req && idling_boosts_thr_without_issues(bfqd, bfqq) && -+ !budget_timeout) -+ return; -+ -+ /* -+ * A large enough request arrived, or idling is being -+ * performed to preserve service guarantees, or -+ * finally the queue is to be expired: in all these -+ * cases disk idling is to be stopped, so clear -+ * wait_request flag and reset timer. -+ */ -+ bfq_clear_bfqq_wait_request(bfqq); -+ hrtimer_try_to_cancel(&bfqd->idle_slice_timer); -+ bfqg_stats_update_idle_time(bfqq_group(bfqq)); -+ -+ /* -+ * The queue is not empty, because a new request just -+ * arrived. Hence we can safely expire the queue, in -+ * case of budget timeout, without risking that the -+ * timestamps of the queue are not updated correctly. -+ * See [1] for more details. -+ */ -+ if (budget_timeout) -+ bfq_bfqq_expire(bfqd, bfqq, false, -+ BFQ_BFQQ_BUDGET_TIMEOUT); -+ -+ /* -+ * Let the request rip immediately, or let a new queue be -+ * selected if bfqq has just been expired. 
-+ */ -+ __blk_run_queue(bfqd->queue); -+ } -+} -+ -+static void bfq_insert_request(struct request_queue *q, struct request *rq) -+{ -+ struct bfq_data *bfqd = q->elevator->elevator_data; -+ struct bfq_queue *bfqq = RQ_BFQQ(rq), *new_bfqq; -+ -+ assert_spin_locked(bfqd->queue->queue_lock); -+ -+ /* -+ * An unplug may trigger a requeue of a request from the device -+ * driver: make sure we are in process context while trying to -+ * merge two bfq_queues. -+ */ -+ if (!in_interrupt()) { -+ new_bfqq = bfq_setup_cooperator(bfqd, bfqq, rq, true); -+ if (new_bfqq) { -+ if (bic_to_bfqq(RQ_BIC(rq), 1) != bfqq) -+ new_bfqq = bic_to_bfqq(RQ_BIC(rq), 1); -+ /* -+ * Release the request's reference to the old bfqq -+ * and make sure one is taken to the shared queue. -+ */ -+ new_bfqq->allocated[rq_data_dir(rq)]++; -+ bfqq->allocated[rq_data_dir(rq)]--; -+ new_bfqq->ref++; -+ if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq) -+ bfq_merge_bfqqs(bfqd, RQ_BIC(rq), -+ bfqq, new_bfqq); -+ -+ bfq_clear_bfqq_just_created(bfqq); -+ /* -+ * rq is about to be enqueued into new_bfqq, -+ * release rq reference on bfqq -+ */ -+ bfq_put_queue(bfqq); -+ rq->elv.priv[1] = new_bfqq; -+ bfqq = new_bfqq; -+ } -+ } -+ -+ bfq_add_request(rq); -+ -+ rq->fifo_time = ktime_get_ns() + bfqd->bfq_fifo_expire[rq_is_sync(rq)]; -+ list_add_tail(&rq->queuelist, &bfqq->fifo); -+ -+ bfq_rq_enqueued(bfqd, bfqq, rq); -+} -+ -+static void bfq_update_hw_tag(struct bfq_data *bfqd) -+{ -+ struct bfq_queue *bfqq = bfqd->in_service_queue; -+ -+ bfqd->max_rq_in_driver = max_t(int, bfqd->max_rq_in_driver, -+ bfqd->rq_in_driver); -+ -+ if (bfqd->hw_tag == 1) -+ return; -+ -+ /* -+ * This sample is valid if the number of outstanding requests -+ * is large enough to allow a queueing behavior. Note that the -+ * sum is not exact, as it's not taking into account deactivated -+ * requests. -+ */ -+ if (bfqd->rq_in_driver + bfqd->queued <= BFQ_HW_QUEUE_THRESHOLD) -+ return; -+ -+ /* -+ * If active queue hasn't enough requests and can idle, bfq might not -+ * dispatch sufficient requests to hardware. Don't zero hw_tag in this -+ * case -+ */ -+ if (bfqq && bfq_bfqq_has_short_ttime(bfqq) && -+ bfqq->dispatched + bfqq->queued[0] + bfqq->queued[1] < -+ BFQ_HW_QUEUE_THRESHOLD && bfqd->rq_in_driver < BFQ_HW_QUEUE_THRESHOLD) -+ return; -+ -+ if (bfqd->hw_tag_samples++ < BFQ_HW_QUEUE_SAMPLES) -+ return; -+ -+ bfqd->hw_tag = bfqd->max_rq_in_driver > BFQ_HW_QUEUE_THRESHOLD; -+ bfqd->max_rq_in_driver = 0; -+ bfqd->hw_tag_samples = 0; -+} -+ -+static void bfq_completed_request(struct request_queue *q, struct request *rq) -+{ -+ struct bfq_queue *bfqq = RQ_BFQQ(rq); -+ struct bfq_data *bfqd = bfqq->bfqd; -+ u64 now_ns; -+ u32 delta_us; -+ -+ bfq_log_bfqq(bfqd, bfqq, "completed one req with %u sects left", -+ blk_rq_sectors(rq)); -+ -+ assert_spin_locked(bfqd->queue->queue_lock); -+ bfq_update_hw_tag(bfqd); -+ -+ BUG_ON(!bfqd->rq_in_driver); -+ BUG_ON(!bfqq->dispatched); -+ bfqd->rq_in_driver--; -+ bfqq->dispatched--; -+ bfqg_stats_update_completion(bfqq_group(bfqq), -+ rq->start_time_ns, -+ rq->io_start_time_ns, -+ rq->cmd_flags); -+ -+ if (!bfqq->dispatched && !bfq_bfqq_busy(bfqq)) { -+ BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list)); -+ /* -+ * Set budget_timeout (which we overload to store the -+ * time at which the queue remains with no backlog and -+ * no outstanding request; used by the weight-raising -+ * mechanism). 
-+ */ -+ bfqq->budget_timeout = jiffies; -+ -+ bfq_weights_tree_remove(bfqd, bfqq); -+ } -+ -+ now_ns = ktime_get_ns(); -+ -+ RQ_BIC(rq)->ttime.last_end_request = now_ns; -+ -+ /* -+ * Using us instead of ns, to get a reasonable precision in -+ * computing rate in next check. -+ */ -+ delta_us = div_u64(now_ns - bfqd->last_completion, NSEC_PER_USEC); -+ -+ bfq_log(bfqd, "delta %uus/%luus max_size %u rate %llu/%llu", -+ delta_us, BFQ_MIN_TT/NSEC_PER_USEC, bfqd->last_rq_max_size, -+ delta_us > 0 ? -+ (USEC_PER_SEC* -+ (u64)((bfqd->last_rq_max_size<>BFQ_RATE_SHIFT : -+ (USEC_PER_SEC* -+ (u64)(bfqd->last_rq_max_size<>BFQ_RATE_SHIFT, -+ (USEC_PER_SEC*(u64)(1UL<<(BFQ_RATE_SHIFT-10)))>>BFQ_RATE_SHIFT); -+ -+ /* -+ * If the request took rather long to complete, and, according -+ * to the maximum request size recorded, this completion latency -+ * implies that the request was certainly served at a very low -+ * rate (less than 1M sectors/sec), then the whole observation -+ * interval that lasts up to this time instant cannot be a -+ * valid time interval for computing a new peak rate. Invoke -+ * bfq_update_rate_reset to have the following three steps -+ * taken: -+ * - close the observation interval at the last (previous) -+ * request dispatch or completion -+ * - compute rate, if possible, for that observation interval -+ * - reset to zero samples, which will trigger a proper -+ * re-initialization of the observation interval on next -+ * dispatch -+ */ -+ if (delta_us > BFQ_MIN_TT/NSEC_PER_USEC && -+ (bfqd->last_rq_max_size<last_completion = now_ns; -+ -+ /* -+ * If we are waiting to discover whether the request pattern -+ * of the task associated with the queue is actually -+ * isochronous, and both requisites for this condition to hold -+ * are now satisfied, then compute soft_rt_next_start (see the -+ * comments on the function bfq_bfqq_softrt_next_start()). We -+ * do not compute soft_rt_next_start if bfqq is in interactive -+ * weight raising (see the comments in bfq_bfqq_expire() for -+ * an explanation). We schedule this delayed update when bfqq -+ * expires, if it still has in-flight requests. -+ */ -+ if (bfq_bfqq_softrt_update(bfqq) && bfqq->dispatched == 0 && -+ RB_EMPTY_ROOT(&bfqq->sort_list) && -+ bfqq->wr_coeff != bfqd->bfq_wr_coeff) -+ bfqq->soft_rt_next_start = -+ bfq_bfqq_softrt_next_start(bfqd, bfqq); -+ -+ /* -+ * If this is the in-service queue, check if it needs to be expired, -+ * or if we want to idle in case it has no pending requests. -+ */ -+ if (bfqd->in_service_queue == bfqq) { -+ if (bfq_bfqq_must_idle(bfqq)) { -+ if (bfqq->dispatched == 0) -+ bfq_arm_slice_timer(bfqd); -+ /* -+ * If we get here, we do not expire bfqq, even -+ * if bfqq was in budget timeout or had no -+ * more requests (as controlled in the next -+ * conditional instructions). The reason for -+ * not expiring bfqq is as follows. -+ * -+ * Here bfqq->dispatched > 0 holds, but -+ * bfq_bfqq_must_idle() returned true. This -+ * implies that, even if no request arrives -+ * for bfqq before bfqq->dispatched reaches 0, -+ * bfqq will, however, not be expired on the -+ * completion event that causes bfqq->dispatch -+ * to reach zero. In contrast, on this event, -+ * bfqq will start enjoying device idling -+ * (I/O-dispatch plugging). -+ * -+ * But, if we expired bfqq here, bfqq would -+ * not have the chance to enjoy device idling -+ * when bfqq->dispatched finally reaches -+ * zero. This would expose bfqq to violation -+ * of its reserved service guarantees. 
-+ */ -+ goto out; -+ } else if (bfq_may_expire_for_budg_timeout(bfqq)) -+ bfq_bfqq_expire(bfqd, bfqq, false, -+ BFQ_BFQQ_BUDGET_TIMEOUT); -+ else if (RB_EMPTY_ROOT(&bfqq->sort_list) && -+ (bfqq->dispatched == 0 || -+ !bfq_better_to_idle(bfqq))) -+ bfq_bfqq_expire(bfqd, bfqq, false, -+ BFQ_BFQQ_NO_MORE_REQUESTS); -+ } -+ -+ if (!bfqd->rq_in_driver) -+ bfq_schedule_dispatch(bfqd); -+ -+out: -+ return; -+} -+ -+static int __bfq_may_queue(struct bfq_queue *bfqq) -+{ -+ if (bfq_bfqq_wait_request(bfqq) && bfq_bfqq_must_alloc(bfqq)) { -+ bfq_clear_bfqq_must_alloc(bfqq); -+ return ELV_MQUEUE_MUST; -+ } -+ -+ return ELV_MQUEUE_MAY; -+} -+ -+static int bfq_may_queue(struct request_queue *q, unsigned int op) -+{ -+ struct bfq_data *bfqd = q->elevator->elevator_data; -+ struct task_struct *tsk = current; -+ struct bfq_io_cq *bic; -+ struct bfq_queue *bfqq; -+ -+ /* -+ * Don't force setup of a queue from here, as a call to may_queue -+ * does not necessarily imply that a request actually will be -+ * queued. So just lookup a possibly existing queue, or return -+ * 'may queue' if that fails. -+ */ -+ bic = bfq_bic_lookup(bfqd, tsk->io_context); -+ if (!bic) -+ return ELV_MQUEUE_MAY; -+ -+ bfqq = bic_to_bfqq(bic, op_is_sync(op)); -+ if (bfqq) -+ return __bfq_may_queue(bfqq); -+ -+ return ELV_MQUEUE_MAY; -+} -+ -+/* -+ * Queue lock held here. -+ */ -+static void bfq_put_request(struct request *rq) -+{ -+ struct bfq_queue *bfqq = RQ_BFQQ(rq); -+ -+ if (bfqq) { -+ const int rw = rq_data_dir(rq); -+ -+ BUG_ON(!bfqq->allocated[rw]); -+ bfqq->allocated[rw]--; -+ -+ rq->elv.priv[0] = NULL; -+ rq->elv.priv[1] = NULL; -+ -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "%p, %d", -+ bfqq, bfqq->ref); -+ bfq_put_queue(bfqq); -+ } -+} -+ -+/* -+ * Returns NULL if a new bfqq should be allocated, or the old bfqq if this -+ * was the last process referring to that bfqq. -+ */ -+static struct bfq_queue * -+bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq) -+{ -+ bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue"); -+ -+ put_io_context(bic->icq.ioc); -+ -+ if (bfqq_process_refs(bfqq) == 1) { -+ bfqq->pid = current->pid; -+ bfq_clear_bfqq_coop(bfqq); -+ bfq_clear_bfqq_split_coop(bfqq); -+ return bfqq; -+ } -+ -+ bic_set_bfqq(bic, NULL, 1); -+ -+ bfq_put_cooperator(bfqq); -+ -+ bfq_put_queue(bfqq); -+ return NULL; -+} -+ -+/* -+ * Allocate bfq data structures associated with this request. 
-+ */ -+static int bfq_set_request(struct request_queue *q, struct request *rq, -+ struct bio *bio, gfp_t gfp_mask) -+{ -+ struct bfq_data *bfqd = q->elevator->elevator_data; -+ struct bfq_io_cq *bic = icq_to_bic(rq->elv.icq); -+ const int rw = rq_data_dir(rq); -+ const int is_sync = rq_is_sync(rq); -+ struct bfq_queue *bfqq; -+ unsigned long flags; -+ bool bfqq_already_existing = false, split = false; -+ -+ spin_lock_irqsave(q->queue_lock, flags); -+ -+ if (!bic) -+ goto queue_fail; -+ -+ bfq_check_ioprio_change(bic, bio); -+ -+ bfq_bic_update_cgroup(bic, bio); -+ -+new_queue: -+ bfqq = bic_to_bfqq(bic, is_sync); -+ if (!bfqq || bfqq == &bfqd->oom_bfqq) { -+ if (bfqq) -+ bfq_put_queue(bfqq); -+ bfqq = bfq_get_queue(bfqd, bio, is_sync, bic); -+ BUG_ON(!hlist_unhashed(&bfqq->burst_list_node)); -+ -+ bic_set_bfqq(bic, bfqq, is_sync); -+ if (split && is_sync) { -+ bfq_log_bfqq(bfqd, bfqq, -+ "was_in_list %d " -+ "was_in_large_burst %d " -+ "large burst in progress %d", -+ bic->was_in_burst_list, -+ bic->saved_in_large_burst, -+ bfqd->large_burst); -+ -+ if ((bic->was_in_burst_list && bfqd->large_burst) || -+ bic->saved_in_large_burst) { -+ bfq_log_bfqq(bfqd, bfqq, -+ "marking in " -+ "large burst"); -+ bfq_mark_bfqq_in_large_burst(bfqq); -+ } else { -+ bfq_log_bfqq(bfqd, bfqq, -+ "clearing in " -+ "large burst"); -+ bfq_clear_bfqq_in_large_burst(bfqq); -+ if (bic->was_in_burst_list) -+ /* -+ * If bfqq was in the current -+ * burst list before being -+ * merged, then we have to add -+ * it back. And we do not need -+ * to increase burst_size, as -+ * we did not decrement -+ * burst_size when we removed -+ * bfqq from the burst list as -+ * a consequence of a merge -+ * (see comments in -+ * bfq_put_queue). In this -+ * respect, it would be rather -+ * costly to know whether the -+ * current burst list is still -+ * the same burst list from -+ * which bfqq was removed on -+ * the merge. To avoid this -+ * cost, if bfqq was in a -+ * burst list, then we add -+ * bfqq to the current burst -+ * list without any further -+ * check. This can cause -+ * inappropriate insertions, -+ * but rarely enough to not -+ * harm the detection of large -+ * bursts significantly. -+ */ -+ hlist_add_head(&bfqq->burst_list_node, -+ &bfqd->burst_list); -+ } -+ bfqq->split_time = jiffies; -+ } -+ } else { -+ /* If the queue was seeky for too long, break it apart. */ -+ if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq)) { -+ bfq_log_bfqq(bfqd, bfqq, "breaking apart bfqq"); -+ -+ /* Update bic before losing reference to bfqq */ -+ if (bfq_bfqq_in_large_burst(bfqq)) -+ bic->saved_in_large_burst = true; -+ -+ bfqq = bfq_split_bfqq(bic, bfqq); -+ split = true; -+ if (!bfqq) -+ goto new_queue; -+ else -+ bfqq_already_existing = true; -+ } -+ } -+ -+ bfqq->allocated[rw]++; -+ bfqq->ref++; -+ bfq_log_bfqq(bfqd, bfqq, "bfqq %p, %d", bfqq, bfqq->ref); -+ -+ rq->elv.priv[0] = bic; -+ rq->elv.priv[1] = bfqq; -+ -+ /* -+ * If a bfq_queue has only one process reference, it is owned -+ * by only one bfq_io_cq: we can set the bic field of the -+ * bfq_queue to the address of that structure. Also, if the -+ * queue has just been split, mark a flag so that the -+ * information is available to the other scheduler hooks. -+ */ -+ if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) { -+ bfqq->bic = bic; -+ if (split) { -+ /* -+ * If the queue has just been split from a shared -+ * queue, restore the idle window and the possible -+ * weight raising period. 
-+ */ -+ bfq_bfqq_resume_state(bfqq, bfqd, bic, -+ bfqq_already_existing); -+ } -+ } -+ -+ if (unlikely(bfq_bfqq_just_created(bfqq))) -+ bfq_handle_burst(bfqd, bfqq); -+ -+ spin_unlock_irqrestore(q->queue_lock, flags); -+ -+ return 0; -+ -+queue_fail: -+ bfq_schedule_dispatch(bfqd); -+ spin_unlock_irqrestore(q->queue_lock, flags); -+ -+ return 1; -+} -+ -+static void bfq_kick_queue(struct work_struct *work) -+{ -+ struct bfq_data *bfqd = -+ container_of(work, struct bfq_data, unplug_work); -+ struct request_queue *q = bfqd->queue; -+ -+ spin_lock_irq(q->queue_lock); -+ __blk_run_queue(q); -+ spin_unlock_irq(q->queue_lock); -+} -+ -+/* -+ * Handler of the expiration of the timer running if the in-service queue -+ * is idling inside its time slice. -+ */ -+static enum hrtimer_restart bfq_idle_slice_timer(struct hrtimer *timer) -+{ -+ struct bfq_data *bfqd = container_of(timer, struct bfq_data, -+ idle_slice_timer); -+ struct bfq_queue *bfqq; -+ unsigned long flags; -+ enum bfqq_expiration reason; -+ -+ spin_lock_irqsave(bfqd->queue->queue_lock, flags); -+ -+ bfqq = bfqd->in_service_queue; -+ /* -+ * Theoretical race here: the in-service queue can be NULL or -+ * different from the queue that was idling if the timer handler -+ * spins on the queue_lock and a new request arrives for the -+ * current queue and there is a full dispatch cycle that changes -+ * the in-service queue. This can hardly happen, but in the worst -+ * case we just expire a queue too early. -+ */ -+ if (bfqq) { -+ bfq_log_bfqq(bfqd, bfqq, "expired"); -+ bfq_clear_bfqq_wait_request(bfqq); -+ -+ if (bfq_bfqq_budget_timeout(bfqq)) -+ /* -+ * Also here the queue can be safely expired -+ * for budget timeout without wasting -+ * guarantees -+ */ -+ reason = BFQ_BFQQ_BUDGET_TIMEOUT; -+ else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0) -+ /* -+ * The queue may not be empty upon timer expiration, -+ * because we may not disable the timer when the -+ * first request of the in-service queue arrives -+ * during disk idling. -+ */ -+ reason = BFQ_BFQQ_TOO_IDLE; -+ else -+ goto schedule_dispatch; -+ -+ bfq_bfqq_expire(bfqd, bfqq, true, reason); -+ } -+ -+schedule_dispatch: -+ bfq_schedule_dispatch(bfqd); -+ -+ spin_unlock_irqrestore(bfqd->queue->queue_lock, flags); -+ return HRTIMER_NORESTART; -+} -+ -+static void bfq_shutdown_timer_wq(struct bfq_data *bfqd) -+{ -+ hrtimer_cancel(&bfqd->idle_slice_timer); -+ cancel_work_sync(&bfqd->unplug_work); -+} -+ -+static void __bfq_put_async_bfqq(struct bfq_data *bfqd, -+ struct bfq_queue **bfqq_ptr) -+{ -+ struct bfq_group *root_group = bfqd->root_group; -+ struct bfq_queue *bfqq = *bfqq_ptr; -+ -+ bfq_log(bfqd, "%p", bfqq); -+ if (bfqq) { -+ bfq_bfqq_move(bfqd, bfqq, root_group); -+ bfq_log_bfqq(bfqd, bfqq, "putting %p, %d", -+ bfqq, bfqq->ref); -+ bfq_put_queue(bfqq); -+ *bfqq_ptr = NULL; -+ } -+} -+ -+/* -+ * Release all the bfqg references to its async queues. If we are -+ * deallocating the group these queues may still contain requests, so -+ * we reparent them to the root cgroup (i.e., the only one that will -+ * exist for sure until all the requests on a device are gone). 
-+ */ -+static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg) -+{ -+ int i, j; -+ -+ for (i = 0; i < 2; i++) -+ for (j = 0; j < IOPRIO_BE_NR; j++) -+ __bfq_put_async_bfqq(bfqd, &bfqg->async_bfqq[i][j]); -+ -+ __bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq); -+} -+ -+static void bfq_exit_queue(struct elevator_queue *e) -+{ -+ struct bfq_data *bfqd = e->elevator_data; -+ struct request_queue *q = bfqd->queue; -+ struct bfq_queue *bfqq, *n; -+ -+ bfq_shutdown_timer_wq(bfqd); -+ -+ spin_lock_irq(q->queue_lock); -+ -+ BUG_ON(bfqd->in_service_queue); -+ list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list) -+ bfq_deactivate_bfqq(bfqd, bfqq, false, false); -+ -+ spin_unlock_irq(q->queue_lock); -+ -+ bfq_shutdown_timer_wq(bfqd); -+ -+ BUG_ON(hrtimer_active(&bfqd->idle_slice_timer)); -+ -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ /* release oom-queue reference to root group */ -+ bfqg_put(bfqd->root_group); -+ -+ blkcg_deactivate_policy(q, &blkcg_policy_bfq); -+#else -+ bfq_put_async_queues(bfqd, bfqd->root_group); -+ kfree(bfqd->root_group); -+#endif -+ -+ kfree(bfqd); -+} -+ -+static void bfq_init_root_group(struct bfq_group *root_group, -+ struct bfq_data *bfqd) -+{ -+ int i; -+ -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ root_group->entity.parent = NULL; -+ root_group->my_entity = NULL; -+ root_group->bfqd = bfqd; -+#endif -+ root_group->rq_pos_tree = RB_ROOT; -+ for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) -+ root_group->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT; -+ root_group->sched_data.bfq_class_idle_last_service = jiffies; -+} -+ -+static int bfq_init_queue(struct request_queue *q, struct elevator_type *e) -+{ -+ struct bfq_data *bfqd; -+ struct elevator_queue *eq; -+ -+ eq = elevator_alloc(q, e); -+ if (!eq) -+ return -ENOMEM; -+ -+ bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node); -+ if (!bfqd) { -+ kobject_put(&eq->kobj); -+ return -ENOMEM; -+ } -+ eq->elevator_data = bfqd; -+ -+ /* -+ * Our fallback bfqq if bfq_find_alloc_queue() runs into OOM issues. -+ * Grab a permanent reference to it, so that the normal code flow -+ * will not attempt to free it. -+ */ -+ bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, NULL, 1, 0); -+ bfqd->oom_bfqq.ref++; -+ bfqd->oom_bfqq.new_ioprio = BFQ_DEFAULT_QUEUE_IOPRIO; -+ bfqd->oom_bfqq.new_ioprio_class = IOPRIO_CLASS_BE; -+ bfqd->oom_bfqq.entity.new_weight = -+ bfq_ioprio_to_weight(bfqd->oom_bfqq.new_ioprio); -+ -+ /* oom_bfqq does not participate to bursts */ -+ bfq_clear_bfqq_just_created(&bfqd->oom_bfqq); -+ /* -+ * Trigger weight initialization, according to ioprio, at the -+ * oom_bfqq's first activation. The oom_bfqq's ioprio and ioprio -+ * class won't be changed any more. 
-+ */ -+ bfqd->oom_bfqq.entity.prio_changed = 1; -+ -+ bfqd->queue = q; -+ -+ spin_lock_irq(q->queue_lock); -+ q->elevator = eq; -+ spin_unlock_irq(q->queue_lock); -+ -+ bfqd->root_group = bfq_create_group_hierarchy(bfqd, q->node); -+ if (!bfqd->root_group) -+ goto out_free; -+ bfq_init_root_group(bfqd->root_group, bfqd); -+ bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group); -+ -+ hrtimer_init(&bfqd->idle_slice_timer, CLOCK_MONOTONIC, -+ HRTIMER_MODE_REL); -+ bfqd->idle_slice_timer.function = bfq_idle_slice_timer; -+ -+ bfqd->queue_weights_tree = RB_ROOT; -+ bfqd->num_groups_with_pending_reqs = 0; -+ -+ INIT_WORK(&bfqd->unplug_work, bfq_kick_queue); -+ -+ INIT_LIST_HEAD(&bfqd->active_list); -+ INIT_LIST_HEAD(&bfqd->idle_list); -+ INIT_HLIST_HEAD(&bfqd->burst_list); -+ -+ bfqd->hw_tag = -1; -+ -+ bfqd->bfq_max_budget = bfq_default_max_budget; -+ -+ bfqd->bfq_fifo_expire[0] = bfq_fifo_expire[0]; -+ bfqd->bfq_fifo_expire[1] = bfq_fifo_expire[1]; -+ bfqd->bfq_back_max = bfq_back_max; -+ bfqd->bfq_back_penalty = bfq_back_penalty; -+ bfqd->bfq_slice_idle = bfq_slice_idle; -+ bfqd->bfq_timeout = bfq_timeout; -+ -+ bfqd->bfq_requests_within_timer = 120; -+ -+ bfqd->bfq_large_burst_thresh = 8; -+ bfqd->bfq_burst_interval = msecs_to_jiffies(180); -+ -+ bfqd->low_latency = true; -+ -+ /* -+ * Trade-off between responsiveness and fairness. -+ */ -+ bfqd->bfq_wr_coeff = 30; -+ bfqd->bfq_wr_rt_max_time = msecs_to_jiffies(300); -+ bfqd->bfq_wr_max_time = 0; -+ bfqd->bfq_wr_min_idle_time = msecs_to_jiffies(2000); -+ bfqd->bfq_wr_min_inter_arr_async = msecs_to_jiffies(500); -+ bfqd->bfq_wr_max_softrt_rate = 7000; /* -+ * Approximate rate required -+ * to playback or record a -+ * high-definition compressed -+ * video. -+ */ -+ bfqd->wr_busy_queues = 0; -+ -+ /* -+ * Begin by assuming, optimistically, that the device peak -+ * rate is equal to 2/3 of the highest reference rate. -+ */ -+ bfqd->rate_dur_prod = ref_rate[blk_queue_nonrot(bfqd->queue)] * -+ ref_wr_duration[blk_queue_nonrot(bfqd->queue)]; -+ bfqd->peak_rate = ref_rate[blk_queue_nonrot(bfqd->queue)] * 2 / 3; -+ -+ return 0; -+ -+out_free: -+ kfree(bfqd); -+ kobject_put(&eq->kobj); -+ return -ENOMEM; -+} -+ -+static void bfq_registered_queue(struct request_queue *q) -+{ -+ wbt_disable_default(q); -+} -+ -+static void bfq_slab_kill(void) -+{ -+ kmem_cache_destroy(bfq_pool); -+} -+ -+static int __init bfq_slab_setup(void) -+{ -+ bfq_pool = KMEM_CACHE(bfq_queue, 0); -+ if (!bfq_pool) -+ return -ENOMEM; -+ return 0; -+} -+ -+static ssize_t bfq_var_show(unsigned int var, char *page) -+{ -+ return sprintf(page, "%u\n", var); -+} -+ -+static ssize_t bfq_var_store(unsigned long *var, const char *page, -+ size_t count) -+{ -+ unsigned long new_val; -+ int ret = kstrtoul(page, 10, &new_val); -+ -+ if (ret == 0) -+ *var = new_val; -+ -+ return count; -+} -+ -+static ssize_t bfq_wr_max_time_show(struct elevator_queue *e, char *page) -+{ -+ struct bfq_data *bfqd = e->elevator_data; -+ -+ return sprintf(page, "%d\n", bfqd->bfq_wr_max_time > 0 ? 
-+ jiffies_to_msecs(bfqd->bfq_wr_max_time) : -+ jiffies_to_msecs(bfq_wr_duration(bfqd))); -+} -+ -+static ssize_t bfq_weights_show(struct elevator_queue *e, char *page) -+{ -+ struct bfq_queue *bfqq; -+ struct bfq_data *bfqd = e->elevator_data; -+ ssize_t num_char = 0; -+ -+ num_char += sprintf(page + num_char, "Tot reqs queued %d\n\n", -+ bfqd->queued); -+ -+ spin_lock_irq(bfqd->queue->queue_lock); -+ -+ num_char += sprintf(page + num_char, "Active:\n"); -+ list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list) { -+ num_char += sprintf(page + num_char, -+ "pid%d: weight %hu, nr_queued %d %d, ", -+ bfqq->pid, -+ bfqq->entity.weight, -+ bfqq->queued[0], -+ bfqq->queued[1]); -+ num_char += sprintf(page + num_char, -+ "dur %d/%u\n", -+ jiffies_to_msecs( -+ jiffies - -+ bfqq->last_wr_start_finish), -+ jiffies_to_msecs(bfqq->wr_cur_max_time)); -+ } -+ -+ num_char += sprintf(page + num_char, "Idle:\n"); -+ list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list) { -+ num_char += sprintf(page + num_char, -+ "pid%d: weight %hu, dur %d/%u\n", -+ bfqq->pid, -+ bfqq->entity.weight, -+ jiffies_to_msecs(jiffies - -+ bfqq->last_wr_start_finish), -+ jiffies_to_msecs(bfqq->wr_cur_max_time)); -+ } -+ -+ spin_unlock_irq(bfqd->queue->queue_lock); -+ -+ return num_char; -+} -+ -+#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ -+static ssize_t __FUNC(struct elevator_queue *e, char *page) \ -+{ \ -+ struct bfq_data *bfqd = e->elevator_data; \ -+ u64 __data = __VAR; \ -+ if (__CONV == 1) \ -+ __data = jiffies_to_msecs(__data); \ -+ else if (__CONV == 2) \ -+ __data = div_u64(__data, NSEC_PER_MSEC); \ -+ return bfq_var_show(__data, (page)); \ -+} -+SHOW_FUNCTION(bfq_fifo_expire_sync_show, bfqd->bfq_fifo_expire[1], 2); -+SHOW_FUNCTION(bfq_fifo_expire_async_show, bfqd->bfq_fifo_expire[0], 2); -+SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0); -+SHOW_FUNCTION(bfq_back_seek_penalty_show, bfqd->bfq_back_penalty, 0); -+SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 2); -+SHOW_FUNCTION(bfq_max_budget_show, bfqd->bfq_user_max_budget, 0); -+SHOW_FUNCTION(bfq_timeout_sync_show, bfqd->bfq_timeout, 1); -+SHOW_FUNCTION(bfq_strict_guarantees_show, bfqd->strict_guarantees, 0); -+SHOW_FUNCTION(bfq_low_latency_show, bfqd->low_latency, 0); -+SHOW_FUNCTION(bfq_wr_coeff_show, bfqd->bfq_wr_coeff, 0); -+SHOW_FUNCTION(bfq_wr_rt_max_time_show, bfqd->bfq_wr_rt_max_time, 1); -+SHOW_FUNCTION(bfq_wr_min_idle_time_show, bfqd->bfq_wr_min_idle_time, 1); -+SHOW_FUNCTION(bfq_wr_min_inter_arr_async_show, bfqd->bfq_wr_min_inter_arr_async, -+ 1); -+SHOW_FUNCTION(bfq_wr_max_softrt_rate_show, bfqd->bfq_wr_max_softrt_rate, 0); -+#undef SHOW_FUNCTION -+ -+#define USEC_SHOW_FUNCTION(__FUNC, __VAR) \ -+static ssize_t __FUNC(struct elevator_queue *e, char *page) \ -+{ \ -+ struct bfq_data *bfqd = e->elevator_data; \ -+ u64 __data = __VAR; \ -+ __data = div_u64(__data, NSEC_PER_USEC); \ -+ return bfq_var_show(__data, (page)); \ -+} -+USEC_SHOW_FUNCTION(bfq_slice_idle_us_show, bfqd->bfq_slice_idle); -+#undef USEC_SHOW_FUNCTION -+ -+#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ -+static ssize_t \ -+__FUNC(struct elevator_queue *e, const char *page, size_t count) \ -+{ \ -+ struct bfq_data *bfqd = e->elevator_data; \ -+ unsigned long uninitialized_var(__data); \ -+ int ret = bfq_var_store(&__data, (page), count); \ -+ if (__data < (MIN)) \ -+ __data = (MIN); \ -+ else if (__data > (MAX)) \ -+ __data = (MAX); \ -+ if (__CONV == 1) \ -+ *(__PTR) = msecs_to_jiffies(__data); \ -+ else if (__CONV == 2) \ -+ *(__PTR) = (u64)__data 
* NSEC_PER_MSEC; \ -+ else \ -+ *(__PTR) = __data; \ -+ return ret; \ -+} -+STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1, -+ INT_MAX, 2); -+STORE_FUNCTION(bfq_fifo_expire_async_store, &bfqd->bfq_fifo_expire[0], 1, -+ INT_MAX, 2); -+STORE_FUNCTION(bfq_back_seek_max_store, &bfqd->bfq_back_max, 0, INT_MAX, 0); -+STORE_FUNCTION(bfq_back_seek_penalty_store, &bfqd->bfq_back_penalty, 1, -+ INT_MAX, 0); -+STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 2); -+STORE_FUNCTION(bfq_wr_coeff_store, &bfqd->bfq_wr_coeff, 1, INT_MAX, 0); -+STORE_FUNCTION(bfq_wr_max_time_store, &bfqd->bfq_wr_max_time, 0, INT_MAX, 1); -+STORE_FUNCTION(bfq_wr_rt_max_time_store, &bfqd->bfq_wr_rt_max_time, 0, INT_MAX, -+ 1); -+STORE_FUNCTION(bfq_wr_min_idle_time_store, &bfqd->bfq_wr_min_idle_time, 0, -+ INT_MAX, 1); -+STORE_FUNCTION(bfq_wr_min_inter_arr_async_store, -+ &bfqd->bfq_wr_min_inter_arr_async, 0, INT_MAX, 1); -+STORE_FUNCTION(bfq_wr_max_softrt_rate_store, &bfqd->bfq_wr_max_softrt_rate, 0, -+ INT_MAX, 0); -+#undef STORE_FUNCTION -+ -+#define USEC_STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \ -+static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)\ -+{ \ -+ struct bfq_data *bfqd = e->elevator_data; \ -+ unsigned long uninitialized_var(__data); \ -+ int ret = bfq_var_store(&__data, (page), count); \ -+ if (__data < (MIN)) \ -+ __data = (MIN); \ -+ else if (__data > (MAX)) \ -+ __data = (MAX); \ -+ *(__PTR) = (u64)__data * NSEC_PER_USEC; \ -+ return ret; \ -+} -+USEC_STORE_FUNCTION(bfq_slice_idle_us_store, &bfqd->bfq_slice_idle, 0, -+ UINT_MAX); -+#undef USEC_STORE_FUNCTION -+ -+/* do nothing for the moment */ -+static ssize_t bfq_weights_store(struct elevator_queue *e, -+ const char *page, size_t count) -+{ -+ return count; -+} -+ -+static ssize_t bfq_max_budget_store(struct elevator_queue *e, -+ const char *page, size_t count) -+{ -+ struct bfq_data *bfqd = e->elevator_data; -+ unsigned long uninitialized_var(__data); -+ int ret = bfq_var_store(&__data, (page), count); -+ -+ if (__data == 0) -+ bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd); -+ else { -+ if (__data > INT_MAX) -+ __data = INT_MAX; -+ bfqd->bfq_max_budget = __data; -+ } -+ -+ bfqd->bfq_user_max_budget = __data; -+ -+ return ret; -+} -+ -+/* -+ * Leaving this name to preserve name compatibility with cfq -+ * parameters, but this timeout is used for both sync and async. 
-+ */ -+static ssize_t bfq_timeout_sync_store(struct elevator_queue *e, -+ const char *page, size_t count) -+{ -+ struct bfq_data *bfqd = e->elevator_data; -+ unsigned long uninitialized_var(__data); -+ int ret = bfq_var_store(&__data, (page), count); -+ -+ if (__data < 1) -+ __data = 1; -+ else if (__data > INT_MAX) -+ __data = INT_MAX; -+ -+ bfqd->bfq_timeout = msecs_to_jiffies(__data); -+ if (bfqd->bfq_user_max_budget == 0) -+ bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd); -+ -+ return ret; -+} -+ -+static ssize_t bfq_strict_guarantees_store(struct elevator_queue *e, -+ const char *page, size_t count) -+{ -+ struct bfq_data *bfqd = e->elevator_data; -+ unsigned long uninitialized_var(__data); -+ int ret = bfq_var_store(&__data, (page), count); -+ -+ if (__data > 1) -+ __data = 1; -+ if (!bfqd->strict_guarantees && __data == 1 -+ && bfqd->bfq_slice_idle < 8 * NSEC_PER_MSEC) -+ bfqd->bfq_slice_idle = 8 * NSEC_PER_MSEC; -+ -+ bfqd->strict_guarantees = __data; -+ -+ return ret; -+} -+ -+static ssize_t bfq_low_latency_store(struct elevator_queue *e, -+ const char *page, size_t count) -+{ -+ struct bfq_data *bfqd = e->elevator_data; -+ unsigned long uninitialized_var(__data); -+ int ret = bfq_var_store(&__data, (page), count); -+ -+ if (__data > 1) -+ __data = 1; -+ if (__data == 0 && bfqd->low_latency != 0) -+ bfq_end_wr(bfqd); -+ bfqd->low_latency = __data; -+ -+ return ret; -+} -+ -+#define BFQ_ATTR(name) \ -+ __ATTR(name, S_IRUGO|S_IWUSR, bfq_##name##_show, bfq_##name##_store) -+ -+static struct elv_fs_entry bfq_attrs[] = { -+ BFQ_ATTR(fifo_expire_sync), -+ BFQ_ATTR(fifo_expire_async), -+ BFQ_ATTR(back_seek_max), -+ BFQ_ATTR(back_seek_penalty), -+ BFQ_ATTR(slice_idle), -+ BFQ_ATTR(slice_idle_us), -+ BFQ_ATTR(max_budget), -+ BFQ_ATTR(timeout_sync), -+ BFQ_ATTR(strict_guarantees), -+ BFQ_ATTR(low_latency), -+ BFQ_ATTR(wr_coeff), -+ BFQ_ATTR(wr_max_time), -+ BFQ_ATTR(wr_rt_max_time), -+ BFQ_ATTR(wr_min_idle_time), -+ BFQ_ATTR(wr_min_inter_arr_async), -+ BFQ_ATTR(wr_max_softrt_rate), -+ BFQ_ATTR(weights), -+ __ATTR_NULL -+}; -+ -+static struct elevator_type iosched_bfq = { -+ .ops.sq = { -+ .elevator_merge_fn = bfq_merge, -+ .elevator_merged_fn = bfq_merged_request, -+ .elevator_merge_req_fn = bfq_merged_requests, -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ .elevator_bio_merged_fn = bfq_bio_merged, -+#endif -+ .elevator_allow_bio_merge_fn = bfq_allow_bio_merge, -+ .elevator_allow_rq_merge_fn = bfq_allow_rq_merge, -+ .elevator_dispatch_fn = bfq_dispatch_requests, -+ .elevator_add_req_fn = bfq_insert_request, -+ .elevator_activate_req_fn = bfq_activate_request, -+ .elevator_deactivate_req_fn = bfq_deactivate_request, -+ .elevator_completed_req_fn = bfq_completed_request, -+ .elevator_former_req_fn = elv_rb_former_request, -+ .elevator_latter_req_fn = elv_rb_latter_request, -+ .elevator_init_icq_fn = bfq_init_icq, -+ .elevator_exit_icq_fn = bfq_exit_icq, -+ .elevator_set_req_fn = bfq_set_request, -+ .elevator_put_req_fn = bfq_put_request, -+ .elevator_may_queue_fn = bfq_may_queue, -+ .elevator_init_fn = bfq_init_queue, -+ .elevator_exit_fn = bfq_exit_queue, -+ .elevator_registered_fn = bfq_registered_queue, -+ }, -+ .icq_size = sizeof(struct bfq_io_cq), -+ .icq_align = __alignof__(struct bfq_io_cq), -+ .elevator_attrs = bfq_attrs, -+ .elevator_name = "bfq-sq", -+ .elevator_owner = THIS_MODULE, -+}; -+ -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+static struct blkcg_policy blkcg_policy_bfq = { -+ .dfl_cftypes = bfq_blkg_files, -+ .legacy_cftypes = bfq_blkcg_legacy_files, -+ -+ .cpd_alloc_fn = bfq_cpd_alloc, 
-+ .cpd_init_fn = bfq_cpd_init, -+ .cpd_bind_fn = bfq_cpd_init, -+ .cpd_free_fn = bfq_cpd_free, -+ -+ .pd_alloc_fn = bfq_pd_alloc, -+ .pd_init_fn = bfq_pd_init, -+ .pd_offline_fn = bfq_pd_offline, -+ .pd_free_fn = bfq_pd_free, -+ .pd_reset_stats_fn = bfq_pd_reset_stats, -+}; -+#endif -+ -+static int __init bfq_init(void) -+{ -+ int ret; -+ char msg[60] = "BFQ I/O-scheduler: v9"; -+ -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ ret = blkcg_policy_register(&blkcg_policy_bfq); -+ if (ret) -+ return ret; -+#endif -+ -+ ret = -ENOMEM; -+ if (bfq_slab_setup()) -+ goto err_pol_unreg; -+ -+ /* -+ * Times to load large popular applications for the typical -+ * systems installed on the reference devices (see the -+ * comments before the definition of the next -+ * array). Actually, we use slightly lower values, as the -+ * estimated peak rate tends to be smaller than the actual -+ * peak rate. The reason for this last fact is that estimates -+ * are computed over much shorter time intervals than the long -+ * intervals typically used for benchmarking. Why? First, to -+ * adapt more quickly to variations. Second, because an I/O -+ * scheduler cannot rely on a peak-rate-evaluation workload to -+ * be run for a long time. -+ */ -+ ref_wr_duration[0] = msecs_to_jiffies(7000); /* actually 8 sec */ -+ ref_wr_duration[1] = msecs_to_jiffies(2500); /* actually 3 sec */ -+ -+ ret = elv_register(&iosched_bfq); -+ if (ret) -+ goto slab_kill; -+ -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ strcat(msg, " (with cgroups support)"); -+#endif -+ pr_info("%s", msg); -+ -+ return 0; -+ -+slab_kill: -+ bfq_slab_kill(); -+err_pol_unreg: -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ blkcg_policy_unregister(&blkcg_policy_bfq); -+#endif -+ return ret; -+} -+ -+static void __exit bfq_exit(void) -+{ -+ elv_unregister(&iosched_bfq); -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ blkcg_policy_unregister(&blkcg_policy_bfq); -+#endif -+ bfq_slab_kill(); -+} -+ -+module_init(bfq_init); -+module_exit(bfq_exit); -+ -+MODULE_AUTHOR("Arianna Avanzini, Fabio Checconi, Paolo Valente"); -+MODULE_LICENSE("GPL"); -diff --git a/block/bfq.h b/block/bfq.h -new file mode 100644 -index 000000000000..0177fc7205d7 ---- /dev/null -+++ b/block/bfq.h -@@ -0,0 +1,1074 @@ -+/* -+ * BFQ v9: data structures and common functions prototypes. -+ * -+ * Based on ideas and code from CFQ: -+ * Copyright (C) 2003 Jens Axboe -+ * -+ * Copyright (C) 2008 Fabio Checconi -+ * Paolo Valente -+ * -+ * Copyright (C) 2015 Paolo Valente -+ * -+ * Copyright (C) 2017 Paolo Valente -+ */ -+ -+#ifndef _BFQ_H -+#define _BFQ_H -+ -+#include -+#include -+ -+/* -+ * Define an alternative macro to compile cgroups support. This is one -+ * of the steps needed to let bfq-mq share the files bfq-sched.c and -+ * bfq-cgroup.c with bfq-sq. For bfq-mq, the macro -+ * BFQ_GROUP_IOSCHED_ENABLED will be defined as a function of whether -+ * the configuration option CONFIG_BFQ_MQ_GROUP_IOSCHED, and not -+ * CONFIG_BFQ_GROUP_IOSCHED, is defined. -+ */ -+#ifdef CONFIG_BFQ_SQ_GROUP_IOSCHED -+#define BFQ_GROUP_IOSCHED_ENABLED -+#endif -+ -+#define BFQ_IOPRIO_CLASSES 3 -+#define BFQ_CL_IDLE_TIMEOUT (HZ/5) -+ -+#define BFQ_MIN_WEIGHT 1 -+#define BFQ_MAX_WEIGHT 1000 -+#define BFQ_WEIGHT_CONVERSION_COEFF 10 -+ -+#define BFQ_DEFAULT_QUEUE_IOPRIO 4 -+ -+#define BFQ_WEIGHT_LEGACY_DFL 100 -+#define BFQ_DEFAULT_GRP_IOPRIO 0 -+#define BFQ_DEFAULT_GRP_CLASS IOPRIO_CLASS_BE -+ -+/* -+ * Soft real-time applications are extremely more latency sensitive -+ * than interactive ones. 
Over-raise the weight of the former to -+ * privilege them against the latter. -+ */ -+#define BFQ_SOFTRT_WEIGHT_FACTOR 100 -+ -+struct bfq_entity; -+ -+/** -+ * struct bfq_service_tree - per ioprio_class service tree. -+ * -+ * Each service tree represents a B-WF2Q+ scheduler on its own. Each -+ * ioprio_class has its own independent scheduler, and so its own -+ * bfq_service_tree. All the fields are protected by the queue lock -+ * of the containing bfqd. -+ */ -+struct bfq_service_tree { -+ /* tree for active entities (i.e., those backlogged) */ -+ struct rb_root active; -+ /* tree for idle entities (i.e., not backlogged, with V <= F_i)*/ -+ struct rb_root idle; -+ -+ struct bfq_entity *first_idle; /* idle entity with minimum F_i */ -+ struct bfq_entity *last_idle; /* idle entity with maximum F_i */ -+ -+ u64 vtime; /* scheduler virtual time */ -+ /* scheduler weight sum; active and idle entities contribute to it */ -+ unsigned long wsum; -+}; -+ -+/** -+ * struct bfq_sched_data - multi-class scheduler. -+ * -+ * bfq_sched_data is the basic scheduler queue. It supports three -+ * ioprio_classes, and can be used either as a toplevel queue or as an -+ * intermediate queue in a hierarchical setup. -+ * -+ * The supported ioprio_classes are the same as in CFQ, in descending -+ * priority order, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE. -+ * Requests from higher priority queues are served before all the -+ * requests from lower priority queues; among requests of the same -+ * queue requests are served according to B-WF2Q+. -+ * -+ * The schedule is implemented by the service trees, plus the field -+ * @next_in_service, which points to the entity on the active trees -+ * that will be served next, if 1) no changes in the schedule occurs -+ * before the current in-service entity is expired, 2) the in-service -+ * queue becomes idle when it expires, and 3) if the entity pointed by -+ * in_service_entity is not a queue, then the in-service child entity -+ * of the entity pointed by in_service_entity becomes idle on -+ * expiration. This peculiar definition allows for the following -+ * optimization, not yet exploited: while a given entity is still in -+ * service, we already know which is the best candidate for next -+ * service among the other active entitities in the same parent -+ * entity. We can then quickly compare the timestamps of the -+ * in-service entity with those of such best candidate. -+ * -+ * All the fields are protected by the queue lock of the containing -+ * bfqd. -+ */ -+struct bfq_sched_data { -+ struct bfq_entity *in_service_entity; /* entity in service */ -+ /* head-of-the-line entity in the scheduler (see comments above) */ -+ struct bfq_entity *next_in_service; -+ /* array of service trees, one per ioprio_class */ -+ struct bfq_service_tree service_tree[BFQ_IOPRIO_CLASSES]; -+ /* last time CLASS_IDLE was served */ -+ unsigned long bfq_class_idle_last_service; -+ -+}; -+ -+/** -+ * struct bfq_weight_counter - counter of the number of all active queues -+ * with a given weight. -+ */ -+struct bfq_weight_counter { -+ unsigned int weight; /* weight of the queues this counter refers to */ -+ unsigned int num_active; /* nr of active queues with this weight */ -+ /* -+ * Weights tree member (see bfq_data's @queue_weights_tree) -+ */ -+ struct rb_node weights_node; -+}; -+ -+/** -+ * struct bfq_entity - schedulable entity. 
-+ * -+ * A bfq_entity is used to represent either a bfq_queue (leaf node in the -+ * cgroup hierarchy) or a bfq_group into the upper level scheduler. Each -+ * entity belongs to the sched_data of the parent group in the cgroup -+ * hierarchy. Non-leaf entities have also their own sched_data, stored -+ * in @my_sched_data. -+ * -+ * Each entity stores independently its priority values; this would -+ * allow different weights on different devices, but this -+ * functionality is not exported to userspace by now. Priorities and -+ * weights are updated lazily, first storing the new values into the -+ * new_* fields, then setting the @prio_changed flag. As soon as -+ * there is a transition in the entity state that allows the priority -+ * update to take place the effective and the requested priority -+ * values are synchronized. -+ * -+ * Unless cgroups are used, the weight value is calculated from the -+ * ioprio to export the same interface as CFQ. When dealing with -+ * ``well-behaved'' queues (i.e., queues that do not spend too much -+ * time to consume their budget and have true sequential behavior, and -+ * when there are no external factors breaking anticipation) the -+ * relative weights at each level of the cgroups hierarchy should be -+ * guaranteed. All the fields are protected by the queue lock of the -+ * containing bfqd. -+ */ -+struct bfq_entity { -+ struct rb_node rb_node; /* service_tree member */ -+ -+ /* -+ * Flag, true if the entity is on a tree (either the active or -+ * the idle one of its service_tree) or is in service. -+ */ -+ bool on_st; -+ -+ u64 finish; /* B-WF2Q+ finish timestamp (aka F_i) */ -+ u64 start; /* B-WF2Q+ start timestamp (aka S_i) */ -+ -+ /* tree the entity is enqueued into; %NULL if not on a tree */ -+ struct rb_root *tree; -+ -+ /* -+ * minimum start time of the (active) subtree rooted at this -+ * entity; used for O(log N) lookups into active trees -+ */ -+ u64 min_start; -+ -+ /* amount of service received during the last service slot */ -+ int service; -+ -+ /* budget, used also to calculate F_i: F_i = S_i + @budget / @weight */ -+ int budget; -+ -+ unsigned int weight; /* weight of the queue */ -+ unsigned int new_weight; /* next weight if a change is in progress */ -+ -+ /* original weight, used to implement weight boosting */ -+ unsigned int orig_weight; -+ -+ /* parent entity, for hierarchical scheduling */ -+ struct bfq_entity *parent; -+ -+ /* -+ * For non-leaf nodes in the hierarchy, the associated -+ * scheduler queue, %NULL on leaf nodes. -+ */ -+ struct bfq_sched_data *my_sched_data; -+ /* the scheduler queue this entity belongs to */ -+ struct bfq_sched_data *sched_data; -+ -+ /* flag, set to request a weight, ioprio or ioprio_class change */ -+ int prio_changed; -+ -+ /* flag, set if the entity is counted in groups_with_pending_reqs */ -+ bool in_groups_with_pending_reqs; -+}; -+ -+struct bfq_group; -+ -+/** -+ * struct bfq_queue - leaf schedulable entity. -+ * -+ * A bfq_queue is a leaf request queue; it can be associated with an -+ * io_context or more, if it is async or shared between cooperating -+ * processes. @cgroup holds a reference to the cgroup, to be sure that it -+ * does not disappear while a bfqq still references it (mostly to avoid -+ * races between request issuing and task migration followed by cgroup -+ * destruction). -+ * All the fields are protected by the queue lock of the containing bfqd. 
-+ */ -+struct bfq_queue { -+ /* reference counter */ -+ int ref; -+ /* parent bfq_data */ -+ struct bfq_data *bfqd; -+ -+ /* current ioprio and ioprio class */ -+ unsigned short ioprio, ioprio_class; -+ /* next ioprio and ioprio class if a change is in progress */ -+ unsigned short new_ioprio, new_ioprio_class; -+ -+ /* -+ * Shared bfq_queue if queue is cooperating with one or more -+ * other queues. -+ */ -+ struct bfq_queue *new_bfqq; -+ /* request-position tree member (see bfq_group's @rq_pos_tree) */ -+ struct rb_node pos_node; -+ /* request-position tree root (see bfq_group's @rq_pos_tree) */ -+ struct rb_root *pos_root; -+ -+ /* sorted list of pending requests */ -+ struct rb_root sort_list; -+ /* if fifo isn't expired, next request to serve */ -+ struct request *next_rq; -+ /* number of sync and async requests queued */ -+ int queued[2]; -+ /* number of sync and async requests currently allocated */ -+ int allocated[2]; -+ /* number of pending metadata requests */ -+ int meta_pending; -+ /* fifo list of requests in sort_list */ -+ struct list_head fifo; -+ -+ /* entity representing this queue in the scheduler */ -+ struct bfq_entity entity; -+ -+ /* pointer to the weight counter associated with this queue */ -+ struct bfq_weight_counter *weight_counter; -+ -+ /* maximum budget allowed from the feedback mechanism */ -+ int max_budget; -+ /* budget expiration (in jiffies) */ -+ unsigned long budget_timeout; -+ -+ /* number of requests on the dispatch list or inside driver */ -+ int dispatched; -+ -+ unsigned int flags; /* status flags.*/ -+ -+ /* node for active/idle bfqq list inside parent bfqd */ -+ struct list_head bfqq_list; -+ -+ /* bit vector: a 1 for each seeky requests in history */ -+ u32 seek_history; -+ -+ /* node for the device's burst list */ -+ struct hlist_node burst_list_node; -+ -+ /* position of the last request enqueued */ -+ sector_t last_request_pos; -+ -+ /* Number of consecutive pairs of request completion and -+ * arrival, such that the queue becomes idle after the -+ * completion, but the next request arrives within an idle -+ * time slice; used only if the queue's IO_bound flag has been -+ * cleared. -+ */ -+ unsigned int requests_within_timer; -+ -+ /* pid of the process owning the queue, used for logging purposes */ -+ pid_t pid; -+ -+ /* -+ * Pointer to the bfq_io_cq owning the bfq_queue, set to %NULL -+ * if the queue is shared. -+ */ -+ struct bfq_io_cq *bic; -+ -+ /* current maximum weight-raising time for this queue */ -+ unsigned long wr_cur_max_time; -+ /* -+ * Minimum time instant such that, only if a new request is -+ * enqueued after this time instant in an idle @bfq_queue with -+ * no outstanding requests, then the task associated with the -+ * queue it is deemed as soft real-time (see the comments on -+ * the function bfq_bfqq_softrt_next_start()) -+ */ -+ unsigned long soft_rt_next_start; -+ /* -+ * Start time of the current weight-raising period if -+ * the @bfq-queue is being weight-raised, otherwise -+ * finish time of the last weight-raising period. -+ */ -+ unsigned long last_wr_start_finish; -+ /* factor by which the weight of this queue is multiplied */ -+ unsigned int wr_coeff; -+ /* -+ * Time of the last transition of the @bfq_queue from idle to -+ * backlogged. -+ */ -+ unsigned long last_idle_bklogged; -+ /* -+ * Cumulative service received from the @bfq_queue since the -+ * last transition from idle to backlogged. 
-+ */ -+ unsigned long service_from_backlogged; -+ /* -+ * Cumulative service received from the @bfq_queue since its -+ * last transition to weight-raised state. -+ */ -+ unsigned long service_from_wr; -+ /* -+ * Value of wr start time when switching to soft rt -+ */ -+ unsigned long wr_start_at_switch_to_srt; -+ -+ unsigned long split_time; /* time of last split */ -+ -+ unsigned long first_IO_time; /* time of first I/O for this queue */ -+ -+ /* max service rate measured so far */ -+ u32 max_service_rate; -+ /* -+ * Ratio between the service received by bfqq while it is in -+ * service, and the cumulative service (of requests of other -+ * queues) that may be injected while bfqq is empty but still -+ * in service. To increase precision, the coefficient is -+ * measured in tenths of unit. Here are some example of (1) -+ * ratios, (2) resulting percentages of service injected -+ * w.r.t. to the total service dispatched while bfqq is in -+ * service, and (3) corresponding values of the coefficient: -+ * 1 (50%) -> 10 -+ * 2 (33%) -> 20 -+ * 10 (9%) -> 100 -+ * 9.9 (9%) -> 99 -+ * 1.5 (40%) -> 15 -+ * 0.5 (66%) -> 5 -+ * 0.1 (90%) -> 1 -+ * -+ * So, if the coefficient is lower than 10, then -+ * injected service is more than bfqq service. -+ */ -+ unsigned int inject_coeff; -+ /* amount of service injected in current service slot */ -+ unsigned int injected_service; -+}; -+ -+/** -+ * struct bfq_ttime - per process thinktime stats. -+ */ -+struct bfq_ttime { -+ u64 last_end_request; /* completion time of last request */ -+ -+ u64 ttime_total; /* total process thinktime */ -+ unsigned long ttime_samples; /* number of thinktime samples */ -+ u64 ttime_mean; /* average process thinktime */ -+ -+}; -+ -+/** -+ * struct bfq_io_cq - per (request_queue, io_context) structure. -+ */ -+struct bfq_io_cq { -+ /* associated io_cq structure */ -+ struct io_cq icq; /* must be the first member */ -+ /* array of two process queues, the sync and the async */ -+ struct bfq_queue *bfqq[2]; -+ /* associated @bfq_ttime struct */ -+ struct bfq_ttime ttime; -+ /* per (request_queue, blkcg) ioprio */ -+ int ioprio; -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ uint64_t blkcg_serial_nr; /* the current blkcg serial */ -+#endif -+ -+ /* -+ * Snapshot of the has_short_time flag before merging; taken -+ * to remember its value while the queue is merged, so as to -+ * be able to restore it in case of split. -+ */ -+ bool saved_has_short_ttime; -+ /* -+ * Same purpose as the previous two fields for the I/O bound -+ * classification of a queue. -+ */ -+ bool saved_IO_bound; -+ -+ /* -+ * Same purpose as the previous fields for the value of the -+ * field keeping the queue's belonging to a large burst -+ */ -+ bool saved_in_large_burst; -+ /* -+ * True if the queue belonged to a burst list before its merge -+ * with another cooperating queue. -+ */ -+ bool was_in_burst_list; -+ -+ /* -+ * Similar to previous fields: save wr information. -+ */ -+ unsigned long saved_wr_coeff; -+ unsigned long saved_last_wr_start_finish; -+ unsigned long saved_wr_start_at_switch_to_srt; -+ unsigned int saved_wr_cur_max_time; -+}; -+ -+/** -+ * struct bfq_data - per-device data structure. -+ * -+ * All the fields are protected by the @queue lock. -+ */ -+struct bfq_data { -+ /* request queue for the device */ -+ struct request_queue *queue; -+ -+ /* root bfq_group for the device */ -+ struct bfq_group *root_group; -+ -+ /* -+ * rbtree of weight counters of @bfq_queues, sorted by -+ * weight. 
Used to keep track of whether all @bfq_queues have -+ * the same weight. The tree contains one counter for each -+ * distinct weight associated to some active and not -+ * weight-raised @bfq_queue (see the comments to the functions -+ * bfq_weights_tree_[add|remove] for further details). -+ */ -+ struct rb_root queue_weights_tree; -+ -+ /* -+ * Number of groups with at least one descendant process that -+ * has at least one request waiting for completion. Note that -+ * this accounts for also requests already dispatched, but not -+ * yet completed. Therefore this number of groups may differ -+ * (be larger) than the number of active groups, as a group is -+ * considered active only if its corresponding entity has -+ * descendant queues with at least one request queued. This -+ * number is used to decide whether a scenario is symmetric. -+ * For a detailed explanation see comments on the computation -+ * of the variable asymmetric_scenario in the function -+ * bfq_better_to_idle(). -+ * -+ * However, it is hard to compute this number exactly, for -+ * groups with multiple descendant processes. Consider a group -+ * that is inactive, i.e., that has no descendant process with -+ * pending I/O inside BFQ queues. Then suppose that -+ * num_groups_with_pending_reqs is still accounting for this -+ * group, because the group has descendant processes with some -+ * I/O request still in flight. num_groups_with_pending_reqs -+ * should be decremented when the in-flight request of the -+ * last descendant process is finally completed (assuming that -+ * nothing else has changed for the group in the meantime, in -+ * terms of composition of the group and active/inactive state of child -+ * groups and processes). To accomplish this, an additional -+ * pending-request counter must be added to entities, and must -+ * be updated correctly. To avoid this additional field and operations, -+ * we resort to the following tradeoff between simplicity and -+ * accuracy: for an inactive group that is still counted in -+ * num_groups_with_pending_reqs, we decrement -+ * num_groups_with_pending_reqs when the first descendant -+ * process of the group remains with no request waiting for -+ * completion. -+ * -+ * Even this simpler decrement strategy requires a little -+ * carefulness: to avoid multiple decrements, we flag a group, -+ * more precisely an entity representing a group, as still -+ * counted in num_groups_with_pending_reqs when it becomes -+ * inactive. Then, when the first descendant queue of the -+ * entity remains with no request waiting for completion, -+ * num_groups_with_pending_reqs is decremented, and this flag -+ * is reset. After this flag is reset for the entity, -+ * num_groups_with_pending_reqs won't be decremented any -+ * longer in case a new descendant queue of the entity remains -+ * with no request waiting for completion. -+ */ -+ unsigned int num_groups_with_pending_reqs; -+ -+ /* -+ * Per-class (RT, BE, IDLE) number of bfq_queues containing -+ * requests (including the queue in service, even if it is -+ * idling). -+ */ -+ unsigned int busy_queues[3]; -+ /* number of weight-raised busy @bfq_queues */ -+ int wr_busy_queues; -+ /* number of queued requests */ -+ int queued; -+ /* number of requests dispatched and waiting for completion */ -+ int rq_in_driver; -+ -+ /* -+ * Maximum number of requests in driver in the last -+ * @hw_tag_samples completed requests. 
-+ */ -+ int max_rq_in_driver; -+ /* number of samples used to calculate hw_tag */ -+ int hw_tag_samples; -+ /* flag set to one if the driver is showing a queueing behavior */ -+ int hw_tag; -+ -+ /* number of budgets assigned */ -+ int budgets_assigned; -+ -+ /* -+ * Timer set when idling (waiting) for the next request from -+ * the queue in service. -+ */ -+ struct hrtimer idle_slice_timer; -+ /* delayed work to restart dispatching on the request queue */ -+ struct work_struct unplug_work; -+ -+ /* bfq_queue in service */ -+ struct bfq_queue *in_service_queue; -+ /* bfq_io_cq (bic) associated with the @in_service_queue */ -+ struct bfq_io_cq *in_service_bic; -+ -+ /* on-disk position of the last served request */ -+ sector_t last_position; -+ -+ /* position of the last served request for the in-service queue */ -+ sector_t in_serv_last_pos; -+ -+ /* time of last request completion (ns) */ -+ u64 last_completion; -+ -+ /* time of first rq dispatch in current observation interval (ns) */ -+ u64 first_dispatch; -+ /* time of last rq dispatch in current observation interval (ns) */ -+ u64 last_dispatch; -+ -+ /* beginning of the last budget */ -+ ktime_t last_budget_start; -+ /* beginning of the last idle slice */ -+ ktime_t last_idling_start; -+ -+ /* number of samples in current observation interval */ -+ int peak_rate_samples; -+ /* num of samples of seq dispatches in current observation interval */ -+ u32 sequential_samples; -+ /* total num of sectors transferred in current observation interval */ -+ u64 tot_sectors_dispatched; -+ /* max rq size seen during current observation interval (sectors) */ -+ u32 last_rq_max_size; -+ /* time elapsed from first dispatch in current observ. interval (us) */ -+ u64 delta_from_first; -+ /* -+ * Current estimate of the device peak rate, measured in -+ * [(sectors/usec) / 2^BFQ_RATE_SHIFT]. The left-shift by -+ * BFQ_RATE_SHIFT is performed to increase precision in -+ * fixed-point calculations. -+ */ -+ u32 peak_rate; -+ -+ /* maximum budget allotted to a bfq_queue before rescheduling */ -+ int bfq_max_budget; -+ -+ /* list of all the bfq_queues active on the device */ -+ struct list_head active_list; -+ /* list of all the bfq_queues idle on the device */ -+ struct list_head idle_list; -+ -+ /* -+ * Timeout for async/sync requests; when it fires, requests -+ * are served in fifo order. -+ */ -+ u64 bfq_fifo_expire[2]; -+ /* weight of backward seeks wrt forward ones */ -+ unsigned int bfq_back_penalty; -+ /* maximum allowed backward seek */ -+ unsigned int bfq_back_max; -+ /* maximum idling time */ -+ u32 bfq_slice_idle; -+ -+ /* user-configured max budget value (0 for auto-tuning) */ -+ int bfq_user_max_budget; -+ /* -+ * Timeout for bfq_queues to consume their budget; used to -+ * prevent seeky queues from imposing long latencies to -+ * sequential or quasi-sequential ones (this also implies that -+ * seeky queues cannot receive guarantees in the service -+ * domain; after a timeout they are charged for the time they -+ * have been in service, to preserve fairness among them, but -+ * without service-domain guarantees). -+ */ -+ unsigned int bfq_timeout; -+ -+ /* -+ * Number of consecutive requests that must be issued within -+ * the idle time slice to set again idling to a queue which -+ * was marked as non-I/O-bound (see the definition of the -+ * IO_bound flag for further details). 
-+ */ -+ unsigned int bfq_requests_within_timer; -+ -+ /* -+ * Force device idling whenever needed to provide accurate -+ * service guarantees, without caring about throughput -+ * issues. CAVEAT: this may even increase latencies, in case -+ * of useless idling for processes that did stop doing I/O. -+ */ -+ bool strict_guarantees; -+ -+ /* -+ * Last time at which a queue entered the current burst of -+ * queues being activated shortly after each other; for more -+ * details about this and the following parameters related to -+ * a burst of activations, see the comments on the function -+ * bfq_handle_burst. -+ */ -+ unsigned long last_ins_in_burst; -+ /* -+ * Reference time interval used to decide whether a queue has -+ * been activated shortly after @last_ins_in_burst. -+ */ -+ unsigned long bfq_burst_interval; -+ /* number of queues in the current burst of queue activations */ -+ int burst_size; -+ -+ /* common parent entity for the queues in the burst */ -+ struct bfq_entity *burst_parent_entity; -+ /* Maximum burst size above which the current queue-activation -+ * burst is deemed as 'large'. -+ */ -+ unsigned long bfq_large_burst_thresh; -+ /* true if a large queue-activation burst is in progress */ -+ bool large_burst; -+ /* -+ * Head of the burst list (as for the above fields, more -+ * details in the comments on the function bfq_handle_burst). -+ */ -+ struct hlist_head burst_list; -+ -+ /* if set to true, low-latency heuristics are enabled */ -+ bool low_latency; -+ /* -+ * Maximum factor by which the weight of a weight-raised queue -+ * is multiplied. -+ */ -+ unsigned int bfq_wr_coeff; -+ /* maximum duration of a weight-raising period (jiffies) */ -+ unsigned int bfq_wr_max_time; -+ -+ /* Maximum weight-raising duration for soft real-time processes */ -+ unsigned int bfq_wr_rt_max_time; -+ /* -+ * Minimum idle period after which weight-raising may be -+ * reactivated for a queue (in jiffies). -+ */ -+ unsigned int bfq_wr_min_idle_time; -+ /* -+ * Minimum period between request arrivals after which -+ * weight-raising may be reactivated for an already busy async -+ * queue (in jiffies). -+ */ -+ unsigned long bfq_wr_min_inter_arr_async; -+ -+ /* Max service-rate for a soft real-time queue, in sectors/sec */ -+ unsigned int bfq_wr_max_softrt_rate; -+ /* -+ * Cached value of the product ref_rate*ref_wr_duration, used -+ * for computing the maximum duration of weight raising -+ * automatically. -+ */ -+ u64 rate_dur_prod; -+ -+ /* fallback dummy bfqq for extreme OOM conditions */ -+ struct bfq_queue oom_bfqq; -+}; -+ -+enum bfqq_state_flags { -+ BFQ_BFQQ_FLAG_just_created = 0, /* queue just allocated */ -+ BFQ_BFQQ_FLAG_busy, /* has requests or is in service */ -+ BFQ_BFQQ_FLAG_wait_request, /* waiting for a request */ -+ BFQ_BFQQ_FLAG_non_blocking_wait_rq, /* -+ * waiting for a request -+ * without idling the device -+ */ -+ BFQ_BFQQ_FLAG_must_alloc, /* must be allowed rq alloc */ -+ BFQ_BFQQ_FLAG_fifo_expire, /* FIFO checked in this slice */ -+ BFQ_BFQQ_FLAG_has_short_ttime, /* queue has a short think time */ -+ BFQ_BFQQ_FLAG_sync, /* synchronous queue */ -+ BFQ_BFQQ_FLAG_IO_bound, /* -+ * bfqq has timed-out at least once -+ * having consumed at most 2/10 of -+ * its budget -+ */ -+ BFQ_BFQQ_FLAG_in_large_burst, /* -+ * bfqq activated in a large burst, -+ * see comments to bfq_handle_burst. 
-+ */ -+ BFQ_BFQQ_FLAG_softrt_update, /* -+ * may need softrt-next-start -+ * update -+ */ -+ BFQ_BFQQ_FLAG_coop, /* bfqq is shared */ -+ BFQ_BFQQ_FLAG_split_coop /* shared bfqq will be split */ -+}; -+ -+#define BFQ_BFQQ_FNS(name) \ -+static void bfq_mark_bfqq_##name(struct bfq_queue *bfqq) \ -+{ \ -+ (bfqq)->flags |= (1 << BFQ_BFQQ_FLAG_##name); \ -+} \ -+static void bfq_clear_bfqq_##name(struct bfq_queue *bfqq) \ -+{ \ -+ (bfqq)->flags &= ~(1 << BFQ_BFQQ_FLAG_##name); \ -+} \ -+static int bfq_bfqq_##name(const struct bfq_queue *bfqq) \ -+{ \ -+ return ((bfqq)->flags & (1 << BFQ_BFQQ_FLAG_##name)) != 0; \ -+} -+ -+BFQ_BFQQ_FNS(just_created); -+BFQ_BFQQ_FNS(busy); -+BFQ_BFQQ_FNS(wait_request); -+BFQ_BFQQ_FNS(non_blocking_wait_rq); -+BFQ_BFQQ_FNS(must_alloc); -+BFQ_BFQQ_FNS(fifo_expire); -+BFQ_BFQQ_FNS(has_short_ttime); -+BFQ_BFQQ_FNS(sync); -+BFQ_BFQQ_FNS(IO_bound); -+BFQ_BFQQ_FNS(in_large_burst); -+BFQ_BFQQ_FNS(coop); -+BFQ_BFQQ_FNS(split_coop); -+BFQ_BFQQ_FNS(softrt_update); -+#undef BFQ_BFQQ_FNS -+ -+/* Logging facilities. */ -+#ifdef CONFIG_BFQ_REDIRECT_TO_CONSOLE -+ -+static const char *checked_dev_name(const struct device *dev) -+{ -+ static const char nodev[] = "nodev"; -+ -+ if (dev) -+ return dev_name(dev); -+ -+ return nodev; -+} -+ -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+static struct bfq_group *bfqq_group(struct bfq_queue *bfqq); -+static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg); -+ -+#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \ -+ char __pbuf[128]; \ -+ \ -+ assert_spin_locked((bfqd)->queue->queue_lock); \ -+ blkg_path(bfqg_to_blkg(bfqq_group(bfqq)), __pbuf, sizeof(__pbuf)); \ -+ pr_crit("%s bfq%d%c %s [%s] " fmt "\n", \ -+ checked_dev_name((bfqd)->queue->backing_dev_info->dev), \ -+ (bfqq)->pid, \ -+ bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \ -+ __pbuf, __func__, ##args); \ -+} while (0) -+ -+#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do { \ -+ char __pbuf[128]; \ -+ \ -+ blkg_path(bfqg_to_blkg(bfqg), __pbuf, sizeof(__pbuf)); \ -+ pr_crit("%s %s [%s] " fmt "\n", \ -+ checked_dev_name((bfqd)->queue->backing_dev_info->dev), \ -+ __pbuf, __func__, ##args); \ -+} while (0) -+ -+#else /* BFQ_GROUP_IOSCHED_ENABLED */ -+ -+#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \ -+ pr_crit("%s bfq%d%c [%s] " fmt "\n", \ -+ checked_dev_name((bfqd)->queue->backing_dev_info->dev), \ -+ (bfqq)->pid, bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \ -+ __func__, ##args) -+#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do {} while (0) -+ -+#endif /* BFQ_GROUP_IOSCHED_ENABLED */ -+ -+#define bfq_log(bfqd, fmt, args...) \ -+ pr_crit("%s bfq [%s] " fmt "\n", \ -+ checked_dev_name((bfqd)->queue->backing_dev_info->dev), \ -+ __func__, ##args) -+ -+#else /* CONFIG_BFQ_REDIRECT_TO_CONSOLE */ -+ -+#if !defined(CONFIG_BLK_DEV_IO_TRACE) -+ -+/* Avoid possible "unused-variable" warning. See commit message. */ -+ -+#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) ((void) (bfqq)) -+ -+#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) ((void) (bfqg)) -+ -+#define bfq_log(bfqd, fmt, args...) do {} while (0) -+ -+#else /* CONFIG_BLK_DEV_IO_TRACE */ -+ -+#include -+ -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+static struct bfq_group *bfqq_group(struct bfq_queue *bfqq); -+static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg); -+ -+#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) 
do { \ -+ char __pbuf[128]; \ -+ \ -+ assert_spin_locked((bfqd)->queue->queue_lock); \ -+ blkg_path(bfqg_to_blkg(bfqq_group(bfqq)), __pbuf, sizeof(__pbuf)); \ -+ blk_add_trace_msg((bfqd)->queue, "bfq%d%c %s [%s] " fmt, \ -+ (bfqq)->pid, \ -+ bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \ -+ __pbuf, __func__, ##args); \ -+} while (0) -+ -+#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do { \ -+ char __pbuf[128]; \ -+ \ -+ blkg_path(bfqg_to_blkg(bfqg), __pbuf, sizeof(__pbuf)); \ -+ blk_add_trace_msg((bfqd)->queue, "%s [%s] " fmt, __pbuf, \ -+ __func__, ##args); \ -+} while (0) -+ -+#else /* BFQ_GROUP_IOSCHED_ENABLED */ -+ -+#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \ -+ blk_add_trace_msg((bfqd)->queue, "bfq%d%c [%s] " fmt, (bfqq)->pid, \ -+ bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \ -+ __func__, ##args) -+#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do {} while (0) -+ -+#endif /* BFQ_GROUP_IOSCHED_ENABLED */ -+ -+#define bfq_log(bfqd, fmt, args...) \ -+ blk_add_trace_msg((bfqd)->queue, "bfq [%s] " fmt, __func__, ##args) -+ -+#endif /* CONFIG_BLK_DEV_IO_TRACE */ -+#endif /* CONFIG_BFQ_REDIRECT_TO_CONSOLE */ -+ -+/* Expiration reasons. */ -+enum bfqq_expiration { -+ BFQ_BFQQ_TOO_IDLE = 0, /* -+ * queue has been idling for -+ * too long -+ */ -+ BFQ_BFQQ_BUDGET_TIMEOUT, /* budget took too long to be used */ -+ BFQ_BFQQ_BUDGET_EXHAUSTED, /* budget consumed */ -+ BFQ_BFQQ_NO_MORE_REQUESTS, /* the queue has no more requests */ -+ BFQ_BFQQ_PREEMPTED /* preemption in progress */ -+}; -+ -+ -+struct bfqg_stats { -+#if defined(BFQ_GROUP_IOSCHED_ENABLED) && defined(CONFIG_DEBUG_BLK_CGROUP) -+ /* number of ios merged */ -+ struct blkg_rwstat merged; -+ /* total time spent on device in ns, may not be accurate w/ queueing */ -+ struct blkg_rwstat service_time; -+ /* total time spent waiting in scheduler queue in ns */ -+ struct blkg_rwstat wait_time; -+ /* number of IOs queued up */ -+ struct blkg_rwstat queued; -+ /* total disk time and nr sectors dispatched by this group */ -+ struct blkg_stat time; -+ /* sum of number of ios queued across all samples */ -+ struct blkg_stat avg_queue_size_sum; -+ /* count of samples taken for average */ -+ struct blkg_stat avg_queue_size_samples; -+ /* how many times this group has been removed from service tree */ -+ struct blkg_stat dequeue; -+ /* total time spent waiting for it to be assigned a timeslice. */ -+ struct blkg_stat group_wait_time; -+ /* time spent idling for this blkcg_gq */ -+ struct blkg_stat idle_time; -+ /* total time with empty current active q with other requests queued */ -+ struct blkg_stat empty_time; -+ /* fields after this shouldn't be cleared on stat reset */ -+ uint64_t start_group_wait_time; -+ uint64_t start_idle_time; -+ uint64_t start_empty_time; -+ uint16_t flags; -+#endif /* BFQ_GROUP_IOSCHED_ENABLED && CONFIG_DEBUG_BLK_CGROUP */ -+}; -+ -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+/* -+ * struct bfq_group_data - per-blkcg storage for the blkio subsystem. -+ * -+ * @ps: @blkcg_policy_storage that this structure inherits -+ * @weight: weight of the bfq_group -+ */ -+struct bfq_group_data { -+ /* must be the first member */ -+ struct blkcg_policy_data pd; -+ -+ unsigned int weight; -+}; -+ -+/** -+ * struct bfq_group - per (device, cgroup) data structure. -+ * @entity: schedulable entity to insert into the parent group sched_data. -+ * @sched_data: own sched_data, to contain child entities (they may be -+ * both bfq_queues and bfq_groups). -+ * @bfqd: the bfq_data for the device this group acts upon. 
-+ * @async_bfqq: array of async queues for all the tasks belonging to -+ * the group, one queue per ioprio value per ioprio_class, -+ * except for the idle class that has only one queue. -+ * @async_idle_bfqq: async queue for the idle class (ioprio is ignored). -+ * @my_entity: pointer to @entity, %NULL for the toplevel group; used -+ * to avoid too many special cases during group creation/ -+ * migration. -+ * @active_entities: number of active entities belonging to the group; -+ * unused for the root group. Used to know whether there -+ * are groups with more than one active @bfq_entity -+ * (see the comments to the function -+ * bfq_bfqq_may_idle()). -+ * @rq_pos_tree: rbtree sorted by next_request position, used when -+ * determining if two or more queues have interleaving -+ * requests (see bfq_find_close_cooperator()). -+ * -+ * Each (device, cgroup) pair has its own bfq_group, i.e., for each cgroup -+ * there is a set of bfq_groups, each one collecting the lower-level -+ * entities belonging to the group that are acting on the same device. -+ * -+ * Locking works as follows: -+ * o @bfqd is protected by the queue lock, RCU is used to access it -+ * from the readers. -+ * o All the other fields are protected by the @bfqd queue lock. -+ */ -+struct bfq_group { -+ /* must be the first member */ -+ struct blkg_policy_data pd; -+ -+ struct bfq_entity entity; -+ struct bfq_sched_data sched_data; -+ -+ void *bfqd; -+ -+ struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR]; -+ struct bfq_queue *async_idle_bfqq; -+ -+ struct bfq_entity *my_entity; -+ -+ int active_entities; -+ -+ struct rb_root rq_pos_tree; -+ -+ struct bfqg_stats stats; -+}; -+ -+#else -+struct bfq_group { -+ struct bfq_sched_data sched_data; -+ -+ struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR]; -+ struct bfq_queue *async_idle_bfqq; -+ -+ struct rb_root rq_pos_tree; -+}; -+#endif -+ -+static struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity); -+ -+static unsigned int bfq_class_idx(struct bfq_entity *entity) -+{ -+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); -+ -+ return bfqq ? 
bfqq->ioprio_class - 1 : -+ BFQ_DEFAULT_GRP_CLASS - 1; -+} -+ -+static unsigned int bfq_tot_busy_queues(struct bfq_data *bfqd) -+{ -+ return bfqd->busy_queues[0] + bfqd->busy_queues[1] + -+ bfqd->busy_queues[2]; -+} -+ -+static struct bfq_service_tree * -+bfq_entity_service_tree(struct bfq_entity *entity) -+{ -+ struct bfq_sched_data *sched_data = entity->sched_data; -+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); -+ unsigned int idx = bfq_class_idx(entity); -+ -+ BUG_ON(idx >= BFQ_IOPRIO_CLASSES); -+ BUG_ON(sched_data == NULL); -+ -+ if (bfqq) -+ bfq_log_bfqq(bfqq->bfqd, bfqq, -+ "%p %d", -+ sched_data->service_tree + idx, idx); -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ else { -+ struct bfq_group *bfqg = -+ container_of(entity, struct bfq_group, entity); -+ -+ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg, -+ "%p %d", -+ sched_data->service_tree + idx, idx); -+ } -+#endif -+ return sched_data->service_tree + idx; -+} -+ -+static struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync) -+{ -+ return bic->bfqq[is_sync]; -+} -+ -+static void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, -+ bool is_sync) -+{ -+ bic->bfqq[is_sync] = bfqq; -+} -+ -+static struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic) -+{ -+ return bic->icq.q->elevator->elevator_data; -+} -+ -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+ -+static struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq) -+{ -+ struct bfq_entity *group_entity = bfqq->entity.parent; -+ -+ if (!group_entity) -+ group_entity = &bfqq->bfqd->root_group->entity; -+ -+ return container_of(group_entity, struct bfq_group, entity); -+} -+ -+#else -+ -+static struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq) -+{ -+ return bfqq->bfqd->root_group; -+} -+ -+#endif -+ -+static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio); -+static void bfq_put_queue(struct bfq_queue *bfqq); -+static void bfq_dispatch_insert(struct request_queue *q, struct request *rq); -+static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd, -+ struct bio *bio, bool is_sync, -+ struct bfq_io_cq *bic); -+static void bfq_end_wr_async_queues(struct bfq_data *bfqd, -+ struct bfq_group *bfqg); -+#ifdef BFQ_GROUP_IOSCHED_ENABLED -+static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg); -+#endif -+static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq); -+ -+#endif /* _BFQ_H */ -diff --git a/block/blk-mq.c b/block/blk-mq.c -index e3c39ea8e17b..7a57368841f6 100644 ---- a/block/blk-mq.c -+++ b/block/blk-mq.c -@@ -2878,6 +2878,8 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr) - } - if (ret) - break; -+ if (q->elevator && q->elevator->type->ops.mq.depth_updated) -+ q->elevator->type->ops.mq.depth_updated(hctx); - } - - if (!ret) -diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h -index 6980014357d4..8c4568ea6884 100644 ---- a/include/linux/blkdev.h -+++ b/include/linux/blkdev.h -@@ -54,7 +54,7 @@ struct blk_stat_callback; - * Maximum number of blkcg policies allowed to be registered concurrently. - * Defined here to simplify include dependency. 
- */ --#define BLKCG_MAX_POLS 5 -+#define BLKCG_MAX_POLS 7 - - typedef void (rq_end_io_fn)(struct request *, blk_status_t); - -@@ -127,6 +127,10 @@ typedef __u32 __bitwise req_flags_t; - #define RQF_MQ_POLL_SLEPT ((__force req_flags_t)(1 << 20)) - /* ->timeout has been called, don't expire again */ - #define RQF_TIMED_OUT ((__force req_flags_t)(1 << 21)) -+/* DEBUG: rq in bfq-mq dispatch list */ -+#define RQF_DISP_LIST ((__force req_flags_t)(1 << 22)) -+/* DEBUG: rq had get_rq_private executed on it */ -+#define RQF_GOT ((__force req_flags_t)(1 << 23)) - - /* flags that prevent us from merging requests: */ - #define RQF_NOMERGE_FLAGS \ -diff --git a/include/linux/elevator.h b/include/linux/elevator.h -index a02deea30185..a2bf4a6b9316 100644 ---- a/include/linux/elevator.h -+++ b/include/linux/elevator.h -@@ -99,6 +99,7 @@ struct elevator_mq_ops { - void (*exit_sched)(struct elevator_queue *); - int (*init_hctx)(struct blk_mq_hw_ctx *, unsigned int); - void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int); -+ void (*depth_updated)(struct blk_mq_hw_ctx *); - - bool (*allow_merge)(struct request_queue *, struct request *, struct bio *); - bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *); diff --git a/sys-kernel/linux-image-redcore-lts/files/4.19-redcore-lts-amd64.config b/sys-kernel/linux-image-redcore-lts/files/4.19-redcore-lts-amd64.config deleted file mode 100644 index c5bedf65..00000000 --- a/sys-kernel/linux-image-redcore-lts/files/4.19-redcore-lts-amd64.config +++ /dev/null @@ -1,9348 +0,0 @@ -# -# Automatically generated file; DO NOT EDIT. -# Linux/x86 4.19.20-redcore-lts Kernel Configuration -# - -# -# Compiler: gcc (Gentoo Hardened 8.2.0-r1337 p1.6) 8.2.0 -# -CONFIG_CC_IS_GCC=y -CONFIG_GCC_VERSION=80200 -CONFIG_CLANG_VERSION=0 -CONFIG_IRQ_WORK=y -CONFIG_BUILDTIME_EXTABLE_SORT=y -CONFIG_THREAD_INFO_IN_TASK=y - -# -# General setup -# -CONFIG_SCHED_MUQSS=y -CONFIG_INIT_ENV_ARG_LIMIT=32 -# CONFIG_COMPILE_TEST is not set -CONFIG_LOCALVERSION="" -CONFIG_LOCALVERSION_AUTO=y -CONFIG_BUILD_SALT="" -CONFIG_HAVE_KERNEL_GZIP=y -CONFIG_HAVE_KERNEL_BZIP2=y -CONFIG_HAVE_KERNEL_LZMA=y -CONFIG_HAVE_KERNEL_XZ=y -CONFIG_HAVE_KERNEL_LZO=y -CONFIG_HAVE_KERNEL_LZ4=y -# CONFIG_KERNEL_GZIP is not set -# CONFIG_KERNEL_BZIP2 is not set -# CONFIG_KERNEL_LZMA is not set -# CONFIG_KERNEL_XZ is not set -# CONFIG_KERNEL_LZO is not set -CONFIG_KERNEL_LZ4=y -CONFIG_DEFAULT_HOSTNAME="(none)" -CONFIG_SWAP=y -CONFIG_SYSVIPC=y -CONFIG_SYSVIPC_SYSCTL=y -CONFIG_POSIX_MQUEUE=y -CONFIG_POSIX_MQUEUE_SYSCTL=y -CONFIG_CROSS_MEMORY_ATTACH=y -# CONFIG_USELIB is not set -CONFIG_AUDIT=y -CONFIG_HAVE_ARCH_AUDITSYSCALL=y -CONFIG_AUDITSYSCALL=y -CONFIG_AUDIT_WATCH=y -CONFIG_AUDIT_TREE=y - -# -# IRQ subsystem -# -CONFIG_GENERIC_IRQ_PROBE=y -CONFIG_GENERIC_IRQ_SHOW=y -CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y -CONFIG_GENERIC_PENDING_IRQ=y -CONFIG_GENERIC_IRQ_MIGRATION=y -CONFIG_GENERIC_IRQ_CHIP=y -CONFIG_IRQ_DOMAIN=y -CONFIG_IRQ_SIM=y -CONFIG_IRQ_DOMAIN_HIERARCHY=y -CONFIG_GENERIC_MSI_IRQ=y -CONFIG_GENERIC_MSI_IRQ_DOMAIN=y -CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR=y -CONFIG_GENERIC_IRQ_RESERVATION_MODE=y -CONFIG_IRQ_FORCED_THREADING=y -# CONFIG_FORCE_IRQ_THREADING is not set -CONFIG_SPARSE_IRQ=y -# CONFIG_GENERIC_IRQ_DEBUGFS is not set -CONFIG_CLOCKSOURCE_WATCHDOG=y -CONFIG_ARCH_CLOCKSOURCE_DATA=y -CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE=y -CONFIG_GENERIC_TIME_VSYSCALL=y -CONFIG_GENERIC_CLOCKEVENTS=y -CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y -CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y -CONFIG_GENERIC_CMOS_UPDATE=y - -# -# Timers subsystem 
-# -CONFIG_TICK_ONESHOT=y -CONFIG_HZ_PERIODIC=y -# CONFIG_NO_HZ_IDLE is not set -# CONFIG_NO_HZ_FULL is not set -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y -# CONFIG_PREEMPT_NONE is not set -# CONFIG_PREEMPT_VOLUNTARY is not set -CONFIG_PREEMPT=y -CONFIG_PREEMPT_COUNT=y - -# -# CPU/Task time and stats accounting -# -CONFIG_VIRT_CPU_ACCOUNTING=y -# CONFIG_TICK_CPU_ACCOUNTING is not set -CONFIG_VIRT_CPU_ACCOUNTING_GEN=y -CONFIG_IRQ_TIME_ACCOUNTING=y -CONFIG_HAVE_SCHED_AVG_IRQ=y -CONFIG_BSD_PROCESS_ACCT=y -# CONFIG_BSD_PROCESS_ACCT_V3 is not set -CONFIG_TASKSTATS=y -CONFIG_TASK_DELAY_ACCT=y -CONFIG_TASK_XACCT=y -CONFIG_TASK_IO_ACCOUNTING=y -CONFIG_CPU_ISOLATION=y - -# -# RCU Subsystem -# -CONFIG_PREEMPT_RCU=y -# CONFIG_RCU_EXPERT is not set -CONFIG_SRCU=y -CONFIG_TREE_SRCU=y -CONFIG_TASKS_RCU=y -CONFIG_RCU_STALL_COMMON=y -CONFIG_RCU_NEED_SEGCBLIST=y -CONFIG_CONTEXT_TRACKING=y -# CONFIG_CONTEXT_TRACKING_FORCE is not set -CONFIG_BUILD_BIN2C=y -CONFIG_IKCONFIG=y -CONFIG_IKCONFIG_PROC=y -CONFIG_LOG_BUF_SHIFT=17 -CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 -CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13 -CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y -CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y -CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y -CONFIG_ARCH_SUPPORTS_INT128=y -CONFIG_CGROUPS=y -CONFIG_PAGE_COUNTER=y -CONFIG_MEMCG=y -CONFIG_MEMCG_SWAP=y -CONFIG_MEMCG_SWAP_ENABLED=y -CONFIG_MEMCG_KMEM=y -CONFIG_BLK_CGROUP=y -# CONFIG_DEBUG_BLK_CGROUP is not set -CONFIG_CGROUP_WRITEBACK=y -CONFIG_CGROUP_SCHED=y -CONFIG_CGROUP_PIDS=y -CONFIG_CGROUP_RDMA=y -CONFIG_CGROUP_FREEZER=y -CONFIG_CGROUP_HUGETLB=y -CONFIG_CPUSETS=y -CONFIG_PROC_PID_CPUSET=y -CONFIG_CGROUP_DEVICE=y -CONFIG_CGROUP_PERF=y -CONFIG_CGROUP_BPF=y -# CONFIG_CGROUP_DEBUG is not set -CONFIG_SOCK_CGROUP_DATA=y -CONFIG_NAMESPACES=y -CONFIG_UTS_NS=y -CONFIG_IPC_NS=y -CONFIG_USER_NS=y -CONFIG_PID_NS=y -CONFIG_NET_NS=y -# CONFIG_CHECKPOINT_RESTORE is not set -# CONFIG_SYSFS_DEPRECATED is not set -CONFIG_RELAY=y -CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="" -CONFIG_RD_GZIP=y -CONFIG_RD_BZIP2=y -CONFIG_RD_LZMA=y -CONFIG_RD_XZ=y -CONFIG_RD_LZO=y -CONFIG_RD_LZ4=y -CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -# CONFIG_LOCAL_INIT is not set -CONFIG_SYSCTL=y -CONFIG_ANON_INODES=y -CONFIG_HAVE_UID16=y -CONFIG_SYSCTL_EXCEPTION_TRACE=y -CONFIG_HAVE_PCSPKR_PLATFORM=y -CONFIG_BPF=y -# CONFIG_EXPERT is not set -CONFIG_UID16=y -CONFIG_MULTIUSER=y -CONFIG_SGETMASK_SYSCALL=y -CONFIG_SYSFS_SYSCALL=y -CONFIG_FHANDLE=y -CONFIG_POSIX_TIMERS=y -CONFIG_PRINTK=y -CONFIG_PRINTK_NMI=y -CONFIG_BUG=y -CONFIG_ELF_CORE=y -CONFIG_PCSPKR_PLATFORM=y -CONFIG_BASE_FULL=y -CONFIG_FUTEX=y -CONFIG_FUTEX_PI=y -CONFIG_EPOLL=y -CONFIG_SIGNALFD=y -CONFIG_TIMERFD=y -CONFIG_EVENTFD=y -CONFIG_SHMEM=y -CONFIG_AIO=y -CONFIG_ADVISE_SYSCALLS=y -CONFIG_MEMBARRIER=y -CONFIG_KALLSYMS=y -CONFIG_KALLSYMS_ALL=y -CONFIG_KALLSYMS_ABSOLUTE_PERCPU=y -CONFIG_KALLSYMS_BASE_RELATIVE=y -CONFIG_BPF_SYSCALL=y -CONFIG_BPF_JIT_ALWAYS_ON=y -CONFIG_USERFAULTFD=y -CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y -CONFIG_RSEQ=y -# CONFIG_EMBEDDED is not set -CONFIG_HAVE_PERF_EVENTS=y - -# -# Kernel Performance Events And Counters -# -CONFIG_PERF_EVENTS=y -# CONFIG_DEBUG_PERF_USE_VMALLOC is not set -CONFIG_VM_EVENT_COUNTERS=y -CONFIG_SLUB_DEBUG=y -# CONFIG_COMPAT_BRK is not set -# CONFIG_SLAB is not set -CONFIG_SLUB=y -CONFIG_SLAB_MERGE_DEFAULT=y -CONFIG_SLAB_FREELIST_RANDOM=y -CONFIG_SLAB_FREELIST_HARDENED=y -CONFIG_SLAB_HARDENED=y -CONFIG_SLAB_SANITIZE=y -CONFIG_SLAB_SANITIZE_VERIFY=y -CONFIG_SLUB_CPU_PARTIAL=y 
-CONFIG_SYSTEM_DATA_VERIFICATION=y -CONFIG_PROFILING=y -CONFIG_TRACEPOINTS=y -CONFIG_64BIT=y -CONFIG_X86_64=y -CONFIG_X86=y -CONFIG_INSTRUCTION_DECODER=y -CONFIG_OUTPUT_FORMAT="elf64-x86-64" -CONFIG_ARCH_DEFCONFIG="arch/x86/configs/x86_64_defconfig" -CONFIG_LOCKDEP_SUPPORT=y -CONFIG_STACKTRACE_SUPPORT=y -CONFIG_MMU=y -CONFIG_ARCH_MMAP_RND_BITS_MIN=28 -CONFIG_ARCH_MMAP_RND_BITS_MAX=32 -CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=8 -CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 -CONFIG_GENERIC_ISA_DMA=y -CONFIG_GENERIC_BUG=y -CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y -CONFIG_GENERIC_HWEIGHT=y -CONFIG_ARCH_MAY_HAVE_PC_FDC=y -CONFIG_RWSEM_XCHGADD_ALGORITHM=y -CONFIG_GENERIC_CALIBRATE_DELAY=y -CONFIG_ARCH_HAS_CPU_RELAX=y -CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y -CONFIG_ARCH_HAS_FILTER_PGPROT=y -CONFIG_HAVE_SETUP_PER_CPU_AREA=y -CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y -CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y -CONFIG_ARCH_HIBERNATION_POSSIBLE=y -CONFIG_ARCH_SUSPEND_POSSIBLE=y -CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y -CONFIG_ARCH_WANT_GENERAL_HUGETLB=y -CONFIG_ZONE_DMA32=y -CONFIG_AUDIT_ARCH=y -CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y -CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y -CONFIG_HAVE_INTEL_TXT=y -CONFIG_X86_64_SMP=y -CONFIG_ARCH_SUPPORTS_UPROBES=y -CONFIG_FIX_EARLYCON_MEM=y -CONFIG_DYNAMIC_PHYSICAL_MASK=y -CONFIG_PGTABLE_LEVELS=4 -CONFIG_CC_HAS_SANE_STACKPROTECTOR=y - -# -# Processor type and features -# -CONFIG_ZONE_DMA=y -CONFIG_SMP=y -CONFIG_X86_FEATURE_NAMES=y -CONFIG_X86_X2APIC=y -CONFIG_X86_MPPARSE=y -# CONFIG_GOLDFISH is not set -CONFIG_RETPOLINE=y -CONFIG_INTEL_RDT=y -# CONFIG_X86_EXTENDED_PLATFORM is not set -CONFIG_X86_INTEL_LPSS=y -CONFIG_X86_AMD_PLATFORM_DEVICE=y -CONFIG_IOSF_MBI=y -# CONFIG_IOSF_MBI_DEBUG is not set -CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y -CONFIG_SCHED_OMIT_FRAME_POINTER=y -CONFIG_HYPERVISOR_GUEST=y -CONFIG_PARAVIRT=y -# CONFIG_PARAVIRT_DEBUG is not set -# CONFIG_PARAVIRT_SPINLOCKS is not set -# CONFIG_XEN is not set -CONFIG_KVM_GUEST=y -# CONFIG_KVM_DEBUG_FS is not set -# CONFIG_PARAVIRT_TIME_ACCOUNTING is not set -CONFIG_PARAVIRT_CLOCK=y -CONFIG_JAILHOUSE_GUEST=y -CONFIG_NO_BOOTMEM=y -# CONFIG_MK8 is not set -# CONFIG_MPSC is not set -# CONFIG_MCORE2 is not set -# CONFIG_MATOM is not set -CONFIG_GENERIC_CPU=y -CONFIG_X86_INTERNODE_CACHE_SHIFT=6 -CONFIG_X86_L1_CACHE_SHIFT=6 -CONFIG_X86_TSC=y -CONFIG_X86_CMPXCHG64=y -CONFIG_X86_CMOV=y -CONFIG_X86_MINIMUM_CPU_FAMILY=64 -CONFIG_X86_DEBUGCTLMSR=y -CONFIG_CPU_SUP_INTEL=y -CONFIG_CPU_SUP_AMD=y -CONFIG_CPU_SUP_CENTAUR=y -CONFIG_HPET_TIMER=y -CONFIG_HPET_EMULATE_RTC=y -CONFIG_DMI=y -CONFIG_GART_IOMMU=y -# CONFIG_CALGARY_IOMMU is not set -CONFIG_MAXSMP=y -CONFIG_NR_CPUS_RANGE_BEGIN=8192 -CONFIG_NR_CPUS_RANGE_END=8192 -CONFIG_NR_CPUS_DEFAULT=8192 -CONFIG_NR_CPUS=8192 -CONFIG_SCHED_SMT=y -CONFIG_SMT_NICE=y -CONFIG_SCHED_MC=y -CONFIG_SCHED_MC_PRIO=y -# CONFIG_RQ_NONE is not set -# CONFIG_RQ_SMT is not set -CONFIG_RQ_MC=y -# CONFIG_RQ_SMP is not set -CONFIG_SHARERQ=2 -CONFIG_X86_LOCAL_APIC=y -CONFIG_X86_IO_APIC=y -CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y -CONFIG_X86_MCE=y -# CONFIG_X86_MCELOG_LEGACY is not set -CONFIG_X86_MCE_INTEL=y -CONFIG_X86_MCE_AMD=y -CONFIG_X86_MCE_THRESHOLD=y -# CONFIG_X86_MCE_INJECT is not set -CONFIG_X86_THERMAL_VECTOR=y - -# -# Performance monitoring -# -CONFIG_PERF_EVENTS_INTEL_UNCORE=y -CONFIG_PERF_EVENTS_INTEL_RAPL=y -CONFIG_PERF_EVENTS_INTEL_CSTATE=y -CONFIG_PERF_EVENTS_AMD_POWER=m -CONFIG_X86_16BIT=y -CONFIG_X86_ESPFIX64=y -CONFIG_X86_VSYSCALL_EMULATION=y -CONFIG_I8K=m -CONFIG_MICROCODE=y -CONFIG_MICROCODE_INTEL=y 
-CONFIG_MICROCODE_AMD=y -CONFIG_MICROCODE_OLD_INTERFACE=y -CONFIG_X86_MSR=m -CONFIG_X86_CPUID=m -# CONFIG_X86_5LEVEL is not set -CONFIG_X86_DIRECT_GBPAGES=y -CONFIG_ARCH_HAS_MEM_ENCRYPT=y -CONFIG_AMD_MEM_ENCRYPT=y -# CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT is not set -CONFIG_ARCH_USE_MEMREMAP_PROT=y -CONFIG_NUMA=y -CONFIG_AMD_NUMA=y -CONFIG_X86_64_ACPI_NUMA=y -CONFIG_NODES_SPAN_OTHER_NODES=y -# CONFIG_NUMA_EMU is not set -CONFIG_NODES_SHIFT=10 -CONFIG_ARCH_SPARSEMEM_ENABLE=y -CONFIG_ARCH_SPARSEMEM_DEFAULT=y -CONFIG_ARCH_SELECT_MEMORY_MODEL=y -CONFIG_ARCH_MEMORY_PROBE=y -CONFIG_ARCH_PROC_KCORE_TEXT=y -CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 -CONFIG_X86_PMEM_LEGACY_DEVICE=y -CONFIG_X86_PMEM_LEGACY=m -CONFIG_X86_CHECK_BIOS_CORRUPTION=y -CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK=y -CONFIG_X86_RESERVE_LOW=64 -CONFIG_MTRR=y -CONFIG_MTRR_SANITIZER=y -CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=0 -CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1 -CONFIG_X86_PAT=y -CONFIG_ARCH_USES_PG_UNCACHED=y -CONFIG_ARCH_RANDOM=y -CONFIG_X86_SMAP=y -CONFIG_X86_INTEL_UMIP=y -CONFIG_X86_INTEL_MPX=y -CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS=y -CONFIG_EFI=y -CONFIG_EFI_STUB=y -CONFIG_EFI_MIXED=y -CONFIG_SECCOMP=y -CONFIG_HZ_100=y -# CONFIG_HZ_250_NODEF is not set -# CONFIG_HZ_300_NODEF is not set -# CONFIG_HZ_1000_NODEF is not set -CONFIG_HZ=100 -CONFIG_SCHED_HRTICK=y -CONFIG_KEXEC=y -# CONFIG_CRASH_DUMP is not set -CONFIG_KEXEC_JUMP=y -CONFIG_PHYSICAL_START=0x1000000 -CONFIG_RELOCATABLE=y -CONFIG_RANDOMIZE_BASE=y -CONFIG_X86_NEED_RELOCS=y -CONFIG_PHYSICAL_ALIGN=0x1000000 -CONFIG_DYNAMIC_MEMORY_LAYOUT=y -CONFIG_RANDOMIZE_MEMORY=y -CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING=0xa -CONFIG_HOTPLUG_CPU=y -CONFIG_BOOTPARAM_HOTPLUG_CPU0=y -# CONFIG_DEBUG_HOTPLUG_CPU0 is not set -# CONFIG_COMPAT_VDSO is not set -CONFIG_LEGACY_VSYSCALL_EMULATE=y -# CONFIG_LEGACY_VSYSCALL_NONE is not set -# CONFIG_CMDLINE_BOOL is not set -CONFIG_MODIFY_LDT_SYSCALL=y -CONFIG_HAVE_LIVEPATCH=y -# CONFIG_LIVEPATCH is not set -CONFIG_ARCH_HAS_ADD_PAGES=y -CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y -CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y -CONFIG_USE_PERCPU_NUMA_NODE_ID=y -CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y -CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y -CONFIG_ARCH_ENABLE_THP_MIGRATION=y - -# -# Power management and ACPI options -# -CONFIG_ARCH_HIBERNATION_HEADER=y -CONFIG_SUSPEND=y -CONFIG_SUSPEND_FREEZER=y -CONFIG_HIBERNATE_CALLBACKS=y -CONFIG_HIBERNATION=y -CONFIG_PM_STD_PARTITION="" -CONFIG_PM_SLEEP=y -CONFIG_PM_SLEEP_SMP=y -CONFIG_PM_AUTOSLEEP=y -CONFIG_PM_WAKELOCKS=y -CONFIG_PM_WAKELOCKS_LIMIT=100 -CONFIG_PM_WAKELOCKS_GC=y -CONFIG_PM=y -# CONFIG_PM_DEBUG is not set -CONFIG_PM_CLK=y -CONFIG_PM_GENERIC_DOMAINS=y -# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set -CONFIG_PM_GENERIC_DOMAINS_SLEEP=y -CONFIG_ARCH_SUPPORTS_ACPI=y -CONFIG_ACPI=y -CONFIG_ACPI_LEGACY_TABLES_LOOKUP=y -CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC=y -CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y -# CONFIG_ACPI_DEBUGGER is not set -CONFIG_ACPI_SPCR_TABLE=y -CONFIG_ACPI_LPIT=y -CONFIG_ACPI_SLEEP=y -# CONFIG_ACPI_PROCFS_POWER is not set -CONFIG_ACPI_REV_OVERRIDE_POSSIBLE=y -# CONFIG_ACPI_EC_DEBUGFS is not set -CONFIG_ACPI_AC=m -CONFIG_ACPI_BATTERY=m -CONFIG_ACPI_BUTTON=m -CONFIG_ACPI_VIDEO=m -CONFIG_ACPI_FAN=m -CONFIG_ACPI_TAD=m -CONFIG_ACPI_DOCK=y -CONFIG_ACPI_CPU_FREQ_PSS=y -CONFIG_ACPI_PROCESSOR_CSTATE=y -CONFIG_ACPI_PROCESSOR_IDLE=y -CONFIG_ACPI_CPPC_LIB=y -CONFIG_ACPI_PROCESSOR=y -CONFIG_ACPI_IPMI=m -CONFIG_ACPI_HOTPLUG_CPU=y -CONFIG_ACPI_PROCESSOR_AGGREGATOR=m -CONFIG_ACPI_THERMAL=m 
-CONFIG_ACPI_NUMA=y -CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y -CONFIG_ACPI_TABLE_UPGRADE=y -# CONFIG_ACPI_DEBUG is not set -CONFIG_ACPI_PCI_SLOT=y -CONFIG_ACPI_CONTAINER=y -CONFIG_ACPI_HOTPLUG_MEMORY=y -CONFIG_ACPI_HOTPLUG_IOAPIC=y -CONFIG_ACPI_SBS=m -CONFIG_ACPI_HED=y -# CONFIG_ACPI_CUSTOM_METHOD is not set -CONFIG_ACPI_BGRT=y -CONFIG_ACPI_NFIT=m -CONFIG_HAVE_ACPI_APEI=y -CONFIG_HAVE_ACPI_APEI_NMI=y -CONFIG_ACPI_APEI=y -CONFIG_ACPI_APEI_GHES=y -CONFIG_ACPI_APEI_PCIEAER=y -CONFIG_ACPI_APEI_MEMORY_FAILURE=y -# CONFIG_ACPI_APEI_EINJ is not set -# CONFIG_ACPI_APEI_ERST_DEBUG is not set -CONFIG_DPTF_POWER=m -CONFIG_ACPI_WATCHDOG=y -CONFIG_ACPI_EXTLOG=m -CONFIG_PMIC_OPREGION=y -# CONFIG_XPOWER_PMIC_OPREGION is not set -# CONFIG_BXT_WC_PMIC_OPREGION is not set -CONFIG_CHT_DC_TI_PMIC_OPREGION=y -CONFIG_ACPI_CONFIGFS=m -CONFIG_X86_PM_TIMER=y -CONFIG_SFI=y - -# -# CPU Frequency scaling -# -CONFIG_CPU_FREQ=y -CONFIG_CPU_FREQ_GOV_ATTR_SET=y -CONFIG_CPU_FREQ_GOV_COMMON=y -CONFIG_CPU_FREQ_STAT=y -CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y -# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set -CONFIG_CPU_FREQ_GOV_PERFORMANCE=y -CONFIG_CPU_FREQ_GOV_POWERSAVE=y -CONFIG_CPU_FREQ_GOV_USERSPACE=y -CONFIG_CPU_FREQ_GOV_ONDEMAND=y -CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y -CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y - -# -# CPU frequency scaling drivers -# -CONFIG_X86_INTEL_PSTATE=y -CONFIG_X86_PCC_CPUFREQ=m -CONFIG_X86_ACPI_CPUFREQ=m -CONFIG_X86_ACPI_CPUFREQ_CPB=y -CONFIG_X86_POWERNOW_K8=m -CONFIG_X86_AMD_FREQ_SENSITIVITY=m -# CONFIG_X86_SPEEDSTEP_CENTRINO is not set -# CONFIG_X86_P4_CLOCKMOD is not set - -# -# shared options -# - -# -# CPU Idle -# -CONFIG_CPU_IDLE=y -CONFIG_CPU_IDLE_GOV_LADDER=y -CONFIG_CPU_IDLE_GOV_MENU=y -CONFIG_INTEL_IDLE=y - -# -# Bus options (PCI etc.) 
-# -CONFIG_PCI=y -CONFIG_PCI_DIRECT=y -CONFIG_PCI_MMCONFIG=y -CONFIG_PCI_DOMAINS=y -CONFIG_MMCONF_FAM10H=y -CONFIG_PCIEPORTBUS=y -CONFIG_HOTPLUG_PCI_PCIE=y -CONFIG_PCIEAER=y -CONFIG_PCIEAER_INJECT=m -CONFIG_PCIE_ECRC=y -CONFIG_PCIEASPM=y -# CONFIG_PCIEASPM_DEBUG is not set -CONFIG_PCIEASPM_DEFAULT=y -# CONFIG_PCIEASPM_POWERSAVE is not set -# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set -# CONFIG_PCIEASPM_PERFORMANCE is not set -CONFIG_PCIE_PME=y -CONFIG_PCIE_DPC=y -CONFIG_PCIE_PTM=y -CONFIG_PCI_MSI=y -CONFIG_PCI_MSI_IRQ_DOMAIN=y -CONFIG_PCI_QUIRKS=y -# CONFIG_PCI_DEBUG is not set -CONFIG_PCI_REALLOC_ENABLE_AUTO=y -CONFIG_PCI_STUB=m -CONFIG_PCI_PF_STUB=m -CONFIG_PCI_ATS=y -CONFIG_PCI_LOCKLESS_CONFIG=y -CONFIG_PCI_IOV=y -CONFIG_PCI_PRI=y -CONFIG_PCI_PASID=y -CONFIG_PCI_LABEL=y -CONFIG_PCI_HYPERV=m -CONFIG_HOTPLUG_PCI=y -CONFIG_HOTPLUG_PCI_ACPI=y -CONFIG_HOTPLUG_PCI_ACPI_IBM=m -CONFIG_HOTPLUG_PCI_CPCI=y -CONFIG_HOTPLUG_PCI_CPCI_ZT5550=m -CONFIG_HOTPLUG_PCI_CPCI_GENERIC=m -# CONFIG_HOTPLUG_PCI_SHPC is not set - -# -# PCI controller drivers -# - -# -# Cadence PCIe controllers support -# -CONFIG_VMD=m - -# -# DesignWare PCI Core Support -# -# CONFIG_PCIE_DW_PLAT_HOST is not set -# CONFIG_PCIE_DW_PLAT_EP is not set - -# -# PCI Endpoint -# -CONFIG_PCI_ENDPOINT=y -CONFIG_PCI_ENDPOINT_CONFIGFS=y -# CONFIG_PCI_EPF_TEST is not set - -# -# PCI switch controller drivers -# -CONFIG_PCI_SW_SWITCHTEC=m -CONFIG_ISA_DMA_API=y -CONFIG_AMD_NB=y -CONFIG_PCCARD=m -CONFIG_PCMCIA=m -CONFIG_PCMCIA_LOAD_CIS=y -CONFIG_CARDBUS=y - -# -# PC-card bridges -# -CONFIG_YENTA=m -CONFIG_YENTA_O2=y -CONFIG_YENTA_RICOH=y -CONFIG_YENTA_TI=y -CONFIG_YENTA_ENE_TUNE=y -CONFIG_YENTA_TOSHIBA=y -CONFIG_PD6729=m -CONFIG_I82092=m -CONFIG_PCCARD_NONSTATIC=y -CONFIG_RAPIDIO=y -CONFIG_RAPIDIO_TSI721=y -CONFIG_RAPIDIO_DISC_TIMEOUT=30 -CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS=y -CONFIG_RAPIDIO_DMA_ENGINE=y -# CONFIG_RAPIDIO_DEBUG is not set -CONFIG_RAPIDIO_ENUM_BASIC=m -CONFIG_RAPIDIO_CHMAN=m -CONFIG_RAPIDIO_MPORT_CDEV=m - -# -# RapidIO Switch drivers -# -CONFIG_RAPIDIO_TSI57X=y -CONFIG_RAPIDIO_CPS_XX=y -CONFIG_RAPIDIO_TSI568=y -CONFIG_RAPIDIO_CPS_GEN2=y -CONFIG_RAPIDIO_RXS_GEN3=m -CONFIG_X86_SYSFB=y - -# -# Binary Emulations -# -CONFIG_IA32_EMULATION=y -CONFIG_IA32_AOUT=y -CONFIG_X86_X32=y -CONFIG_COMPAT_32=y -CONFIG_COMPAT=y -CONFIG_COMPAT_FOR_U64_ALIGNMENT=y -CONFIG_SYSVIPC_COMPAT=y -CONFIG_X86_DEV_DMA_OPS=y -CONFIG_HAVE_GENERIC_GUP=y - -# -# Firmware Drivers -# -CONFIG_EDD=m -# CONFIG_EDD_OFF is not set -CONFIG_FIRMWARE_MEMMAP=y -CONFIG_DELL_RBU=m -CONFIG_DCDBAS=m -CONFIG_DMIID=y -CONFIG_DMI_SYSFS=m -CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y -CONFIG_ISCSI_IBFT_FIND=y -CONFIG_ISCSI_IBFT=m -CONFIG_FW_CFG_SYSFS=m -# CONFIG_FW_CFG_SYSFS_CMDLINE is not set -CONFIG_GOOGLE_FIRMWARE=y -CONFIG_GOOGLE_SMI=m -CONFIG_GOOGLE_COREBOOT_TABLE=m -CONFIG_GOOGLE_COREBOOT_TABLE_ACPI=m -CONFIG_GOOGLE_MEMCONSOLE=m -CONFIG_GOOGLE_MEMCONSOLE_X86_LEGACY=m -CONFIG_GOOGLE_FRAMEBUFFER_COREBOOT=m -CONFIG_GOOGLE_MEMCONSOLE_COREBOOT=m -CONFIG_GOOGLE_VPD=m - -# -# EFI (Extensible Firmware Interface) Support -# -CONFIG_EFI_VARS=m -CONFIG_EFI_ESRT=y -CONFIG_EFI_VARS_PSTORE=m -CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE=y -CONFIG_EFI_RUNTIME_MAP=y -# CONFIG_EFI_FAKE_MEMMAP is not set -CONFIG_EFI_RUNTIME_WRAPPERS=y -CONFIG_EFI_BOOTLOADER_CONTROL=m -CONFIG_EFI_CAPSULE_LOADER=m -CONFIG_EFI_TEST=m -CONFIG_APPLE_PROPERTIES=y -CONFIG_RESET_ATTACK_MITIGATION=y -CONFIG_UEFI_CPER=y -CONFIG_UEFI_CPER_X86=y -CONFIG_EFI_DEV_PATH_PARSER=y - -# -# Tegra firmware driver -# -CONFIG_HAVE_KVM=y 
-CONFIG_HAVE_KVM_IRQCHIP=y -CONFIG_HAVE_KVM_IRQFD=y -CONFIG_HAVE_KVM_IRQ_ROUTING=y -CONFIG_HAVE_KVM_EVENTFD=y -CONFIG_KVM_MMIO=y -CONFIG_KVM_ASYNC_PF=y -CONFIG_HAVE_KVM_MSI=y -CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y -CONFIG_KVM_VFIO=y -CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y -CONFIG_KVM_COMPAT=y -CONFIG_HAVE_KVM_IRQ_BYPASS=y -CONFIG_VIRTUALIZATION=y -CONFIG_KVM=m -CONFIG_KVM_INTEL=m -CONFIG_KVM_AMD=m -CONFIG_KVM_AMD_SEV=y -# CONFIG_KVM_MMU_AUDIT is not set -CONFIG_VHOST_NET=m -CONFIG_VHOST_SCSI=m -CONFIG_VHOST_VSOCK=m -CONFIG_VHOST=m -# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set - -# -# General architecture-dependent options -# -CONFIG_CRASH_CORE=y -CONFIG_KEXEC_CORE=y -CONFIG_HOTPLUG_SMT=y -# CONFIG_OPROFILE is not set -CONFIG_HAVE_OPROFILE=y -CONFIG_OPROFILE_NMI_TIMER=y -CONFIG_KPROBES=y -CONFIG_JUMP_LABEL=y -# CONFIG_STATIC_KEYS_SELFTEST is not set -CONFIG_OPTPROBES=y -CONFIG_KPROBES_ON_FTRACE=y -CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y -CONFIG_ARCH_USE_BUILTIN_BSWAP=y -CONFIG_KRETPROBES=y -CONFIG_USER_RETURN_NOTIFIER=y -CONFIG_HAVE_IOREMAP_PROT=y -CONFIG_HAVE_KPROBES=y -CONFIG_HAVE_KRETPROBES=y -CONFIG_HAVE_OPTPROBES=y -CONFIG_HAVE_KPROBES_ON_FTRACE=y -CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y -CONFIG_HAVE_NMI=y -CONFIG_HAVE_ARCH_TRACEHOOK=y -CONFIG_HAVE_DMA_CONTIGUOUS=y -CONFIG_GENERIC_SMP_IDLE_THREAD=y -CONFIG_ARCH_HAS_FORTIFY_SOURCE=y -CONFIG_ARCH_HAS_SET_MEMORY=y -CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y -CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT=y -CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y -CONFIG_HAVE_RSEQ=y -CONFIG_HAVE_CLK=y -CONFIG_HAVE_HW_BREAKPOINT=y -CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y -CONFIG_HAVE_USER_RETURN_NOTIFIER=y -CONFIG_HAVE_PERF_EVENTS_NMI=y -CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF=y -CONFIG_HAVE_PERF_REGS=y -CONFIG_HAVE_PERF_USER_STACK_DUMP=y -CONFIG_HAVE_ARCH_JUMP_LABEL=y -CONFIG_HAVE_RCU_TABLE_FREE=y -CONFIG_HAVE_RCU_TABLE_INVALIDATE=y -CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y -CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y -CONFIG_HAVE_CMPXCHG_LOCAL=y -CONFIG_HAVE_CMPXCHG_DOUBLE=y -CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y -CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y -CONFIG_HAVE_ARCH_SECCOMP_FILTER=y -CONFIG_SECCOMP_FILTER=y -CONFIG_HAVE_STACKPROTECTOR=y -CONFIG_CC_HAS_STACKPROTECTOR_NONE=y -CONFIG_STACKPROTECTOR=y -CONFIG_STACKPROTECTOR_STRONG=y -CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES=y -CONFIG_HAVE_CONTEXT_TRACKING=y -CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y -CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y -CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y -CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD=y -CONFIG_HAVE_ARCH_HUGE_VMAP=y -CONFIG_HAVE_ARCH_SOFT_DIRTY=y -CONFIG_HAVE_MOD_ARCH_SPECIFIC=y -CONFIG_MODULES_USE_ELF_RELA=y -CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y -CONFIG_ARCH_HAS_ELF_RANDOMIZE=y -CONFIG_HAVE_ARCH_MMAP_RND_BITS=y -CONFIG_HAVE_EXIT_THREAD=y -CONFIG_ARCH_MMAP_RND_BITS=32 -CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y -CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16 -CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES=y -CONFIG_HAVE_COPY_THREAD_TLS=y -CONFIG_HAVE_STACK_VALIDATION=y -CONFIG_HAVE_RELIABLE_STACKTRACE=y -CONFIG_ISA_BUS_API=y -CONFIG_OLD_SIGSUSPEND3=y -CONFIG_COMPAT_OLD_SIGACTION=y -CONFIG_COMPAT_32BIT_TIME=y -CONFIG_HAVE_ARCH_VMAP_STACK=y -CONFIG_VMAP_STACK=y -CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y -CONFIG_STRICT_KERNEL_RWX=y -CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y -CONFIG_STRICT_MODULE_RWX=y -CONFIG_ARCH_HAS_REFCOUNT=y -CONFIG_REFCOUNT_FULL=y -CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y - -# -# GCOV-based kernel profiling -# -# CONFIG_GCOV_KERNEL is not set -CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y -CONFIG_PLUGIN_HOSTCC="g++" 
-CONFIG_HAVE_GCC_PLUGINS=y -# CONFIG_GCC_PLUGINS is not set -CONFIG_RT_MUTEXES=y -CONFIG_BASE_SMALL=0 -CONFIG_MODULES=y -CONFIG_MODULE_FORCE_LOAD=y -CONFIG_MODULE_UNLOAD=y -CONFIG_MODULE_FORCE_UNLOAD=y -CONFIG_MODVERSIONS=y -CONFIG_MODULE_SRCVERSION_ALL=y -CONFIG_MODULE_SIG=y -# CONFIG_MODULE_SIG_FORCE is not set -CONFIG_MODULE_SIG_ALL=y -# CONFIG_MODULE_SIG_SHA1 is not set -# CONFIG_MODULE_SIG_SHA224 is not set -# CONFIG_MODULE_SIG_SHA256 is not set -# CONFIG_MODULE_SIG_SHA384 is not set -CONFIG_MODULE_SIG_SHA512=y -CONFIG_MODULE_SIG_HASH="sha512" -CONFIG_MODULE_COMPRESS=y -CONFIG_MODULE_COMPRESS_GZIP=y -# CONFIG_MODULE_COMPRESS_XZ is not set -# CONFIG_TRIM_UNUSED_KSYMS is not set -CONFIG_MODULES_TREE_LOOKUP=y -CONFIG_BLOCK=y -CONFIG_BLK_SCSI_REQUEST=y -CONFIG_BLK_DEV_BSG=y -CONFIG_BLK_DEV_BSGLIB=y -CONFIG_BLK_DEV_INTEGRITY=y -CONFIG_BLK_DEV_ZONED=y -CONFIG_BLK_DEV_THROTTLING=y -# CONFIG_BLK_DEV_THROTTLING_LOW is not set -CONFIG_BLK_CMDLINE_PARSER=y -CONFIG_BLK_WBT=y -# CONFIG_BLK_CGROUP_IOLATENCY is not set -CONFIG_BLK_WBT_SQ=y -CONFIG_BLK_WBT_MQ=y -CONFIG_BLK_DEBUG_FS=y -CONFIG_BLK_DEBUG_FS_ZONED=y -# CONFIG_BLK_SED_OPAL is not set - -# -# Partition Types -# -CONFIG_PARTITION_ADVANCED=y -CONFIG_ACORN_PARTITION=y -CONFIG_ACORN_PARTITION_CUMANA=y -CONFIG_ACORN_PARTITION_EESOX=y -CONFIG_ACORN_PARTITION_ICS=y -CONFIG_ACORN_PARTITION_ADFS=y -CONFIG_ACORN_PARTITION_POWERTEC=y -CONFIG_ACORN_PARTITION_RISCIX=y -CONFIG_AIX_PARTITION=y -CONFIG_OSF_PARTITION=y -CONFIG_AMIGA_PARTITION=y -CONFIG_ATARI_PARTITION=y -CONFIG_MAC_PARTITION=y -CONFIG_MSDOS_PARTITION=y -CONFIG_BSD_DISKLABEL=y -CONFIG_MINIX_SUBPARTITION=y -CONFIG_SOLARIS_X86_PARTITION=y -CONFIG_UNIXWARE_DISKLABEL=y -CONFIG_LDM_PARTITION=y -CONFIG_LDM_DEBUG=y -CONFIG_SGI_PARTITION=y -CONFIG_ULTRIX_PARTITION=y -CONFIG_SUN_PARTITION=y -CONFIG_KARMA_PARTITION=y -CONFIG_EFI_PARTITION=y -CONFIG_SYSV68_PARTITION=y -CONFIG_CMDLINE_PARTITION=y -CONFIG_BLOCK_COMPAT=y -CONFIG_BLK_MQ_PCI=y -CONFIG_BLK_MQ_VIRTIO=y -CONFIG_BLK_MQ_RDMA=y - -# -# IO Schedulers -# -CONFIG_IOSCHED_NOOP=y -CONFIG_IOSCHED_DEADLINE=y -CONFIG_IOSCHED_CFQ=y -CONFIG_CFQ_GROUP_IOSCHED=y -CONFIG_IOSCHED_BFQ_SQ=y -CONFIG_BFQ_SQ_GROUP_IOSCHED=y -# CONFIG_DEFAULT_DEADLINE is not set -# CONFIG_DEFAULT_CFQ is not set -CONFIG_DEFAULT_BFQ_SQ=y -# CONFIG_DEFAULT_NOOP is not set -CONFIG_DEFAULT_IOSCHED="bfq-sq" -CONFIG_MQ_IOSCHED_BFQ=y -CONFIG_MQ_BFQ_GROUP_IOSCHED=y -CONFIG_MQ_IOSCHED_DEADLINE=y -# CONFIG_MQ_IOSCHED_KYBER is not set -CONFIG_IOSCHED_BFQ=y -CONFIG_BFQ_GROUP_IOSCHED=y -CONFIG_PREEMPT_NOTIFIERS=y -CONFIG_PADATA=y -CONFIG_ASN1=y -CONFIG_UNINLINE_SPIN_UNLOCK=y -CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y -CONFIG_MUTEX_SPIN_ON_OWNER=y -CONFIG_RWSEM_SPIN_ON_OWNER=y -CONFIG_LOCK_SPIN_ON_OWNER=y -CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y -CONFIG_QUEUED_SPINLOCKS=y -CONFIG_ARCH_USE_QUEUED_RWLOCKS=y -CONFIG_QUEUED_RWLOCKS=y -CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE=y -CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y -CONFIG_FREEZER=y - -# -# Executable file formats -# -CONFIG_BINFMT_ELF=y -CONFIG_COMPAT_BINFMT_ELF=y -CONFIG_ELFCORE=y -CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y -CONFIG_BINFMT_SCRIPT=y -CONFIG_BINFMT_MISC=y -CONFIG_COREDUMP=y - -# -# Memory Management options -# -CONFIG_SELECT_MEMORY_MODEL=y -CONFIG_SPARSEMEM_MANUAL=y -CONFIG_SPARSEMEM=y -CONFIG_NEED_MULTIPLE_NODES=y -CONFIG_HAVE_MEMORY_PRESENT=y -CONFIG_SPARSEMEM_EXTREME=y -CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y -CONFIG_SPARSEMEM_VMEMMAP=y -CONFIG_HAVE_MEMBLOCK=y -CONFIG_HAVE_MEMBLOCK_NODE_MAP=y -CONFIG_ARCH_DISCARD_MEMBLOCK=y -CONFIG_MEMORY_ISOLATION=y 
-CONFIG_HAVE_BOOTMEM_INFO_NODE=y -CONFIG_MEMORY_HOTPLUG=y -CONFIG_MEMORY_HOTPLUG_SPARSE=y -# CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE is not set -CONFIG_MEMORY_HOTREMOVE=y -CONFIG_SPLIT_PTLOCK_CPUS=4 -CONFIG_MEMORY_BALLOON=y -CONFIG_BALLOON_COMPACTION=y -CONFIG_COMPACTION=y -CONFIG_MIGRATION=y -CONFIG_PHYS_ADDR_T_64BIT=y -CONFIG_BOUNCE=y -CONFIG_VIRT_TO_BUS=y -CONFIG_MMU_NOTIFIER=y -CONFIG_KSM=y -CONFIG_UKSM=y -# CONFIG_KSM_LEGACY is not set -CONFIG_DEFAULT_MMAP_MIN_ADDR=65536 -CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y -CONFIG_MEMORY_FAILURE=y -# CONFIG_HWPOISON_INJECT is not set -CONFIG_TRANSPARENT_HUGEPAGE=y -CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y -# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set -CONFIG_ARCH_WANTS_THP_SWAP=y -CONFIG_THP_SWAP=y -CONFIG_TRANSPARENT_HUGE_PAGECACHE=y -CONFIG_CLEANCACHE=y -CONFIG_FRONTSWAP=y -CONFIG_CMA=y -# CONFIG_CMA_DEBUG is not set -# CONFIG_CMA_DEBUGFS is not set -CONFIG_CMA_AREAS=7 -# CONFIG_ZSWAP is not set -CONFIG_ZPOOL=m -CONFIG_ZBUD=m -CONFIG_Z3FOLD=m -CONFIG_ZSMALLOC=y -# CONFIG_PGTABLE_MAPPING is not set -# CONFIG_ZSMALLOC_STAT is not set -CONFIG_GENERIC_EARLY_IOREMAP=y -# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set -# CONFIG_IDLE_PAGE_TRACKING is not set -CONFIG_ARCH_HAS_ZONE_DEVICE=y -# CONFIG_ZONE_DEVICE is not set -CONFIG_FRAME_VECTOR=y -CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y -CONFIG_ARCH_HAS_PKEYS=y -# CONFIG_PERCPU_STATS is not set -# CONFIG_GUP_BENCHMARK is not set -CONFIG_ARCH_HAS_PTE_SPECIAL=y -CONFIG_NET=y -CONFIG_COMPAT_NETLINK_MESSAGES=y -CONFIG_NET_INGRESS=y -CONFIG_NET_EGRESS=y - -# -# Networking options -# -CONFIG_PACKET=m -CONFIG_PACKET_DIAG=m -CONFIG_UNIX=m -CONFIG_UNIX_DIAG=m -CONFIG_TLS=m -# CONFIG_TLS_DEVICE is not set -CONFIG_XFRM=y -CONFIG_XFRM_OFFLOAD=y -CONFIG_XFRM_ALGO=m -CONFIG_XFRM_USER=m -# CONFIG_XFRM_INTERFACE is not set -CONFIG_XFRM_SUB_POLICY=y -CONFIG_XFRM_MIGRATE=y -CONFIG_XFRM_STATISTICS=y -CONFIG_XFRM_IPCOMP=m -CONFIG_NET_KEY=m -CONFIG_NET_KEY_MIGRATE=y -CONFIG_SMC=m -CONFIG_SMC_DIAG=m -CONFIG_XDP_SOCKETS=y -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_IP_ADVANCED_ROUTER=y -CONFIG_IP_FIB_TRIE_STATS=y -CONFIG_IP_MULTIPLE_TABLES=y -CONFIG_IP_ROUTE_MULTIPATH=y -CONFIG_IP_ROUTE_VERBOSE=y -CONFIG_IP_ROUTE_CLASSID=y -CONFIG_IP_PNP=y -CONFIG_IP_PNP_DHCP=y -CONFIG_IP_PNP_BOOTP=y -CONFIG_IP_PNP_RARP=y -CONFIG_NET_IPIP=m -CONFIG_NET_IPGRE_DEMUX=m -CONFIG_NET_IP_TUNNEL=m -CONFIG_NET_IPGRE=m -CONFIG_NET_IPGRE_BROADCAST=y -CONFIG_IP_MROUTE_COMMON=y -CONFIG_IP_MROUTE=y -CONFIG_IP_MROUTE_MULTIPLE_TABLES=y -CONFIG_IP_PIMSM_V1=y -CONFIG_IP_PIMSM_V2=y -CONFIG_SYN_COOKIES=y -CONFIG_NET_IPVTI=m -CONFIG_NET_UDP_TUNNEL=m -CONFIG_NET_FOU=m -CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_INET_AH=m -CONFIG_INET_ESP=m -CONFIG_INET_ESP_OFFLOAD=m -CONFIG_INET_IPCOMP=m -CONFIG_INET_XFRM_TUNNEL=m -CONFIG_INET_TUNNEL=m -CONFIG_INET_XFRM_MODE_TRANSPORT=m -CONFIG_INET_XFRM_MODE_TUNNEL=m -CONFIG_INET_XFRM_MODE_BEET=m -CONFIG_INET_DIAG=m -CONFIG_INET_TCP_DIAG=m -CONFIG_INET_UDP_DIAG=m -CONFIG_INET_RAW_DIAG=m -CONFIG_INET_DIAG_DESTROY=y -CONFIG_TCP_CONG_ADVANCED=y -CONFIG_TCP_CONG_BIC=m -CONFIG_TCP_CONG_CUBIC=m -CONFIG_TCP_CONG_WESTWOOD=m -CONFIG_TCP_CONG_HTCP=m -CONFIG_TCP_CONG_HSTCP=m -CONFIG_TCP_CONG_HYBLA=m -CONFIG_TCP_CONG_VEGAS=m -CONFIG_TCP_CONG_NV=m -CONFIG_TCP_CONG_SCALABLE=m -CONFIG_TCP_CONG_LP=m -CONFIG_TCP_CONG_VENO=m -CONFIG_TCP_CONG_YEAH=m -CONFIG_TCP_CONG_ILLINOIS=m -CONFIG_TCP_CONG_DCTCP=m -# CONFIG_TCP_CONG_CDG is not set -CONFIG_TCP_CONG_BBR=m -CONFIG_DEFAULT_RENO=y -CONFIG_DEFAULT_TCP_CONG="reno" -CONFIG_TCP_MD5SIG=y -CONFIG_IPV6=m 
-CONFIG_IPV6_ROUTER_PREF=y -CONFIG_IPV6_ROUTE_INFO=y -CONFIG_IPV6_OPTIMISTIC_DAD=y -CONFIG_INET6_AH=m -CONFIG_INET6_ESP=m -CONFIG_INET6_ESP_OFFLOAD=m -CONFIG_INET6_IPCOMP=m -CONFIG_IPV6_MIP6=m -CONFIG_IPV6_ILA=m -CONFIG_INET6_XFRM_TUNNEL=m -CONFIG_INET6_TUNNEL=m -CONFIG_INET6_XFRM_MODE_TRANSPORT=m -CONFIG_INET6_XFRM_MODE_TUNNEL=m -CONFIG_INET6_XFRM_MODE_BEET=m -CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m -CONFIG_IPV6_VTI=m -CONFIG_IPV6_SIT=m -CONFIG_IPV6_SIT_6RD=y -CONFIG_IPV6_NDISC_NODETYPE=y -CONFIG_IPV6_TUNNEL=m -CONFIG_IPV6_GRE=m -CONFIG_IPV6_FOU=m -CONFIG_IPV6_FOU_TUNNEL=m -CONFIG_IPV6_MULTIPLE_TABLES=y -CONFIG_IPV6_SUBTREES=y -CONFIG_IPV6_MROUTE=y -CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y -CONFIG_IPV6_PIMSM_V2=y -CONFIG_IPV6_SEG6_LWTUNNEL=y -CONFIG_IPV6_SEG6_HMAC=y -# CONFIG_NETLABEL is not set -CONFIG_NETWORK_SECMARK=y -CONFIG_NET_PTP_CLASSIFY=y -CONFIG_NETWORK_PHY_TIMESTAMPING=y -CONFIG_NETFILTER=y -CONFIG_NETFILTER_ADVANCED=y -CONFIG_BRIDGE_NETFILTER=m - -# -# Core Netfilter Configuration -# -CONFIG_NETFILTER_INGRESS=y -CONFIG_NETFILTER_NETLINK=m -CONFIG_NETFILTER_FAMILY_BRIDGE=y -CONFIG_NETFILTER_FAMILY_ARP=y -CONFIG_NETFILTER_NETLINK_ACCT=m -CONFIG_NETFILTER_NETLINK_QUEUE=m -CONFIG_NETFILTER_NETLINK_LOG=m -CONFIG_NETFILTER_NETLINK_OSF=m -CONFIG_NF_CONNTRACK=m -CONFIG_NF_LOG_COMMON=m -CONFIG_NF_LOG_NETDEV=m -CONFIG_NETFILTER_CONNCOUNT=m -CONFIG_NF_CONNTRACK_MARK=y -CONFIG_NF_CONNTRACK_SECMARK=y -CONFIG_NF_CONNTRACK_ZONES=y -CONFIG_NF_CONNTRACK_PROCFS=y -CONFIG_NF_CONNTRACK_EVENTS=y -CONFIG_NF_CONNTRACK_TIMEOUT=y -CONFIG_NF_CONNTRACK_TIMESTAMP=y -CONFIG_NF_CONNTRACK_LABELS=y -CONFIG_NF_CT_PROTO_DCCP=y -CONFIG_NF_CT_PROTO_GRE=m -CONFIG_NF_CT_PROTO_SCTP=y -CONFIG_NF_CT_PROTO_UDPLITE=y -CONFIG_NF_CONNTRACK_AMANDA=m -CONFIG_NF_CONNTRACK_FTP=m -CONFIG_NF_CONNTRACK_H323=m -CONFIG_NF_CONNTRACK_IRC=m -CONFIG_NF_CONNTRACK_BROADCAST=m -CONFIG_NF_CONNTRACK_NETBIOS_NS=m -CONFIG_NF_CONNTRACK_SNMP=m -CONFIG_NF_CONNTRACK_PPTP=m -CONFIG_NF_CONNTRACK_SANE=m -CONFIG_NF_CONNTRACK_SIP=m -CONFIG_NF_CONNTRACK_TFTP=m -CONFIG_NF_CT_NETLINK=m -CONFIG_NF_CT_NETLINK_TIMEOUT=m -CONFIG_NF_CT_NETLINK_HELPER=m -CONFIG_NETFILTER_NETLINK_GLUE_CT=y -CONFIG_NF_NAT=m -CONFIG_NF_NAT_NEEDED=y -CONFIG_NF_NAT_PROTO_DCCP=y -CONFIG_NF_NAT_PROTO_UDPLITE=y -CONFIG_NF_NAT_PROTO_SCTP=y -CONFIG_NF_NAT_AMANDA=m -CONFIG_NF_NAT_FTP=m -CONFIG_NF_NAT_IRC=m -CONFIG_NF_NAT_SIP=m -CONFIG_NF_NAT_TFTP=m -CONFIG_NF_NAT_REDIRECT=y -CONFIG_NETFILTER_SYNPROXY=m -CONFIG_NF_TABLES=m -CONFIG_NF_TABLES_SET=m -CONFIG_NF_TABLES_INET=y -CONFIG_NF_TABLES_NETDEV=y -CONFIG_NFT_NUMGEN=m -CONFIG_NFT_CT=m -CONFIG_NFT_FLOW_OFFLOAD=m -CONFIG_NFT_COUNTER=m -CONFIG_NFT_CONNLIMIT=m -CONFIG_NFT_LOG=m -CONFIG_NFT_LIMIT=m -CONFIG_NFT_MASQ=m -CONFIG_NFT_REDIR=m -CONFIG_NFT_NAT=m -# CONFIG_NFT_TUNNEL is not set -CONFIG_NFT_OBJREF=m -CONFIG_NFT_QUEUE=m -CONFIG_NFT_QUOTA=m -CONFIG_NFT_REJECT=m -CONFIG_NFT_REJECT_INET=m -CONFIG_NFT_COMPAT=m -CONFIG_NFT_HASH=m -CONFIG_NFT_FIB=m -CONFIG_NFT_FIB_INET=m -CONFIG_NFT_SOCKET=m -# CONFIG_NFT_OSF is not set -# CONFIG_NFT_TPROXY is not set -CONFIG_NF_DUP_NETDEV=m -CONFIG_NFT_DUP_NETDEV=m -CONFIG_NFT_FWD_NETDEV=m -CONFIG_NFT_FIB_NETDEV=m -CONFIG_NF_FLOW_TABLE_INET=m -CONFIG_NF_FLOW_TABLE=m -CONFIG_NETFILTER_XTABLES=m - -# -# Xtables combined modules -# -CONFIG_NETFILTER_XT_MARK=m -CONFIG_NETFILTER_XT_CONNMARK=m -CONFIG_NETFILTER_XT_SET=m - -# -# Xtables targets -# -CONFIG_NETFILTER_XT_TARGET_AUDIT=m -CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m -CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m -CONFIG_NETFILTER_XT_TARGET_CONNMARK=m 
-CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m -CONFIG_NETFILTER_XT_TARGET_CT=m -CONFIG_NETFILTER_XT_TARGET_DSCP=m -CONFIG_NETFILTER_XT_TARGET_HL=m -CONFIG_NETFILTER_XT_TARGET_HMARK=m -CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m -CONFIG_NETFILTER_XT_TARGET_LED=m -CONFIG_NETFILTER_XT_TARGET_LOG=m -CONFIG_NETFILTER_XT_TARGET_MARK=m -CONFIG_NETFILTER_XT_NAT=m -CONFIG_NETFILTER_XT_TARGET_NETMAP=m -CONFIG_NETFILTER_XT_TARGET_NFLOG=m -CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m -CONFIG_NETFILTER_XT_TARGET_NOTRACK=m -CONFIG_NETFILTER_XT_TARGET_RATEEST=m -CONFIG_NETFILTER_XT_TARGET_REDIRECT=m -CONFIG_NETFILTER_XT_TARGET_TEE=m -CONFIG_NETFILTER_XT_TARGET_TPROXY=m -CONFIG_NETFILTER_XT_TARGET_TRACE=m -CONFIG_NETFILTER_XT_TARGET_SECMARK=m -CONFIG_NETFILTER_XT_TARGET_TCPMSS=m -CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m - -# -# Xtables matches -# -CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m -CONFIG_NETFILTER_XT_MATCH_BPF=m -CONFIG_NETFILTER_XT_MATCH_CGROUP=m -CONFIG_NETFILTER_XT_MATCH_CLUSTER=m -CONFIG_NETFILTER_XT_MATCH_COMMENT=m -CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m -CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m -CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m -CONFIG_NETFILTER_XT_MATCH_CONNMARK=m -CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m -CONFIG_NETFILTER_XT_MATCH_CPU=m -CONFIG_NETFILTER_XT_MATCH_DCCP=m -CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m -CONFIG_NETFILTER_XT_MATCH_DSCP=m -CONFIG_NETFILTER_XT_MATCH_ECN=m -CONFIG_NETFILTER_XT_MATCH_ESP=m -CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m -CONFIG_NETFILTER_XT_MATCH_HELPER=m -CONFIG_NETFILTER_XT_MATCH_HL=m -CONFIG_NETFILTER_XT_MATCH_IPCOMP=m -CONFIG_NETFILTER_XT_MATCH_IPRANGE=m -CONFIG_NETFILTER_XT_MATCH_IPVS=m -CONFIG_NETFILTER_XT_MATCH_L2TP=m -CONFIG_NETFILTER_XT_MATCH_LENGTH=m -CONFIG_NETFILTER_XT_MATCH_LIMIT=m -CONFIG_NETFILTER_XT_MATCH_MAC=m -CONFIG_NETFILTER_XT_MATCH_MARK=m -CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m -CONFIG_NETFILTER_XT_MATCH_NFACCT=m -CONFIG_NETFILTER_XT_MATCH_OSF=m -CONFIG_NETFILTER_XT_MATCH_OWNER=m -CONFIG_NETFILTER_XT_MATCH_POLICY=m -CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m -CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m -CONFIG_NETFILTER_XT_MATCH_QUOTA=m -CONFIG_NETFILTER_XT_MATCH_RATEEST=m -CONFIG_NETFILTER_XT_MATCH_REALM=m -CONFIG_NETFILTER_XT_MATCH_RECENT=m -CONFIG_NETFILTER_XT_MATCH_SCTP=m -CONFIG_NETFILTER_XT_MATCH_SOCKET=m -CONFIG_NETFILTER_XT_MATCH_STATE=m -CONFIG_NETFILTER_XT_MATCH_STATISTIC=m -CONFIG_NETFILTER_XT_MATCH_STRING=m -CONFIG_NETFILTER_XT_MATCH_TCPMSS=m -CONFIG_NETFILTER_XT_MATCH_TIME=m -CONFIG_NETFILTER_XT_MATCH_U32=m -CONFIG_IP_SET=m -CONFIG_IP_SET_MAX=256 -CONFIG_IP_SET_BITMAP_IP=m -CONFIG_IP_SET_BITMAP_IPMAC=m -CONFIG_IP_SET_BITMAP_PORT=m -CONFIG_IP_SET_HASH_IP=m -CONFIG_IP_SET_HASH_IPMARK=m -CONFIG_IP_SET_HASH_IPPORT=m -CONFIG_IP_SET_HASH_IPPORTIP=m -CONFIG_IP_SET_HASH_IPPORTNET=m -CONFIG_IP_SET_HASH_IPMAC=m -CONFIG_IP_SET_HASH_MAC=m -CONFIG_IP_SET_HASH_NETPORTNET=m -CONFIG_IP_SET_HASH_NET=m -CONFIG_IP_SET_HASH_NETNET=m -CONFIG_IP_SET_HASH_NETPORT=m -CONFIG_IP_SET_HASH_NETIFACE=m -CONFIG_IP_SET_LIST_SET=m -CONFIG_IP_VS=m -CONFIG_IP_VS_IPV6=y -# CONFIG_IP_VS_DEBUG is not set -CONFIG_IP_VS_TAB_BITS=12 - -# -# IPVS transport protocol load balancing support -# -CONFIG_IP_VS_PROTO_TCP=y -CONFIG_IP_VS_PROTO_UDP=y -CONFIG_IP_VS_PROTO_AH_ESP=y -CONFIG_IP_VS_PROTO_ESP=y -CONFIG_IP_VS_PROTO_AH=y -CONFIG_IP_VS_PROTO_SCTP=y - -# -# IPVS scheduler -# -CONFIG_IP_VS_RR=m -CONFIG_IP_VS_WRR=m -CONFIG_IP_VS_LC=m -CONFIG_IP_VS_WLC=m -CONFIG_IP_VS_FO=m -CONFIG_IP_VS_OVF=m -CONFIG_IP_VS_LBLC=m -CONFIG_IP_VS_LBLCR=m -CONFIG_IP_VS_DH=m -CONFIG_IP_VS_SH=m -CONFIG_IP_VS_MH=m -CONFIG_IP_VS_SED=m 
-CONFIG_IP_VS_NQ=m - -# -# IPVS SH scheduler -# -CONFIG_IP_VS_SH_TAB_BITS=8 - -# -# IPVS MH scheduler -# -CONFIG_IP_VS_MH_TAB_INDEX=12 - -# -# IPVS application helper -# -CONFIG_IP_VS_FTP=m -CONFIG_IP_VS_NFCT=y -CONFIG_IP_VS_PE_SIP=m - -# -# IP: Netfilter Configuration -# -CONFIG_NF_DEFRAG_IPV4=m -CONFIG_NF_SOCKET_IPV4=m -CONFIG_NF_TPROXY_IPV4=m -CONFIG_NF_TABLES_IPV4=y -CONFIG_NFT_CHAIN_ROUTE_IPV4=m -CONFIG_NFT_REJECT_IPV4=m -CONFIG_NFT_DUP_IPV4=m -CONFIG_NFT_FIB_IPV4=m -CONFIG_NF_TABLES_ARP=y -CONFIG_NF_FLOW_TABLE_IPV4=m -CONFIG_NF_DUP_IPV4=m -CONFIG_NF_LOG_ARP=m -CONFIG_NF_LOG_IPV4=m -CONFIG_NF_REJECT_IPV4=m -CONFIG_NF_NAT_IPV4=m -CONFIG_NF_NAT_MASQUERADE_IPV4=y -CONFIG_NFT_CHAIN_NAT_IPV4=m -CONFIG_NFT_MASQ_IPV4=m -CONFIG_NFT_REDIR_IPV4=m -CONFIG_NF_NAT_SNMP_BASIC=m -CONFIG_NF_NAT_PROTO_GRE=m -CONFIG_NF_NAT_PPTP=m -CONFIG_NF_NAT_H323=m -CONFIG_IP_NF_IPTABLES=m -CONFIG_IP_NF_MATCH_AH=m -CONFIG_IP_NF_MATCH_ECN=m -CONFIG_IP_NF_MATCH_RPFILTER=m -CONFIG_IP_NF_MATCH_TTL=m -CONFIG_IP_NF_FILTER=m -CONFIG_IP_NF_TARGET_REJECT=m -CONFIG_IP_NF_TARGET_SYNPROXY=m -CONFIG_IP_NF_NAT=m -CONFIG_IP_NF_TARGET_MASQUERADE=m -CONFIG_IP_NF_TARGET_NETMAP=m -CONFIG_IP_NF_TARGET_REDIRECT=m -CONFIG_IP_NF_MANGLE=m -CONFIG_IP_NF_TARGET_CLUSTERIP=m -CONFIG_IP_NF_TARGET_ECN=m -CONFIG_IP_NF_TARGET_TTL=m -CONFIG_IP_NF_RAW=m -# CONFIG_IP_NF_SECURITY is not set -CONFIG_IP_NF_ARPTABLES=m -CONFIG_IP_NF_ARPFILTER=m -CONFIG_IP_NF_ARP_MANGLE=m - -# -# IPv6: Netfilter Configuration -# -CONFIG_NF_SOCKET_IPV6=m -CONFIG_NF_TPROXY_IPV6=m -CONFIG_NF_TABLES_IPV6=y -CONFIG_NFT_CHAIN_ROUTE_IPV6=m -CONFIG_NFT_CHAIN_NAT_IPV6=m -CONFIG_NFT_MASQ_IPV6=m -CONFIG_NFT_REDIR_IPV6=m -CONFIG_NFT_REJECT_IPV6=m -CONFIG_NFT_DUP_IPV6=m -CONFIG_NFT_FIB_IPV6=m -CONFIG_NF_FLOW_TABLE_IPV6=m -CONFIG_NF_DUP_IPV6=m -CONFIG_NF_REJECT_IPV6=m -CONFIG_NF_LOG_IPV6=m -CONFIG_NF_NAT_IPV6=m -CONFIG_NF_NAT_MASQUERADE_IPV6=y -CONFIG_IP6_NF_IPTABLES=m -CONFIG_IP6_NF_MATCH_AH=m -CONFIG_IP6_NF_MATCH_EUI64=m -CONFIG_IP6_NF_MATCH_FRAG=m -CONFIG_IP6_NF_MATCH_OPTS=m -CONFIG_IP6_NF_MATCH_HL=m -CONFIG_IP6_NF_MATCH_IPV6HEADER=m -CONFIG_IP6_NF_MATCH_MH=m -CONFIG_IP6_NF_MATCH_RPFILTER=m -CONFIG_IP6_NF_MATCH_RT=m -CONFIG_IP6_NF_MATCH_SRH=m -CONFIG_IP6_NF_TARGET_HL=m -CONFIG_IP6_NF_FILTER=m -CONFIG_IP6_NF_TARGET_REJECT=m -CONFIG_IP6_NF_TARGET_SYNPROXY=m -CONFIG_IP6_NF_MANGLE=m -CONFIG_IP6_NF_RAW=m -# CONFIG_IP6_NF_SECURITY is not set -CONFIG_IP6_NF_NAT=m -CONFIG_IP6_NF_TARGET_MASQUERADE=m -CONFIG_IP6_NF_TARGET_NPT=m -CONFIG_NF_DEFRAG_IPV6=m - -# -# DECnet: Netfilter Configuration -# -CONFIG_DECNET_NF_GRABULATOR=m -CONFIG_NF_TABLES_BRIDGE=y -CONFIG_NFT_BRIDGE_REJECT=m -CONFIG_NF_LOG_BRIDGE=m -CONFIG_BRIDGE_NF_EBTABLES=m -CONFIG_BRIDGE_EBT_BROUTE=m -CONFIG_BRIDGE_EBT_T_FILTER=m -CONFIG_BRIDGE_EBT_T_NAT=m -CONFIG_BRIDGE_EBT_802_3=m -CONFIG_BRIDGE_EBT_AMONG=m -CONFIG_BRIDGE_EBT_ARP=m -CONFIG_BRIDGE_EBT_IP=m -CONFIG_BRIDGE_EBT_IP6=m -CONFIG_BRIDGE_EBT_LIMIT=m -CONFIG_BRIDGE_EBT_MARK=m -CONFIG_BRIDGE_EBT_PKTTYPE=m -CONFIG_BRIDGE_EBT_STP=m -CONFIG_BRIDGE_EBT_VLAN=m -CONFIG_BRIDGE_EBT_ARPREPLY=m -CONFIG_BRIDGE_EBT_DNAT=m -CONFIG_BRIDGE_EBT_MARK_T=m -CONFIG_BRIDGE_EBT_REDIRECT=m -CONFIG_BRIDGE_EBT_SNAT=m -CONFIG_BRIDGE_EBT_LOG=m -CONFIG_BRIDGE_EBT_NFLOG=m -CONFIG_BPFILTER=y -CONFIG_BPFILTER_UMH=m -CONFIG_IP_DCCP=m -CONFIG_INET_DCCP_DIAG=m - -# -# DCCP CCIDs Configuration -# -# CONFIG_IP_DCCP_CCID2_DEBUG is not set -CONFIG_IP_DCCP_CCID3=y -# CONFIG_IP_DCCP_CCID3_DEBUG is not set -CONFIG_IP_DCCP_TFRC_LIB=y - -# -# DCCP Kernel Hacking -# -# CONFIG_IP_DCCP_DEBUG is not set -CONFIG_IP_SCTP=m -# 
CONFIG_SCTP_DBG_OBJCNT is not set -CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5=y -# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1 is not set -# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set -CONFIG_SCTP_COOKIE_HMAC_MD5=y -CONFIG_SCTP_COOKIE_HMAC_SHA1=y -CONFIG_INET_SCTP_DIAG=m -CONFIG_RDS=m -CONFIG_RDS_RDMA=m -CONFIG_RDS_TCP=m -# CONFIG_RDS_DEBUG is not set -CONFIG_TIPC=m -CONFIG_TIPC_MEDIA_IB=y -CONFIG_TIPC_MEDIA_UDP=y -CONFIG_TIPC_DIAG=m -CONFIG_ATM=m -CONFIG_ATM_CLIP=m -# CONFIG_ATM_CLIP_NO_ICMP is not set -CONFIG_ATM_LANE=m -CONFIG_ATM_MPOA=m -CONFIG_ATM_BR2684=m -# CONFIG_ATM_BR2684_IPFILTER is not set -CONFIG_L2TP=m -# CONFIG_L2TP_DEBUGFS is not set -CONFIG_L2TP_V3=y -CONFIG_L2TP_IP=m -CONFIG_L2TP_ETH=m -CONFIG_STP=m -CONFIG_GARP=m -CONFIG_MRP=m -CONFIG_BRIDGE=m -CONFIG_BRIDGE_IGMP_SNOOPING=y -CONFIG_BRIDGE_VLAN_FILTERING=y -CONFIG_HAVE_NET_DSA=y -CONFIG_NET_DSA=m -CONFIG_NET_DSA_LEGACY=y -CONFIG_NET_DSA_TAG_BRCM=y -CONFIG_NET_DSA_TAG_BRCM_PREPEND=y -CONFIG_NET_DSA_TAG_DSA=y -CONFIG_NET_DSA_TAG_EDSA=y -CONFIG_NET_DSA_TAG_KSZ=y -CONFIG_NET_DSA_TAG_LAN9303=y -CONFIG_NET_DSA_TAG_MTK=y -CONFIG_NET_DSA_TAG_TRAILER=y -CONFIG_NET_DSA_TAG_QCA=y -CONFIG_VLAN_8021Q=m -CONFIG_VLAN_8021Q_GVRP=y -CONFIG_VLAN_8021Q_MVRP=y -CONFIG_DECNET=m -CONFIG_DECNET_ROUTER=y -CONFIG_LLC=m -CONFIG_LLC2=m -CONFIG_ATALK=m -CONFIG_DEV_APPLETALK=m -CONFIG_IPDDP=m -CONFIG_IPDDP_ENCAP=y -CONFIG_X25=m -CONFIG_LAPB=m -CONFIG_PHONET=m -CONFIG_6LOWPAN=m -# CONFIG_6LOWPAN_DEBUGFS is not set -CONFIG_6LOWPAN_NHC=m -CONFIG_6LOWPAN_NHC_DEST=m -CONFIG_6LOWPAN_NHC_FRAGMENT=m -CONFIG_6LOWPAN_NHC_HOP=m -CONFIG_6LOWPAN_NHC_IPV6=m -CONFIG_6LOWPAN_NHC_MOBILITY=m -CONFIG_6LOWPAN_NHC_ROUTING=m -CONFIG_6LOWPAN_NHC_UDP=m -CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m -CONFIG_6LOWPAN_GHC_UDP=m -CONFIG_6LOWPAN_GHC_ICMPV6=m -CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m -CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m -CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m -CONFIG_IEEE802154=m -CONFIG_IEEE802154_NL802154_EXPERIMENTAL=y -CONFIG_IEEE802154_SOCKET=m -CONFIG_IEEE802154_6LOWPAN=m -CONFIG_MAC802154=m -CONFIG_NET_SCHED=y - -# -# Queueing/Scheduling -# -CONFIG_NET_SCH_CBQ=m -CONFIG_NET_SCH_HTB=m -CONFIG_NET_SCH_HFSC=m -CONFIG_NET_SCH_ATM=m -CONFIG_NET_SCH_PRIO=m -CONFIG_NET_SCH_MULTIQ=m -CONFIG_NET_SCH_RED=m -CONFIG_NET_SCH_SFB=m -CONFIG_NET_SCH_SFQ=m -CONFIG_NET_SCH_TEQL=m -CONFIG_NET_SCH_TBF=m -CONFIG_NET_SCH_CBS=m -# CONFIG_NET_SCH_ETF is not set -CONFIG_NET_SCH_GRED=m -CONFIG_NET_SCH_DSMARK=m -CONFIG_NET_SCH_NETEM=m -CONFIG_NET_SCH_DRR=m -CONFIG_NET_SCH_MQPRIO=m -# CONFIG_NET_SCH_SKBPRIO is not set -CONFIG_NET_SCH_CHOKE=m -CONFIG_NET_SCH_QFQ=m -CONFIG_NET_SCH_CODEL=m -CONFIG_NET_SCH_FQ_CODEL=m -# CONFIG_NET_SCH_CAKE is not set -CONFIG_NET_SCH_FQ=m -CONFIG_NET_SCH_HHF=m -CONFIG_NET_SCH_PIE=m -CONFIG_NET_SCH_INGRESS=m -CONFIG_NET_SCH_PLUG=m -# CONFIG_NET_SCH_DEFAULT is not set - -# -# Classification -# -CONFIG_NET_CLS=y -CONFIG_NET_CLS_BASIC=m -CONFIG_NET_CLS_TCINDEX=m -CONFIG_NET_CLS_ROUTE4=m -CONFIG_NET_CLS_FW=m -CONFIG_NET_CLS_U32=m -CONFIG_CLS_U32_PERF=y -CONFIG_CLS_U32_MARK=y -CONFIG_NET_CLS_RSVP=m -CONFIG_NET_CLS_RSVP6=m -CONFIG_NET_CLS_FLOW=m -CONFIG_NET_CLS_CGROUP=m -CONFIG_NET_CLS_BPF=m -CONFIG_NET_CLS_FLOWER=m -CONFIG_NET_CLS_MATCHALL=m -CONFIG_NET_EMATCH=y -CONFIG_NET_EMATCH_STACK=32 -CONFIG_NET_EMATCH_CMP=m -CONFIG_NET_EMATCH_NBYTE=m -CONFIG_NET_EMATCH_U32=m -CONFIG_NET_EMATCH_META=m -CONFIG_NET_EMATCH_TEXT=m -CONFIG_NET_EMATCH_CANID=m -CONFIG_NET_EMATCH_IPSET=m -CONFIG_NET_EMATCH_IPT=m -CONFIG_NET_CLS_ACT=y -CONFIG_NET_ACT_POLICE=m -CONFIG_NET_ACT_GACT=m -CONFIG_GACT_PROB=y 
-CONFIG_NET_ACT_MIRRED=m -CONFIG_NET_ACT_SAMPLE=m -CONFIG_NET_ACT_IPT=m -CONFIG_NET_ACT_NAT=m -CONFIG_NET_ACT_PEDIT=m -# CONFIG_NET_ACT_SIMP is not set -CONFIG_NET_ACT_SKBEDIT=m -CONFIG_NET_ACT_CSUM=m -CONFIG_NET_ACT_VLAN=m -CONFIG_NET_ACT_BPF=m -CONFIG_NET_ACT_CONNMARK=m -CONFIG_NET_ACT_SKBMOD=m -CONFIG_NET_ACT_IFE=m -CONFIG_NET_ACT_TUNNEL_KEY=m -CONFIG_NET_IFE_SKBMARK=m -CONFIG_NET_IFE_SKBPRIO=m -CONFIG_NET_IFE_SKBTCINDEX=m -CONFIG_NET_CLS_IND=y -CONFIG_NET_SCH_FIFO=y -CONFIG_DCB=y -CONFIG_DNS_RESOLVER=y -CONFIG_BATMAN_ADV=m -# CONFIG_BATMAN_ADV_BATMAN_V is not set -CONFIG_BATMAN_ADV_BLA=y -CONFIG_BATMAN_ADV_DAT=y -CONFIG_BATMAN_ADV_NC=y -CONFIG_BATMAN_ADV_MCAST=y -CONFIG_BATMAN_ADV_DEBUGFS=y -# CONFIG_BATMAN_ADV_DEBUG is not set -CONFIG_OPENVSWITCH=m -CONFIG_OPENVSWITCH_GRE=m -CONFIG_OPENVSWITCH_VXLAN=m -CONFIG_OPENVSWITCH_GENEVE=m -CONFIG_VSOCKETS=m -CONFIG_VSOCKETS_DIAG=m -CONFIG_VMWARE_VMCI_VSOCKETS=m -CONFIG_VIRTIO_VSOCKETS=m -CONFIG_VIRTIO_VSOCKETS_COMMON=m -CONFIG_HYPERV_VSOCKETS=m -CONFIG_NETLINK_DIAG=m -CONFIG_MPLS=y -CONFIG_NET_MPLS_GSO=m -CONFIG_MPLS_ROUTING=m -CONFIG_MPLS_IPTUNNEL=m -CONFIG_NET_NSH=m -CONFIG_HSR=m -CONFIG_NET_SWITCHDEV=y -CONFIG_NET_L3_MASTER_DEV=y -# CONFIG_NET_NCSI is not set -CONFIG_RPS=y -CONFIG_RFS_ACCEL=y -CONFIG_XPS=y -CONFIG_CGROUP_NET_PRIO=y -CONFIG_CGROUP_NET_CLASSID=y -CONFIG_NET_RX_BUSY_POLL=y -CONFIG_BQL=y -CONFIG_BPF_JIT=y -CONFIG_BPF_STREAM_PARSER=y -CONFIG_NET_FLOW_LIMIT=y - -# -# Network testing -# -# CONFIG_NET_PKTGEN is not set -# CONFIG_NET_DROP_MONITOR is not set -CONFIG_HAMRADIO=y - -# -# Packet Radio protocols -# -CONFIG_AX25=m -CONFIG_AX25_DAMA_SLAVE=y -CONFIG_NETROM=m -CONFIG_ROSE=m - -# -# AX.25 network device drivers -# -CONFIG_MKISS=m -CONFIG_6PACK=m -CONFIG_BPQETHER=m -CONFIG_BAYCOM_SER_FDX=m -CONFIG_BAYCOM_SER_HDX=m -CONFIG_BAYCOM_PAR=m -CONFIG_YAM=m -CONFIG_CAN=m -CONFIG_CAN_RAW=m -CONFIG_CAN_BCM=m -CONFIG_CAN_GW=m - -# -# CAN Device Drivers -# -CONFIG_CAN_VCAN=m -CONFIG_CAN_VXCAN=m -CONFIG_CAN_SLCAN=m -CONFIG_CAN_DEV=m -CONFIG_CAN_CALC_BITTIMING=y -CONFIG_CAN_JANZ_ICAN3=m -CONFIG_CAN_C_CAN=m -CONFIG_CAN_C_CAN_PLATFORM=m -CONFIG_CAN_C_CAN_PCI=m -CONFIG_CAN_CC770=m -CONFIG_CAN_CC770_ISA=m -CONFIG_CAN_CC770_PLATFORM=m -CONFIG_CAN_IFI_CANFD=m -CONFIG_CAN_M_CAN=m -CONFIG_CAN_PEAK_PCIEFD=m -CONFIG_CAN_SJA1000=m -CONFIG_CAN_SJA1000_ISA=m -CONFIG_CAN_SJA1000_PLATFORM=m -CONFIG_CAN_EMS_PCMCIA=m -CONFIG_CAN_EMS_PCI=m -CONFIG_CAN_PEAK_PCMCIA=m -CONFIG_CAN_PEAK_PCI=m -CONFIG_CAN_PEAK_PCIEC=y -CONFIG_CAN_KVASER_PCI=m -CONFIG_CAN_PLX_PCI=m -CONFIG_CAN_SOFTING=m -CONFIG_CAN_SOFTING_CS=m - -# -# CAN SPI interfaces -# -CONFIG_CAN_HI311X=m -CONFIG_CAN_MCP251X=m - -# -# CAN USB interfaces -# -CONFIG_CAN_8DEV_USB=m -CONFIG_CAN_EMS_USB=m -CONFIG_CAN_ESD_USB2=m -CONFIG_CAN_GS_USB=m -CONFIG_CAN_KVASER_USB=m -CONFIG_CAN_MCBA_USB=m -CONFIG_CAN_PEAK_USB=m -# CONFIG_CAN_UCAN is not set -# CONFIG_CAN_DEBUG_DEVICES is not set -CONFIG_BT=m -CONFIG_BT_BREDR=y -CONFIG_BT_RFCOMM=m -CONFIG_BT_RFCOMM_TTY=y -CONFIG_BT_BNEP=m -CONFIG_BT_BNEP_MC_FILTER=y -CONFIG_BT_BNEP_PROTO_FILTER=y -CONFIG_BT_CMTP=m -CONFIG_BT_HIDP=m -CONFIG_BT_HS=y -CONFIG_BT_LE=y -CONFIG_BT_6LOWPAN=m -CONFIG_BT_LEDS=y -# CONFIG_BT_SELFTEST is not set -CONFIG_BT_DEBUGFS=y - -# -# Bluetooth device drivers -# -CONFIG_BT_INTEL=m -CONFIG_BT_BCM=m -CONFIG_BT_RTL=m -CONFIG_BT_QCA=m -CONFIG_BT_HCIBTUSB=m -# CONFIG_BT_HCIBTUSB_AUTOSUSPEND is not set -CONFIG_BT_HCIBTUSB_BCM=y -CONFIG_BT_HCIBTUSB_RTL=y -CONFIG_BT_HCIBTSDIO=m -CONFIG_BT_HCIUART=m -CONFIG_BT_HCIUART_SERDEV=y -CONFIG_BT_HCIUART_H4=y 
-CONFIG_BT_HCIUART_NOKIA=m -CONFIG_BT_HCIUART_BCSP=y -CONFIG_BT_HCIUART_ATH3K=y -CONFIG_BT_HCIUART_LL=y -CONFIG_BT_HCIUART_3WIRE=y -CONFIG_BT_HCIUART_INTEL=y -# CONFIG_BT_HCIUART_RTL is not set -CONFIG_BT_HCIUART_QCA=y -CONFIG_BT_HCIUART_AG6XX=y -CONFIG_BT_HCIUART_MRVL=y -CONFIG_BT_HCIBCM203X=m -CONFIG_BT_HCIBPA10X=m -CONFIG_BT_HCIBFUSB=m -CONFIG_BT_HCIDTL1=m -CONFIG_BT_HCIBT3C=m -CONFIG_BT_HCIBLUECARD=m -CONFIG_BT_HCIVHCI=m -CONFIG_BT_MRVL=m -CONFIG_BT_MRVL_SDIO=m -CONFIG_BT_ATH3K=m -CONFIG_BT_WILINK=m -# CONFIG_BT_MTKUART is not set -CONFIG_BT_HCIRSI=m -CONFIG_AF_RXRPC=m -CONFIG_AF_RXRPC_IPV6=y -# CONFIG_AF_RXRPC_INJECT_LOSS is not set -# CONFIG_AF_RXRPC_DEBUG is not set -# CONFIG_RXKAD is not set -CONFIG_AF_KCM=m -CONFIG_STREAM_PARSER=y -CONFIG_FIB_RULES=y -CONFIG_WIRELESS=y -CONFIG_WIRELESS_EXT=y -CONFIG_WEXT_CORE=y -CONFIG_WEXT_PROC=y -CONFIG_WEXT_SPY=y -CONFIG_WEXT_PRIV=y -CONFIG_CFG80211=m -CONFIG_NL80211_TESTMODE=y -# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set -CONFIG_CFG80211_REQUIRE_SIGNED_REGDB=y -CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS=y -CONFIG_CFG80211_DEFAULT_PS=y -# CONFIG_CFG80211_DEBUGFS is not set -CONFIG_CFG80211_CRDA_SUPPORT=y -CONFIG_CFG80211_WEXT=y -CONFIG_CFG80211_WEXT_EXPORT=y -CONFIG_LIB80211=m -CONFIG_LIB80211_CRYPT_WEP=m -CONFIG_LIB80211_CRYPT_CCMP=m -CONFIG_LIB80211_CRYPT_TKIP=m -# CONFIG_LIB80211_DEBUG is not set -CONFIG_MAC80211=m -CONFIG_MAC80211_HAS_RC=y -CONFIG_MAC80211_RC_MINSTREL=y -CONFIG_MAC80211_RC_MINSTREL_HT=y -CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y -CONFIG_MAC80211_RC_DEFAULT="minstrel_ht" -CONFIG_MAC80211_MESH=y -CONFIG_MAC80211_LEDS=y -# CONFIG_MAC80211_DEBUGFS is not set -# CONFIG_MAC80211_MESSAGE_TRACING is not set -# CONFIG_MAC80211_DEBUG_MENU is not set -CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 -CONFIG_WIMAX=m -CONFIG_WIMAX_DEBUG_LEVEL=8 -CONFIG_RFKILL=m -CONFIG_RFKILL_LEDS=y -CONFIG_RFKILL_INPUT=y -CONFIG_RFKILL_GPIO=m -CONFIG_NET_9P=m -CONFIG_NET_9P_VIRTIO=m -CONFIG_NET_9P_RDMA=m -# CONFIG_NET_9P_DEBUG is not set -CONFIG_CAIF=m -# CONFIG_CAIF_DEBUG is not set -CONFIG_CAIF_NETDEV=m -CONFIG_CAIF_USB=m -CONFIG_CEPH_LIB=m -# CONFIG_CEPH_LIB_PRETTYDEBUG is not set -CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y -CONFIG_NFC=m -CONFIG_NFC_DIGITAL=m -CONFIG_NFC_NCI=m -CONFIG_NFC_NCI_SPI=m -CONFIG_NFC_NCI_UART=m -CONFIG_NFC_HCI=m -CONFIG_NFC_SHDLC=y - -# -# Near Field Communication (NFC) devices -# -CONFIG_NFC_TRF7970A=m -CONFIG_NFC_MEI_PHY=m -CONFIG_NFC_SIM=m -CONFIG_NFC_PORT100=m -CONFIG_NFC_FDP=m -CONFIG_NFC_FDP_I2C=m -CONFIG_NFC_PN544=m -CONFIG_NFC_PN544_I2C=m -CONFIG_NFC_PN544_MEI=m -CONFIG_NFC_PN533=m -CONFIG_NFC_PN533_USB=m -CONFIG_NFC_PN533_I2C=m -CONFIG_NFC_MICROREAD=m -CONFIG_NFC_MICROREAD_I2C=m -CONFIG_NFC_MICROREAD_MEI=m -CONFIG_NFC_MRVL=m -CONFIG_NFC_MRVL_USB=m -CONFIG_NFC_MRVL_UART=m -CONFIG_NFC_MRVL_I2C=m -CONFIG_NFC_MRVL_SPI=m -CONFIG_NFC_ST21NFCA=m -CONFIG_NFC_ST21NFCA_I2C=m -CONFIG_NFC_ST_NCI=m -CONFIG_NFC_ST_NCI_I2C=m -CONFIG_NFC_ST_NCI_SPI=m -CONFIG_NFC_NXP_NCI=m -CONFIG_NFC_NXP_NCI_I2C=m -CONFIG_NFC_S3FWRN5=m -CONFIG_NFC_S3FWRN5_I2C=m -CONFIG_NFC_ST95HF=m -CONFIG_PSAMPLE=m -CONFIG_NET_IFE=m -CONFIG_LWTUNNEL=y -CONFIG_LWTUNNEL_BPF=y -CONFIG_DST_CACHE=y -CONFIG_GRO_CELLS=y -CONFIG_NET_DEVLINK=m -CONFIG_MAY_USE_DEVLINK=m -CONFIG_FAILOVER=m -CONFIG_HAVE_EBPF_JIT=y - -# -# Device Drivers -# - -# -# Generic Driver Options -# -# CONFIG_UEVENT_HELPER is not set -CONFIG_DEVTMPFS=y -CONFIG_DEVTMPFS_MOUNT=y -CONFIG_STANDALONE=y -CONFIG_PREVENT_FIRMWARE_BUILD=y - -# -# Firmware loader -# -CONFIG_FW_LOADER=y -CONFIG_EXTRA_FIRMWARE="" 
-CONFIG_FW_LOADER_USER_HELPER=y -# CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set -CONFIG_WANT_DEV_COREDUMP=y -CONFIG_ALLOW_DEV_COREDUMP=y -CONFIG_DEV_COREDUMP=y -# CONFIG_DEBUG_DRIVER is not set -# CONFIG_DEBUG_DEVRES is not set -# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set -CONFIG_TEST_ASYNC_DRIVER_PROBE=m -CONFIG_GENERIC_CPU_AUTOPROBE=y -CONFIG_GENERIC_CPU_VULNERABILITIES=y -CONFIG_REGMAP=y -CONFIG_REGMAP_I2C=m -CONFIG_REGMAP_SPI=y -CONFIG_REGMAP_SPMI=m -CONFIG_REGMAP_W1=m -CONFIG_REGMAP_MMIO=y -CONFIG_REGMAP_IRQ=y -CONFIG_REGMAP_SOUNDWIRE=m -CONFIG_DMA_SHARED_BUFFER=y -# CONFIG_DMA_FENCE_TRACE is not set -# CONFIG_DMA_CMA is not set - -# -# Bus devices -# -CONFIG_CONNECTOR=m -# CONFIG_GNSS is not set -CONFIG_MTD=m -CONFIG_MTD_TESTS=m -CONFIG_MTD_REDBOOT_PARTS=m -CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1 -CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED=y -CONFIG_MTD_REDBOOT_PARTS_READONLY=y -CONFIG_MTD_CMDLINE_PARTS=m -CONFIG_MTD_AR7_PARTS=m - -# -# Partition parsers -# - -# -# User Modules And Translation Layers -# -CONFIG_MTD_BLKDEVS=m -CONFIG_MTD_BLOCK=m -CONFIG_MTD_BLOCK_RO=m -CONFIG_FTL=m -CONFIG_NFTL=m -CONFIG_NFTL_RW=y -CONFIG_INFTL=m -CONFIG_RFD_FTL=m -CONFIG_SSFDC=m -CONFIG_SM_FTL=m -CONFIG_MTD_OOPS=m -CONFIG_MTD_SWAP=m -# CONFIG_MTD_PARTITIONED_MASTER is not set - -# -# RAM/ROM/Flash chip drivers -# -CONFIG_MTD_CFI=m -CONFIG_MTD_JEDECPROBE=m -CONFIG_MTD_GEN_PROBE=m -# CONFIG_MTD_CFI_ADV_OPTIONS is not set -CONFIG_MTD_MAP_BANK_WIDTH_1=y -CONFIG_MTD_MAP_BANK_WIDTH_2=y -CONFIG_MTD_MAP_BANK_WIDTH_4=y -CONFIG_MTD_CFI_I1=y -CONFIG_MTD_CFI_I2=y -CONFIG_MTD_CFI_INTELEXT=m -CONFIG_MTD_CFI_AMDSTD=m -CONFIG_MTD_CFI_STAA=m -CONFIG_MTD_CFI_UTIL=m -CONFIG_MTD_RAM=m -CONFIG_MTD_ROM=m -CONFIG_MTD_ABSENT=m - -# -# Mapping drivers for chip access -# -CONFIG_MTD_COMPLEX_MAPPINGS=y -CONFIG_MTD_PHYSMAP=m -# CONFIG_MTD_PHYSMAP_COMPAT is not set -CONFIG_MTD_SBC_GXX=m -CONFIG_MTD_AMD76XROM=m -CONFIG_MTD_ICHXROM=m -CONFIG_MTD_ESB2ROM=m -CONFIG_MTD_CK804XROM=m -CONFIG_MTD_SCB2_FLASH=m -CONFIG_MTD_NETtel=m -CONFIG_MTD_L440GX=m -CONFIG_MTD_PCI=m -CONFIG_MTD_PCMCIA=m -# CONFIG_MTD_PCMCIA_ANONYMOUS is not set -CONFIG_MTD_GPIO_ADDR=m -CONFIG_MTD_INTEL_VR_NOR=m -CONFIG_MTD_PLATRAM=m -CONFIG_MTD_LATCH_ADDR=m - -# -# Self-contained MTD device drivers -# -CONFIG_MTD_PMC551=m -CONFIG_MTD_PMC551_BUGFIX=y -# CONFIG_MTD_PMC551_DEBUG is not set -CONFIG_MTD_DATAFLASH=m -CONFIG_MTD_DATAFLASH_WRITE_VERIFY=y -CONFIG_MTD_DATAFLASH_OTP=y -CONFIG_MTD_M25P80=m -CONFIG_MTD_MCHP23K256=m -CONFIG_MTD_SST25L=m -CONFIG_MTD_SLRAM=m -CONFIG_MTD_PHRAM=m -CONFIG_MTD_MTDRAM=m -CONFIG_MTDRAM_TOTAL_SIZE=4096 -CONFIG_MTDRAM_ERASE_SIZE=128 -CONFIG_MTD_BLOCK2MTD=m - -# -# Disk-On-Chip Device Drivers -# -CONFIG_MTD_DOCG3=m -CONFIG_BCH_CONST_M=14 -CONFIG_BCH_CONST_T=4 -CONFIG_MTD_ONENAND=m -CONFIG_MTD_ONENAND_VERIFY_WRITE=y -CONFIG_MTD_ONENAND_GENERIC=m -CONFIG_MTD_ONENAND_OTP=y -CONFIG_MTD_ONENAND_2X_PROGRAM=y -CONFIG_MTD_NAND_ECC=m -CONFIG_MTD_NAND_ECC_SMC=y -CONFIG_MTD_NAND=m -CONFIG_MTD_NAND_BCH=m -CONFIG_MTD_NAND_ECC_BCH=y -CONFIG_MTD_SM_COMMON=m -CONFIG_MTD_NAND_DENALI=m -CONFIG_MTD_NAND_DENALI_PCI=m -CONFIG_MTD_NAND_GPIO=m -CONFIG_MTD_NAND_RICOH=m -CONFIG_MTD_NAND_DISKONCHIP=m -CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADVANCED=y -CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS=0x0 -CONFIG_MTD_NAND_DISKONCHIP_PROBE_HIGH=y -CONFIG_MTD_NAND_DISKONCHIP_BBTWRITE=y -CONFIG_MTD_NAND_DOCG4=m -CONFIG_MTD_NAND_CAFE=m -CONFIG_MTD_NAND_NANDSIM=m -CONFIG_MTD_NAND_PLATFORM=m -# CONFIG_MTD_SPI_NAND is not set - -# -# LPDDR & LPDDR2 PCM memory drivers -# -CONFIG_MTD_LPDDR=m 
-CONFIG_MTD_QINFO_PROBE=m -CONFIG_MTD_SPI_NOR=m -CONFIG_MTD_MT81xx_NOR=m -CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y -CONFIG_SPI_INTEL_SPI=m -CONFIG_SPI_INTEL_SPI_PCI=m -CONFIG_SPI_INTEL_SPI_PLATFORM=m -CONFIG_MTD_UBI=m -CONFIG_MTD_UBI_WL_THRESHOLD=4096 -CONFIG_MTD_UBI_BEB_LIMIT=20 -CONFIG_MTD_UBI_FASTMAP=y -# CONFIG_MTD_UBI_GLUEBI is not set -CONFIG_MTD_UBI_BLOCK=y -# CONFIG_OF is not set -CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y -CONFIG_PARPORT=m -CONFIG_PARPORT_PC=m -CONFIG_PARPORT_SERIAL=m -CONFIG_PARPORT_PC_FIFO=y -CONFIG_PARPORT_PC_SUPERIO=y -CONFIG_PARPORT_PC_PCMCIA=m -CONFIG_PARPORT_AX88796=m -CONFIG_PARPORT_1284=y -CONFIG_PARPORT_NOT_PC=y -CONFIG_PNP=y -# CONFIG_PNP_DEBUG_MESSAGES is not set - -# -# Protocols -# -CONFIG_PNPACPI=y -CONFIG_BLK_DEV=y -# CONFIG_BLK_DEV_NULL_BLK is not set -CONFIG_BLK_DEV_FD=m -CONFIG_CDROM=m -CONFIG_PARIDE=m - -# -# Parallel IDE high-level drivers -# -CONFIG_PARIDE_PD=m -CONFIG_PARIDE_PCD=m -CONFIG_PARIDE_PF=m -CONFIG_PARIDE_PT=m -CONFIG_PARIDE_PG=m - -# -# Parallel IDE protocol modules -# -CONFIG_PARIDE_ATEN=m -CONFIG_PARIDE_BPCK=m -CONFIG_PARIDE_COMM=m -CONFIG_PARIDE_DSTR=m -CONFIG_PARIDE_FIT2=m -CONFIG_PARIDE_FIT3=m -CONFIG_PARIDE_EPAT=m -CONFIG_PARIDE_EPATC8=y -CONFIG_PARIDE_EPIA=m -CONFIG_PARIDE_FRIQ=m -CONFIG_PARIDE_FRPW=m -CONFIG_PARIDE_KBIC=m -CONFIG_PARIDE_KTTI=m -CONFIG_PARIDE_ON20=m -CONFIG_PARIDE_ON26=m -CONFIG_BLK_DEV_PCIESSD_MTIP32XX=m -CONFIG_ZRAM=m -CONFIG_ZRAM_WRITEBACK=y -# CONFIG_ZRAM_MEMORY_TRACKING is not set -CONFIG_BLK_DEV_DAC960=m -CONFIG_BLK_DEV_UMEM=m -CONFIG_BLK_DEV_LOOP=y -CONFIG_BLK_DEV_LOOP_MIN_COUNT=8 -CONFIG_BLK_DEV_CRYPTOLOOP=m -CONFIG_BLK_DEV_DRBD=m -# CONFIG_DRBD_FAULT_INJECTION is not set -CONFIG_BLK_DEV_NBD=m -CONFIG_BLK_DEV_SKD=m -CONFIG_BLK_DEV_SX8=m -CONFIG_BLK_DEV_RAM=m -CONFIG_BLK_DEV_RAM_COUNT=16 -CONFIG_BLK_DEV_RAM_SIZE=16384 -CONFIG_CDROM_PKTCDVD=m -CONFIG_CDROM_PKTCDVD_BUFFERS=8 -CONFIG_CDROM_PKTCDVD_WCACHE=y -CONFIG_ATA_OVER_ETH=m -CONFIG_VIRTIO_BLK=m -# CONFIG_VIRTIO_BLK_SCSI is not set -CONFIG_BLK_DEV_RBD=m -CONFIG_BLK_DEV_RSXX=m - -# -# NVME Support -# -CONFIG_NVME_CORE=y -CONFIG_BLK_DEV_NVME=y -# CONFIG_NVME_MULTIPATH is not set -CONFIG_NVME_FABRICS=m -CONFIG_NVME_RDMA=m -CONFIG_NVME_FC=m -CONFIG_NVME_TARGET=m -CONFIG_NVME_TARGET_LOOP=m -CONFIG_NVME_TARGET_RDMA=m -CONFIG_NVME_TARGET_FC=m -CONFIG_NVME_TARGET_FCLOOP=m - -# -# Misc devices -# -CONFIG_SENSORS_LIS3LV02D=m -CONFIG_AD525X_DPOT=m -CONFIG_AD525X_DPOT_I2C=m -CONFIG_AD525X_DPOT_SPI=m -# CONFIG_DUMMY_IRQ is not set -CONFIG_IBM_ASM=m -CONFIG_PHANTOM=m -CONFIG_SGI_IOC4=m -CONFIG_TIFM_CORE=m -CONFIG_TIFM_7XX1=m -CONFIG_ICS932S401=m -CONFIG_ENCLOSURE_SERVICES=m -CONFIG_HP_ILO=m -CONFIG_APDS9802ALS=m -CONFIG_ISL29003=m -CONFIG_ISL29020=m -CONFIG_SENSORS_TSL2550=m -CONFIG_SENSORS_BH1770=m -CONFIG_SENSORS_APDS990X=m -CONFIG_HMC6352=m -CONFIG_DS1682=m -CONFIG_VMWARE_BALLOON=m -CONFIG_USB_SWITCH_FSA9480=m -CONFIG_LATTICE_ECP3_CONFIG=m -CONFIG_SRAM=y -CONFIG_PCI_ENDPOINT_TEST=m -CONFIG_MISC_RTSX=m -CONFIG_C2PORT=m -CONFIG_C2PORT_DURAMAR_2150=m - -# -# EEPROM support -# -CONFIG_EEPROM_AT24=m -CONFIG_EEPROM_AT25=m -CONFIG_EEPROM_LEGACY=m -CONFIG_EEPROM_MAX6875=m -CONFIG_EEPROM_93CX6=m -CONFIG_EEPROM_93XX46=m -CONFIG_EEPROM_IDT_89HPESX=m -CONFIG_CB710_CORE=m -# CONFIG_CB710_DEBUG is not set -CONFIG_CB710_DEBUG_ASSUMPTIONS=y - -# -# Texas Instruments shared transport line discipline -# -CONFIG_TI_ST=m -CONFIG_SENSORS_LIS3_I2C=m - -# -# Altera FPGA firmware download module (requires I2C) -# -CONFIG_ALTERA_STAPL=m -CONFIG_INTEL_MEI=y -CONFIG_INTEL_MEI_ME=y 
-CONFIG_INTEL_MEI_TXE=m -CONFIG_VMWARE_VMCI=m - -# -# Intel MIC & related support -# - -# -# Intel MIC Bus Driver -# -CONFIG_INTEL_MIC_BUS=m - -# -# SCIF Bus Driver -# -CONFIG_SCIF_BUS=m - -# -# VOP Bus Driver -# -CONFIG_VOP_BUS=m - -# -# Intel MIC Host Driver -# -CONFIG_INTEL_MIC_HOST=m - -# -# Intel MIC Card Driver -# -CONFIG_INTEL_MIC_CARD=m - -# -# SCIF Driver -# -CONFIG_SCIF=m - -# -# Intel MIC Coprocessor State Management (COSM) Drivers -# -CONFIG_MIC_COSM=m - -# -# VOP Driver -# -CONFIG_VOP=m -CONFIG_VHOST_RING=m -CONFIG_GENWQE=m -CONFIG_GENWQE_PLATFORM_ERROR_RECOVERY=0 -CONFIG_ECHO=m -CONFIG_MISC_RTSX_PCI=m -CONFIG_MISC_RTSX_USB=m -CONFIG_HAVE_IDE=y -# CONFIG_IDE is not set - -# -# SCSI device support -# -CONFIG_SCSI_MOD=m -CONFIG_RAID_ATTRS=m -CONFIG_SCSI=m -CONFIG_SCSI_DMA=y -CONFIG_SCSI_NETLINK=y -# CONFIG_SCSI_MQ_DEFAULT is not set -CONFIG_SCSI_PROC_FS=y - -# -# SCSI support type (disk, tape, CD-ROM) -# -CONFIG_BLK_DEV_SD=m -CONFIG_CHR_DEV_ST=m -CONFIG_CHR_DEV_OSST=m -CONFIG_BLK_DEV_SR=m -CONFIG_BLK_DEV_SR_VENDOR=y -CONFIG_CHR_DEV_SG=m -CONFIG_CHR_DEV_SCH=m -CONFIG_SCSI_ENCLOSURE=m -CONFIG_SCSI_CONSTANTS=y -CONFIG_SCSI_LOGGING=y -CONFIG_SCSI_SCAN_ASYNC=y - -# -# SCSI Transports -# -CONFIG_SCSI_SPI_ATTRS=m -CONFIG_SCSI_FC_ATTRS=m -CONFIG_SCSI_ISCSI_ATTRS=m -CONFIG_SCSI_SAS_ATTRS=m -CONFIG_SCSI_SAS_LIBSAS=m -CONFIG_SCSI_SAS_ATA=y -CONFIG_SCSI_SAS_HOST_SMP=y -CONFIG_SCSI_SRP_ATTRS=m -CONFIG_SCSI_LOWLEVEL=y -CONFIG_ISCSI_TCP=m -CONFIG_ISCSI_BOOT_SYSFS=m -CONFIG_SCSI_CXGB3_ISCSI=m -CONFIG_SCSI_CXGB4_ISCSI=m -CONFIG_SCSI_BNX2_ISCSI=m -CONFIG_SCSI_BNX2X_FCOE=m -CONFIG_BE2ISCSI=m -CONFIG_BLK_DEV_3W_XXXX_RAID=m -CONFIG_SCSI_HPSA=m -CONFIG_SCSI_3W_9XXX=m -CONFIG_SCSI_3W_SAS=m -CONFIG_SCSI_ACARD=m -CONFIG_SCSI_AACRAID=m -CONFIG_SCSI_AIC7XXX=m -CONFIG_AIC7XXX_CMDS_PER_DEVICE=4 -CONFIG_AIC7XXX_RESET_DELAY_MS=15000 -# CONFIG_AIC7XXX_DEBUG_ENABLE is not set -CONFIG_AIC7XXX_DEBUG_MASK=0 -# CONFIG_AIC7XXX_REG_PRETTY_PRINT is not set -CONFIG_SCSI_AIC79XX=m -CONFIG_AIC79XX_CMDS_PER_DEVICE=4 -CONFIG_AIC79XX_RESET_DELAY_MS=15000 -# CONFIG_AIC79XX_DEBUG_ENABLE is not set -CONFIG_AIC79XX_DEBUG_MASK=0 -# CONFIG_AIC79XX_REG_PRETTY_PRINT is not set -CONFIG_SCSI_AIC94XX=m -# CONFIG_AIC94XX_DEBUG is not set -CONFIG_SCSI_MVSAS=m -# CONFIG_SCSI_MVSAS_DEBUG is not set -CONFIG_SCSI_MVSAS_TASKLET=y -CONFIG_SCSI_MVUMI=m -CONFIG_SCSI_DPT_I2O=m -CONFIG_SCSI_ADVANSYS=m -CONFIG_SCSI_ARCMSR=m -CONFIG_SCSI_ESAS2R=m -CONFIG_MEGARAID_NEWGEN=y -CONFIG_MEGARAID_MM=m -CONFIG_MEGARAID_MAILBOX=m -CONFIG_MEGARAID_LEGACY=m -CONFIG_MEGARAID_SAS=m -CONFIG_SCSI_MPT3SAS=m -CONFIG_SCSI_MPT2SAS_MAX_SGE=128 -CONFIG_SCSI_MPT3SAS_MAX_SGE=128 -CONFIG_SCSI_MPT2SAS=m -CONFIG_SCSI_SMARTPQI=m -CONFIG_SCSI_UFSHCD=m -CONFIG_SCSI_UFSHCD_PCI=m -CONFIG_SCSI_UFS_DWC_TC_PCI=m -CONFIG_SCSI_UFSHCD_PLATFORM=m -CONFIG_SCSI_UFS_DWC_TC_PLATFORM=m -CONFIG_SCSI_HPTIOP=m -CONFIG_SCSI_BUSLOGIC=m -CONFIG_SCSI_FLASHPOINT=y -CONFIG_VMWARE_PVSCSI=m -CONFIG_HYPERV_STORAGE=m -CONFIG_LIBFC=m -CONFIG_LIBFCOE=m -CONFIG_FCOE=m -CONFIG_FCOE_FNIC=m -CONFIG_SCSI_SNIC=m -# CONFIG_SCSI_SNIC_DEBUG_FS is not set -CONFIG_SCSI_DMX3191D=m -CONFIG_SCSI_GDTH=m -CONFIG_SCSI_ISCI=m -CONFIG_SCSI_IPS=m -CONFIG_SCSI_INITIO=m -CONFIG_SCSI_INIA100=m -CONFIG_SCSI_PPA=m -CONFIG_SCSI_IMM=m -# CONFIG_SCSI_IZIP_EPP16 is not set -# CONFIG_SCSI_IZIP_SLOW_CTR is not set -CONFIG_SCSI_STEX=m -CONFIG_SCSI_SYM53C8XX_2=m -CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1 -CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16 -CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64 -CONFIG_SCSI_SYM53C8XX_MMIO=y -CONFIG_SCSI_IPR=m -# 
CONFIG_SCSI_IPR_TRACE is not set -# CONFIG_SCSI_IPR_DUMP is not set -CONFIG_SCSI_QLOGIC_1280=m -CONFIG_SCSI_QLA_FC=m -CONFIG_TCM_QLA2XXX=m -# CONFIG_TCM_QLA2XXX_DEBUG is not set -CONFIG_SCSI_QLA_ISCSI=m -CONFIG_QEDI=m -CONFIG_QEDF=m -CONFIG_SCSI_LPFC=m -# CONFIG_SCSI_LPFC_DEBUG_FS is not set -CONFIG_SCSI_DC395x=m -CONFIG_SCSI_AM53C974=m -CONFIG_SCSI_WD719X=m -# CONFIG_SCSI_DEBUG is not set -CONFIG_SCSI_PMCRAID=m -CONFIG_SCSI_PM8001=m -CONFIG_SCSI_BFA_FC=m -CONFIG_SCSI_VIRTIO=m -CONFIG_SCSI_CHELSIO_FCOE=m -CONFIG_SCSI_LOWLEVEL_PCMCIA=y -CONFIG_PCMCIA_AHA152X=m -CONFIG_PCMCIA_QLOGIC=m -CONFIG_PCMCIA_SYM53C500=m -CONFIG_SCSI_DH=y -CONFIG_SCSI_DH_RDAC=m -CONFIG_SCSI_DH_HP_SW=m -CONFIG_SCSI_DH_EMC=m -CONFIG_SCSI_DH_ALUA=m -CONFIG_SCSI_OSD_INITIATOR=m -CONFIG_SCSI_OSD_ULD=m -CONFIG_SCSI_OSD_DPRINT_SENSE=1 -# CONFIG_SCSI_OSD_DEBUG is not set -CONFIG_ATA=m -CONFIG_ATA_VERBOSE_ERROR=y -CONFIG_ATA_ACPI=y -CONFIG_SATA_ZPODD=y -CONFIG_SATA_PMP=y - -# -# Controllers with non-SFF native interface -# -CONFIG_SATA_AHCI=m -CONFIG_SATA_MOBILE_LPM_POLICY=0 -CONFIG_SATA_AHCI_PLATFORM=m -CONFIG_SATA_INIC162X=m -CONFIG_SATA_ACARD_AHCI=m -CONFIG_SATA_SIL24=m -CONFIG_ATA_SFF=y - -# -# SFF controllers with custom DMA interface -# -CONFIG_PDC_ADMA=m -CONFIG_SATA_QSTOR=m -CONFIG_SATA_SX4=m -CONFIG_ATA_BMDMA=y - -# -# SATA SFF controllers with BMDMA -# -CONFIG_ATA_PIIX=m -CONFIG_SATA_DWC=m -# CONFIG_SATA_DWC_OLD_DMA is not set -# CONFIG_SATA_DWC_DEBUG is not set -CONFIG_SATA_MV=m -CONFIG_SATA_NV=m -CONFIG_SATA_PROMISE=m -CONFIG_SATA_SIL=m -CONFIG_SATA_SIS=m -CONFIG_SATA_SVW=m -CONFIG_SATA_ULI=m -CONFIG_SATA_VIA=m -CONFIG_SATA_VITESSE=m - -# -# PATA SFF controllers with BMDMA -# -CONFIG_PATA_ALI=m -CONFIG_PATA_AMD=m -CONFIG_PATA_ARTOP=m -CONFIG_PATA_ATIIXP=m -CONFIG_PATA_ATP867X=m -CONFIG_PATA_CMD64X=m -CONFIG_PATA_CYPRESS=m -CONFIG_PATA_EFAR=m -CONFIG_PATA_HPT366=m -CONFIG_PATA_HPT37X=m -CONFIG_PATA_HPT3X2N=m -CONFIG_PATA_HPT3X3=m -CONFIG_PATA_HPT3X3_DMA=y -CONFIG_PATA_IT8213=m -CONFIG_PATA_IT821X=m -CONFIG_PATA_JMICRON=m -CONFIG_PATA_MARVELL=m -CONFIG_PATA_NETCELL=m -CONFIG_PATA_NINJA32=m -CONFIG_PATA_NS87415=m -CONFIG_PATA_OLDPIIX=m -CONFIG_PATA_OPTIDMA=m -CONFIG_PATA_PDC2027X=m -CONFIG_PATA_PDC_OLD=m -CONFIG_PATA_RADISYS=m -CONFIG_PATA_RDC=m -CONFIG_PATA_SCH=m -CONFIG_PATA_SERVERWORKS=m -CONFIG_PATA_SIL680=m -CONFIG_PATA_SIS=m -CONFIG_PATA_TOSHIBA=m -CONFIG_PATA_TRIFLEX=m -CONFIG_PATA_VIA=m -CONFIG_PATA_WINBOND=m - -# -# PIO-only SFF controllers -# -CONFIG_PATA_CMD640_PCI=m -CONFIG_PATA_MPIIX=m -CONFIG_PATA_NS87410=m -CONFIG_PATA_OPTI=m -CONFIG_PATA_PCMCIA=m -CONFIG_PATA_RZ1000=m - -# -# Generic fallback / legacy drivers -# -CONFIG_PATA_ACPI=m -CONFIG_ATA_GENERIC=m -CONFIG_PATA_LEGACY=m -CONFIG_MD=y -CONFIG_BLK_DEV_MD=m -CONFIG_MD_LINEAR=m -CONFIG_MD_RAID0=m -CONFIG_MD_RAID1=m -CONFIG_MD_RAID10=m -CONFIG_MD_RAID456=m -CONFIG_MD_MULTIPATH=m -CONFIG_MD_FAULTY=m -CONFIG_MD_CLUSTER=m -CONFIG_BCACHE=m -# CONFIG_BCACHE_DEBUG is not set -# CONFIG_BCACHE_CLOSURES_DEBUG is not set -CONFIG_BLK_DEV_DM_BUILTIN=y -CONFIG_BLK_DEV_DM=m -# CONFIG_DM_MQ_DEFAULT is not set -# CONFIG_DM_DEBUG is not set -CONFIG_DM_BUFIO=m -# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set -CONFIG_DM_BIO_PRISON=m -CONFIG_DM_PERSISTENT_DATA=m -CONFIG_DM_UNSTRIPED=m -CONFIG_DM_CRYPT=m -CONFIG_DM_SNAPSHOT=m -CONFIG_DM_THIN_PROVISIONING=m -CONFIG_DM_CACHE=m -CONFIG_DM_CACHE_SMQ=m -CONFIG_DM_WRITECACHE=m -CONFIG_DM_ERA=m -CONFIG_DM_MIRROR=m -CONFIG_DM_LOG_USERSPACE=m -CONFIG_DM_RAID=m -CONFIG_DM_ZERO=m -CONFIG_DM_MULTIPATH=m -CONFIG_DM_MULTIPATH_QL=m 
-CONFIG_DM_MULTIPATH_ST=m -CONFIG_DM_DELAY=m -CONFIG_DM_UEVENT=y -CONFIG_DM_FLAKEY=m -CONFIG_DM_VERITY=m -# CONFIG_DM_VERITY_FEC is not set -CONFIG_DM_SWITCH=m -CONFIG_DM_LOG_WRITES=m -CONFIG_DM_INTEGRITY=m -CONFIG_DM_ZONED=m -CONFIG_TARGET_CORE=m -CONFIG_TCM_IBLOCK=m -CONFIG_TCM_FILEIO=m -CONFIG_TCM_PSCSI=m -CONFIG_TCM_USER2=m -CONFIG_LOOPBACK_TARGET=m -CONFIG_TCM_FC=m -CONFIG_ISCSI_TARGET=m -CONFIG_ISCSI_TARGET_CXGB4=m -CONFIG_SBP_TARGET=m -CONFIG_FUSION=y -CONFIG_FUSION_SPI=m -CONFIG_FUSION_FC=m -CONFIG_FUSION_SAS=m -CONFIG_FUSION_MAX_SGE=128 -CONFIG_FUSION_CTL=m -CONFIG_FUSION_LAN=m -CONFIG_FUSION_LOGGING=y - -# -# IEEE 1394 (FireWire) support -# -CONFIG_FIREWIRE=m -CONFIG_FIREWIRE_OHCI=m -CONFIG_FIREWIRE_SBP2=m -CONFIG_FIREWIRE_NET=m -CONFIG_FIREWIRE_NOSY=m -CONFIG_MACINTOSH_DRIVERS=y -CONFIG_MAC_EMUMOUSEBTN=m -CONFIG_NETDEVICES=y -CONFIG_MII=m -CONFIG_NET_CORE=y -CONFIG_BONDING=m -CONFIG_DUMMY=m -CONFIG_EQUALIZER=m -CONFIG_NET_FC=y -CONFIG_IFB=m -CONFIG_NET_TEAM=m -CONFIG_NET_TEAM_MODE_BROADCAST=m -CONFIG_NET_TEAM_MODE_ROUNDROBIN=m -CONFIG_NET_TEAM_MODE_RANDOM=m -CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m -CONFIG_NET_TEAM_MODE_LOADBALANCE=m -CONFIG_MACVLAN=m -CONFIG_MACVTAP=m -CONFIG_IPVLAN=m -CONFIG_IPVTAP=m -CONFIG_VXLAN=m -CONFIG_GENEVE=m -CONFIG_GTP=m -CONFIG_MACSEC=m -CONFIG_NETCONSOLE=m -CONFIG_NETCONSOLE_DYNAMIC=y -CONFIG_NETPOLL=y -CONFIG_NET_POLL_CONTROLLER=y -# CONFIG_NTB_NETDEV is not set -CONFIG_RIONET=m -CONFIG_RIONET_TX_SIZE=128 -CONFIG_RIONET_RX_SIZE=128 -CONFIG_TUN=m -CONFIG_TAP=m -# CONFIG_TUN_VNET_CROSS_LE is not set -CONFIG_VETH=m -CONFIG_VIRTIO_NET=m -CONFIG_NLMON=m -CONFIG_NET_VRF=m -CONFIG_VSOCKMON=m -CONFIG_SUNGEM_PHY=m -CONFIG_ARCNET=m -CONFIG_ARCNET_1201=m -CONFIG_ARCNET_1051=m -CONFIG_ARCNET_RAW=m -CONFIG_ARCNET_CAP=m -CONFIG_ARCNET_COM90xx=m -CONFIG_ARCNET_COM90xxIO=m -CONFIG_ARCNET_RIM_I=m -CONFIG_ARCNET_COM20020=m -CONFIG_ARCNET_COM20020_PCI=m -CONFIG_ARCNET_COM20020_CS=m -CONFIG_ATM_DRIVERS=y -# CONFIG_ATM_DUMMY is not set -CONFIG_ATM_TCP=m -CONFIG_ATM_LANAI=m -CONFIG_ATM_ENI=m -# CONFIG_ATM_ENI_DEBUG is not set -# CONFIG_ATM_ENI_TUNE_BURST is not set -CONFIG_ATM_FIRESTREAM=m -CONFIG_ATM_ZATM=m -# CONFIG_ATM_ZATM_DEBUG is not set -CONFIG_ATM_NICSTAR=m -CONFIG_ATM_NICSTAR_USE_SUNI=y -CONFIG_ATM_NICSTAR_USE_IDT77105=y -CONFIG_ATM_IDT77252=m -# CONFIG_ATM_IDT77252_DEBUG is not set -# CONFIG_ATM_IDT77252_RCV_ALL is not set -CONFIG_ATM_IDT77252_USE_SUNI=y -CONFIG_ATM_AMBASSADOR=m -# CONFIG_ATM_AMBASSADOR_DEBUG is not set -CONFIG_ATM_HORIZON=m -# CONFIG_ATM_HORIZON_DEBUG is not set -CONFIG_ATM_IA=m -# CONFIG_ATM_IA_DEBUG is not set -CONFIG_ATM_FORE200E=m -CONFIG_ATM_FORE200E_USE_TASKLET=y -CONFIG_ATM_FORE200E_TX_RETRY=16 -CONFIG_ATM_FORE200E_DEBUG=0 -CONFIG_ATM_HE=m -CONFIG_ATM_HE_USE_SUNI=y -CONFIG_ATM_SOLOS=m - -# -# CAIF transport drivers -# -CONFIG_CAIF_TTY=m -CONFIG_CAIF_SPI_SLAVE=m -CONFIG_CAIF_SPI_SYNC=y -CONFIG_CAIF_HSI=m -CONFIG_CAIF_VIRTIO=m - -# -# Distributed Switch Architecture drivers -# -CONFIG_B53=m -CONFIG_B53_SPI_DRIVER=m -CONFIG_B53_MDIO_DRIVER=m -CONFIG_B53_MMAP_DRIVER=m -CONFIG_B53_SRAB_DRIVER=m -# CONFIG_NET_DSA_BCM_SF2 is not set -CONFIG_NET_DSA_LOOP=m -CONFIG_NET_DSA_MT7530=m -CONFIG_NET_DSA_MV88E6060=m -CONFIG_MICROCHIP_KSZ=m -CONFIG_MICROCHIP_KSZ_SPI_DRIVER=m -CONFIG_NET_DSA_MV88E6XXX=m -CONFIG_NET_DSA_MV88E6XXX_GLOBAL2=y -CONFIG_NET_DSA_MV88E6XXX_PTP=y -CONFIG_NET_DSA_QCA8K=m -# CONFIG_NET_DSA_REALTEK_SMI is not set -CONFIG_NET_DSA_SMSC_LAN9303=m -CONFIG_NET_DSA_SMSC_LAN9303_I2C=m -CONFIG_NET_DSA_SMSC_LAN9303_MDIO=m -CONFIG_ETHERNET=y 
-CONFIG_MDIO=m -CONFIG_NET_VENDOR_3COM=y -CONFIG_PCMCIA_3C574=m -CONFIG_PCMCIA_3C589=m -CONFIG_VORTEX=m -CONFIG_TYPHOON=m -CONFIG_NET_VENDOR_ADAPTEC=y -CONFIG_ADAPTEC_STARFIRE=m -CONFIG_NET_VENDOR_AGERE=y -CONFIG_ET131X=m -CONFIG_NET_VENDOR_ALACRITECH=y -CONFIG_SLICOSS=m -CONFIG_NET_VENDOR_ALTEON=y -CONFIG_ACENIC=m -# CONFIG_ACENIC_OMIT_TIGON_I is not set -CONFIG_ALTERA_TSE=m -CONFIG_NET_VENDOR_AMAZON=y -CONFIG_ENA_ETHERNET=m -CONFIG_NET_VENDOR_AMD=y -CONFIG_AMD8111_ETH=m -CONFIG_PCNET32=m -CONFIG_PCMCIA_NMCLAN=m -CONFIG_AMD_XGBE=m -CONFIG_AMD_XGBE_DCB=y -CONFIG_AMD_XGBE_HAVE_ECC=y -CONFIG_NET_VENDOR_AQUANTIA=y -CONFIG_AQTION=m -CONFIG_NET_VENDOR_ARC=y -CONFIG_NET_VENDOR_ATHEROS=y -CONFIG_ATL2=m -CONFIG_ATL1=m -CONFIG_ATL1E=m -CONFIG_ATL1C=m -CONFIG_ALX=m -CONFIG_NET_VENDOR_AURORA=y -CONFIG_AURORA_NB8800=m -CONFIG_NET_VENDOR_BROADCOM=y -CONFIG_B44=m -CONFIG_B44_PCI_AUTOSELECT=y -CONFIG_B44_PCICORE_AUTOSELECT=y -CONFIG_B44_PCI=y -# CONFIG_BCMGENET is not set -CONFIG_BNX2=m -CONFIG_CNIC=m -CONFIG_TIGON3=m -CONFIG_TIGON3_HWMON=y -CONFIG_BNX2X=m -CONFIG_BNX2X_SRIOV=y -# CONFIG_SYSTEMPORT is not set -CONFIG_BNXT=m -CONFIG_BNXT_SRIOV=y -CONFIG_BNXT_FLOWER_OFFLOAD=y -CONFIG_BNXT_DCB=y -CONFIG_BNXT_HWMON=y -CONFIG_NET_VENDOR_BROCADE=y -CONFIG_BNA=m -CONFIG_NET_VENDOR_CADENCE=y -CONFIG_MACB=m -CONFIG_MACB_USE_HWSTAMP=y -CONFIG_MACB_PCI=m -CONFIG_NET_VENDOR_CAVIUM=y -CONFIG_THUNDER_NIC_PF=m -CONFIG_THUNDER_NIC_VF=m -CONFIG_THUNDER_NIC_BGX=m -CONFIG_THUNDER_NIC_RGX=m -CONFIG_CAVIUM_PTP=m -CONFIG_LIQUIDIO=m -CONFIG_LIQUIDIO_VF=m -CONFIG_NET_VENDOR_CHELSIO=y -CONFIG_CHELSIO_T1=m -CONFIG_CHELSIO_T1_1G=y -CONFIG_CHELSIO_T3=m -CONFIG_CHELSIO_T4=m -CONFIG_CHELSIO_T4_DCB=y -# CONFIG_CHELSIO_T4_FCOE is not set -CONFIG_CHELSIO_T4VF=m -CONFIG_CHELSIO_LIB=m -CONFIG_NET_VENDOR_CISCO=y -CONFIG_ENIC=m -# CONFIG_NET_VENDOR_CORTINA is not set -CONFIG_CX_ECAT=m -CONFIG_DNET=m -CONFIG_NET_VENDOR_DEC=y -CONFIG_NET_TULIP=y -CONFIG_DE2104X=m -CONFIG_DE2104X_DSL=0 -CONFIG_TULIP=m -CONFIG_TULIP_MWI=y -CONFIG_TULIP_MMIO=y -CONFIG_TULIP_NAPI=y -CONFIG_TULIP_NAPI_HW_MITIGATION=y -CONFIG_DE4X5=m -CONFIG_WINBOND_840=m -CONFIG_DM9102=m -CONFIG_ULI526X=m -CONFIG_PCMCIA_XIRCOM=m -CONFIG_NET_VENDOR_DLINK=y -CONFIG_DL2K=m -CONFIG_SUNDANCE=m -# CONFIG_SUNDANCE_MMIO is not set -CONFIG_NET_VENDOR_EMULEX=y -CONFIG_BE2NET=m -CONFIG_BE2NET_HWMON=y -CONFIG_BE2NET_BE2=y -CONFIG_BE2NET_BE3=y -CONFIG_BE2NET_LANCER=y -CONFIG_BE2NET_SKYHAWK=y -CONFIG_NET_VENDOR_EZCHIP=y -CONFIG_NET_VENDOR_FUJITSU=y -CONFIG_PCMCIA_FMVJ18X=m -CONFIG_NET_VENDOR_HP=y -CONFIG_HP100=m -CONFIG_NET_VENDOR_HUAWEI=y -CONFIG_HINIC=m -CONFIG_NET_VENDOR_I825XX=y -CONFIG_NET_VENDOR_INTEL=y -CONFIG_E100=m -CONFIG_E1000=m -CONFIG_E1000E=m -CONFIG_E1000E_HWTS=y -CONFIG_IGB=m -CONFIG_IGB_HWMON=y -CONFIG_IGB_DCA=y -CONFIG_IGBVF=m -CONFIG_IXGB=m -CONFIG_IXGBE=m -CONFIG_IXGBE_HWMON=y -CONFIG_IXGBE_DCA=y -CONFIG_IXGBE_DCB=y -CONFIG_IXGBEVF=m -CONFIG_I40E=m -CONFIG_I40E_DCB=y -CONFIG_I40EVF=m -CONFIG_ICE=m -CONFIG_FM10K=m -CONFIG_JME=m -CONFIG_NET_VENDOR_MARVELL=y -CONFIG_MVMDIO=m -CONFIG_SKGE=m -# CONFIG_SKGE_DEBUG is not set -CONFIG_SKGE_GENESIS=y -CONFIG_SKY2=m -# CONFIG_SKY2_DEBUG is not set -CONFIG_NET_VENDOR_MELLANOX=y -CONFIG_MLX4_EN=m -CONFIG_MLX4_EN_DCB=y -CONFIG_MLX4_CORE=m -CONFIG_MLX4_DEBUG=y -CONFIG_MLX4_CORE_GEN2=y -CONFIG_MLX5_CORE=m -CONFIG_MLX5_ACCEL=y -CONFIG_MLX5_FPGA=y -# CONFIG_MLX5_CORE_EN is not set -CONFIG_MLXSW_CORE=m -CONFIG_MLXSW_CORE_HWMON=y -CONFIG_MLXSW_CORE_THERMAL=y -CONFIG_MLXSW_PCI=m -CONFIG_MLXSW_I2C=m -CONFIG_MLXSW_SWITCHIB=m 
-CONFIG_MLXSW_SWITCHX2=m -CONFIG_MLXSW_SPECTRUM=m -CONFIG_MLXSW_SPECTRUM_DCB=y -CONFIG_MLXSW_MINIMAL=m -CONFIG_MLXFW=m -CONFIG_NET_VENDOR_MICREL=y -CONFIG_KS8842=m -CONFIG_KS8851=m -CONFIG_KS8851_MLL=m -CONFIG_KSZ884X_PCI=m -CONFIG_NET_VENDOR_MICROCHIP=y -CONFIG_ENC28J60=m -# CONFIG_ENC28J60_WRITEVERIFY is not set -CONFIG_ENCX24J600=m -CONFIG_LAN743X=m -CONFIG_NET_VENDOR_MICROSEMI=y -CONFIG_MSCC_OCELOT_SWITCH=m -CONFIG_MSCC_OCELOT_SWITCH_OCELOT=m -CONFIG_NET_VENDOR_MYRI=y -CONFIG_MYRI10GE=m -CONFIG_MYRI10GE_DCA=y -CONFIG_FEALNX=m -CONFIG_NET_VENDOR_NATSEMI=y -CONFIG_NATSEMI=m -CONFIG_NS83820=m -CONFIG_NET_VENDOR_NETERION=y -CONFIG_S2IO=m -CONFIG_VXGE=m -# CONFIG_VXGE_DEBUG_TRACE_ALL is not set -CONFIG_NET_VENDOR_NETRONOME=y -CONFIG_NFP=m -# CONFIG_NFP_APP_FLOWER is not set -CONFIG_NFP_APP_ABM_NIC=y -# CONFIG_NFP_DEBUG is not set -CONFIG_NET_VENDOR_NI=y -CONFIG_NET_VENDOR_8390=y -CONFIG_PCMCIA_AXNET=m -CONFIG_NE2K_PCI=m -CONFIG_PCMCIA_PCNET=m -CONFIG_NET_VENDOR_NVIDIA=y -CONFIG_FORCEDETH=m -CONFIG_NET_VENDOR_OKI=y -CONFIG_ETHOC=m -CONFIG_NET_VENDOR_PACKET_ENGINES=y -CONFIG_HAMACHI=m -CONFIG_YELLOWFIN=m -CONFIG_NET_VENDOR_QLOGIC=y -CONFIG_QLA3XXX=m -CONFIG_QLCNIC=m -CONFIG_QLCNIC_SRIOV=y -CONFIG_QLCNIC_DCB=y -CONFIG_QLCNIC_HWMON=y -CONFIG_QLGE=m -CONFIG_NETXEN_NIC=m -CONFIG_QED=m -CONFIG_QED_LL2=y -CONFIG_QED_SRIOV=y -CONFIG_QEDE=m -CONFIG_QED_RDMA=y -CONFIG_QED_ISCSI=y -CONFIG_QED_FCOE=y -CONFIG_QED_OOO=y -CONFIG_NET_VENDOR_QUALCOMM=y -CONFIG_QCOM_EMAC=m -CONFIG_RMNET=m -CONFIG_NET_VENDOR_RDC=y -CONFIG_R6040=m -CONFIG_NET_VENDOR_REALTEK=y -CONFIG_ATP=m -CONFIG_8139CP=m -CONFIG_8139TOO=m -# CONFIG_8139TOO_PIO is not set -CONFIG_8139TOO_TUNE_TWISTER=y -CONFIG_8139TOO_8129=y -# CONFIG_8139_OLD_RX_RESET is not set -CONFIG_R8169=m -CONFIG_NET_VENDOR_RENESAS=y -CONFIG_NET_VENDOR_ROCKER=y -CONFIG_ROCKER=m -CONFIG_NET_VENDOR_SAMSUNG=y -CONFIG_SXGBE_ETH=m -CONFIG_NET_VENDOR_SEEQ=y -CONFIG_NET_VENDOR_SOLARFLARE=y -CONFIG_SFC=m -CONFIG_SFC_MTD=y -CONFIG_SFC_MCDI_MON=y -CONFIG_SFC_SRIOV=y -CONFIG_SFC_MCDI_LOGGING=y -CONFIG_SFC_FALCON=m -CONFIG_SFC_FALCON_MTD=y -CONFIG_NET_VENDOR_SILAN=y -CONFIG_SC92031=m -CONFIG_NET_VENDOR_SIS=y -CONFIG_SIS900=m -CONFIG_SIS190=m -CONFIG_NET_VENDOR_SMSC=y -CONFIG_PCMCIA_SMC91C92=m -CONFIG_EPIC100=m -CONFIG_SMSC911X=m -CONFIG_SMSC9420=m -# CONFIG_NET_VENDOR_SOCIONEXT is not set -CONFIG_NET_VENDOR_STMICRO=y -CONFIG_STMMAC_ETH=m -CONFIG_STMMAC_PLATFORM=m -CONFIG_DWMAC_GENERIC=m -CONFIG_STMMAC_PCI=m -CONFIG_NET_VENDOR_SUN=y -CONFIG_HAPPYMEAL=m -CONFIG_SUNGEM=m -CONFIG_CASSINI=m -CONFIG_NIU=m -CONFIG_NET_VENDOR_SYNOPSYS=y -CONFIG_DWC_XLGMAC=m -CONFIG_DWC_XLGMAC_PCI=m -CONFIG_NET_VENDOR_TEHUTI=y -CONFIG_TEHUTI=m -CONFIG_NET_VENDOR_TI=y -CONFIG_TI_CPSW_ALE=m -CONFIG_TLAN=m -CONFIG_NET_VENDOR_VIA=y -CONFIG_VIA_RHINE=m -CONFIG_VIA_RHINE_MMIO=y -CONFIG_VIA_VELOCITY=m -CONFIG_NET_VENDOR_WIZNET=y -CONFIG_WIZNET_W5100=m -CONFIG_WIZNET_W5300=m -# CONFIG_WIZNET_BUS_DIRECT is not set -# CONFIG_WIZNET_BUS_INDIRECT is not set -CONFIG_WIZNET_BUS_ANY=y -CONFIG_WIZNET_W5100_SPI=m -CONFIG_NET_VENDOR_XIRCOM=y -CONFIG_PCMCIA_XIRC2PS=m -CONFIG_FDDI=m -CONFIG_DEFXX=m -# CONFIG_DEFXX_MMIO is not set -CONFIG_SKFP=m -CONFIG_HIPPI=y -CONFIG_ROADRUNNER=m -CONFIG_ROADRUNNER_LARGE_RINGS=y -CONFIG_NET_SB1000=m -CONFIG_MDIO_DEVICE=m -CONFIG_MDIO_BUS=m -# CONFIG_MDIO_BCM_UNIMAC is not set -CONFIG_MDIO_BITBANG=m -CONFIG_MDIO_CAVIUM=m -CONFIG_MDIO_GPIO=m -CONFIG_MDIO_I2C=m -CONFIG_MDIO_MSCC_MIIM=m -CONFIG_MDIO_THUNDER=m -CONFIG_PHYLINK=m -CONFIG_PHYLIB=m -CONFIG_SWPHY=y -CONFIG_LED_TRIGGER_PHY=y - -# -# MII 
PHY device drivers -# -CONFIG_SFP=m -CONFIG_AMD_PHY=m -CONFIG_AQUANTIA_PHY=m -CONFIG_ASIX_PHY=m -CONFIG_AT803X_PHY=m -CONFIG_BCM7XXX_PHY=m -CONFIG_BCM87XX_PHY=m -CONFIG_BCM_NET_PHYLIB=m -CONFIG_BROADCOM_PHY=m -CONFIG_CICADA_PHY=m -CONFIG_CORTINA_PHY=m -CONFIG_DAVICOM_PHY=m -CONFIG_DP83822_PHY=m -CONFIG_DP83TC811_PHY=m -CONFIG_DP83848_PHY=m -CONFIG_DP83867_PHY=m -CONFIG_FIXED_PHY=m -CONFIG_ICPLUS_PHY=m -CONFIG_INTEL_XWAY_PHY=m -CONFIG_LSI_ET1011C_PHY=m -CONFIG_LXT_PHY=m -CONFIG_MARVELL_PHY=m -CONFIG_MARVELL_10G_PHY=m -CONFIG_MICREL_PHY=m -CONFIG_MICROCHIP_PHY=m -CONFIG_MICROCHIP_T1_PHY=m -CONFIG_MICROSEMI_PHY=m -CONFIG_NATIONAL_PHY=m -CONFIG_QSEMI_PHY=m -CONFIG_REALTEK_PHY=m -CONFIG_RENESAS_PHY=m -CONFIG_ROCKCHIP_PHY=m -CONFIG_SMSC_PHY=m -CONFIG_STE10XP=m -CONFIG_TERANETICS_PHY=m -CONFIG_VITESSE_PHY=m -CONFIG_XILINX_GMII2RGMII=m -CONFIG_MICREL_KS8995MA=m -CONFIG_PLIP=m -CONFIG_PPP=m -CONFIG_PPP_BSDCOMP=m -CONFIG_PPP_DEFLATE=m -CONFIG_PPP_FILTER=y -CONFIG_PPP_MPPE=m -CONFIG_PPP_MULTILINK=y -CONFIG_PPPOATM=m -CONFIG_PPPOE=m -CONFIG_PPTP=m -CONFIG_PPPOL2TP=m -CONFIG_PPP_ASYNC=m -CONFIG_PPP_SYNC_TTY=m -CONFIG_SLIP=m -CONFIG_SLHC=m -CONFIG_SLIP_COMPRESSED=y -CONFIG_SLIP_SMART=y -CONFIG_SLIP_MODE_SLIP6=y - -# -# Host-side USB support is needed for USB Network Adapter support -# -CONFIG_USB_NET_DRIVERS=m -CONFIG_USB_CATC=m -CONFIG_USB_KAWETH=m -CONFIG_USB_PEGASUS=m -CONFIG_USB_RTL8150=m -CONFIG_USB_RTL8152=m -CONFIG_USB_LAN78XX=m -CONFIG_USB_USBNET=m -CONFIG_USB_NET_AX8817X=m -CONFIG_USB_NET_AX88179_178A=m -CONFIG_USB_NET_CDCETHER=m -CONFIG_USB_NET_CDC_EEM=m -CONFIG_USB_NET_CDC_NCM=m -CONFIG_USB_NET_HUAWEI_CDC_NCM=m -CONFIG_USB_NET_CDC_MBIM=m -CONFIG_USB_NET_DM9601=m -CONFIG_USB_NET_SR9700=m -CONFIG_USB_NET_SR9800=m -CONFIG_USB_NET_SMSC75XX=m -CONFIG_USB_NET_SMSC95XX=m -CONFIG_USB_NET_GL620A=m -CONFIG_USB_NET_NET1080=m -CONFIG_USB_NET_PLUSB=m -CONFIG_USB_NET_MCS7830=m -CONFIG_USB_NET_RNDIS_HOST=m -CONFIG_USB_NET_CDC_SUBSET_ENABLE=m -CONFIG_USB_NET_CDC_SUBSET=m -CONFIG_USB_ALI_M5632=y -CONFIG_USB_AN2720=y -CONFIG_USB_BELKIN=y -CONFIG_USB_ARMLINUX=y -CONFIG_USB_EPSON2888=y -CONFIG_USB_KC2190=y -CONFIG_USB_NET_ZAURUS=m -CONFIG_USB_NET_CX82310_ETH=m -CONFIG_USB_NET_KALMIA=m -CONFIG_USB_NET_QMI_WWAN=m -CONFIG_USB_HSO=m -CONFIG_USB_NET_INT51X1=m -CONFIG_USB_CDC_PHONET=m -CONFIG_USB_IPHETH=m -CONFIG_USB_SIERRA_NET=m -CONFIG_USB_VL600=m -CONFIG_USB_NET_CH9200=m -CONFIG_WLAN=y -CONFIG_WLAN_VENDOR_ADMTEK=y -CONFIG_ADM8211=m -CONFIG_ATH_COMMON=m -CONFIG_WLAN_VENDOR_ATH=y -# CONFIG_ATH_DEBUG is not set -CONFIG_ATH5K=m -# CONFIG_ATH5K_DEBUG is not set -# CONFIG_ATH5K_TRACER is not set -CONFIG_ATH5K_PCI=y -CONFIG_ATH9K_HW=m -CONFIG_ATH9K_COMMON=m -CONFIG_ATH9K_BTCOEX_SUPPORT=y -CONFIG_ATH9K=m -CONFIG_ATH9K_PCI=y -CONFIG_ATH9K_AHB=y -# CONFIG_ATH9K_DEBUGFS is not set -CONFIG_ATH9K_DYNACK=y -CONFIG_ATH9K_WOW=y -CONFIG_ATH9K_RFKILL=y -CONFIG_ATH9K_CHANNEL_CONTEXT=y -CONFIG_ATH9K_PCOEM=y -CONFIG_ATH9K_HTC=m -# CONFIG_ATH9K_HTC_DEBUGFS is not set -CONFIG_ATH9K_HWRNG=y -CONFIG_CARL9170=m -CONFIG_CARL9170_LEDS=y -CONFIG_CARL9170_WPC=y -# CONFIG_CARL9170_HWRNG is not set -CONFIG_ATH6KL=m -CONFIG_ATH6KL_SDIO=m -CONFIG_ATH6KL_USB=m -# CONFIG_ATH6KL_DEBUG is not set -# CONFIG_ATH6KL_TRACING is not set -CONFIG_AR5523=m -CONFIG_WIL6210=m -CONFIG_WIL6210_ISR_COR=y -CONFIG_WIL6210_TRACING=y -CONFIG_WIL6210_DEBUGFS=y -CONFIG_ATH10K=m -CONFIG_ATH10K_CE=y -CONFIG_ATH10K_PCI=m -CONFIG_ATH10K_SDIO=m -CONFIG_ATH10K_USB=m -# CONFIG_ATH10K_DEBUG is not set -# CONFIG_ATH10K_DEBUGFS is not set -# CONFIG_ATH10K_TRACING is not set 
-CONFIG_WCN36XX=m -# CONFIG_WCN36XX_DEBUGFS is not set -CONFIG_WLAN_VENDOR_ATMEL=y -CONFIG_ATMEL=m -CONFIG_PCI_ATMEL=m -CONFIG_PCMCIA_ATMEL=m -CONFIG_AT76C50X_USB=m -CONFIG_WLAN_VENDOR_BROADCOM=y -CONFIG_B43=m -CONFIG_B43_BCMA=y -CONFIG_B43_SSB=y -CONFIG_B43_BUSES_BCMA_AND_SSB=y -# CONFIG_B43_BUSES_BCMA is not set -# CONFIG_B43_BUSES_SSB is not set -CONFIG_B43_PCI_AUTOSELECT=y -CONFIG_B43_PCICORE_AUTOSELECT=y -CONFIG_B43_SDIO=y -CONFIG_B43_BCMA_PIO=y -CONFIG_B43_PIO=y -CONFIG_B43_PHY_G=y -CONFIG_B43_PHY_N=y -CONFIG_B43_PHY_LP=y -CONFIG_B43_PHY_HT=y -CONFIG_B43_LEDS=y -CONFIG_B43_HWRNG=y -# CONFIG_B43_DEBUG is not set -CONFIG_B43LEGACY=m -CONFIG_B43LEGACY_PCI_AUTOSELECT=y -CONFIG_B43LEGACY_PCICORE_AUTOSELECT=y -CONFIG_B43LEGACY_LEDS=y -CONFIG_B43LEGACY_HWRNG=y -# CONFIG_B43LEGACY_DEBUG is not set -CONFIG_B43LEGACY_DMA=y -CONFIG_B43LEGACY_PIO=y -CONFIG_B43LEGACY_DMA_AND_PIO_MODE=y -# CONFIG_B43LEGACY_DMA_MODE is not set -# CONFIG_B43LEGACY_PIO_MODE is not set -CONFIG_BRCMUTIL=m -CONFIG_BRCMSMAC=m -CONFIG_BRCMFMAC=m -CONFIG_BRCMFMAC_PROTO_BCDC=y -CONFIG_BRCMFMAC_PROTO_MSGBUF=y -CONFIG_BRCMFMAC_SDIO=y -CONFIG_BRCMFMAC_USB=y -CONFIG_BRCMFMAC_PCIE=y -CONFIG_BRCM_TRACING=y -# CONFIG_BRCMDBG is not set -CONFIG_WLAN_VENDOR_CISCO=y -CONFIG_AIRO=m -CONFIG_AIRO_CS=m -CONFIG_WLAN_VENDOR_INTEL=y -CONFIG_IPW2100=m -CONFIG_IPW2100_MONITOR=y -# CONFIG_IPW2100_DEBUG is not set -CONFIG_IPW2200=m -CONFIG_IPW2200_MONITOR=y -CONFIG_IPW2200_RADIOTAP=y -CONFIG_IPW2200_PROMISCUOUS=y -CONFIG_IPW2200_QOS=y -# CONFIG_IPW2200_DEBUG is not set -CONFIG_LIBIPW=m -# CONFIG_LIBIPW_DEBUG is not set -CONFIG_IWLEGACY=m -CONFIG_IWL4965=m -CONFIG_IWL3945=m - -# -# iwl3945 / iwl4965 Debugging Options -# -# CONFIG_IWLEGACY_DEBUG is not set -CONFIG_IWLWIFI=m -CONFIG_IWLWIFI_LEDS=y -CONFIG_IWLDVM=m -CONFIG_IWLMVM=m -CONFIG_IWLWIFI_OPMODE_MODULAR=y -CONFIG_IWLWIFI_BCAST_FILTERING=y - -# -# Debugging Options -# -# CONFIG_IWLWIFI_DEBUG is not set -CONFIG_IWLWIFI_DEVICE_TRACING=y -CONFIG_WLAN_VENDOR_INTERSIL=y -CONFIG_HOSTAP=m -CONFIG_HOSTAP_FIRMWARE=y -CONFIG_HOSTAP_FIRMWARE_NVRAM=y -CONFIG_HOSTAP_PLX=m -CONFIG_HOSTAP_PCI=m -CONFIG_HOSTAP_CS=m -CONFIG_HERMES=m -CONFIG_HERMES_PRISM=y -CONFIG_HERMES_CACHE_FW_ON_INIT=y -CONFIG_PLX_HERMES=m -CONFIG_TMD_HERMES=m -CONFIG_NORTEL_HERMES=m -CONFIG_PCI_HERMES=m -CONFIG_PCMCIA_HERMES=m -CONFIG_PCMCIA_SPECTRUM=m -CONFIG_ORINOCO_USB=m -CONFIG_P54_COMMON=m -CONFIG_P54_USB=m -CONFIG_P54_PCI=m -CONFIG_P54_SPI=m -CONFIG_P54_SPI_DEFAULT_EEPROM=y -CONFIG_P54_LEDS=y -CONFIG_PRISM54=m -CONFIG_WLAN_VENDOR_MARVELL=y -CONFIG_LIBERTAS=m -CONFIG_LIBERTAS_USB=m -CONFIG_LIBERTAS_CS=m -CONFIG_LIBERTAS_SDIO=m -CONFIG_LIBERTAS_SPI=m -# CONFIG_LIBERTAS_DEBUG is not set -CONFIG_LIBERTAS_MESH=y -CONFIG_LIBERTAS_THINFIRM=m -# CONFIG_LIBERTAS_THINFIRM_DEBUG is not set -CONFIG_LIBERTAS_THINFIRM_USB=m -CONFIG_MWIFIEX=m -CONFIG_MWIFIEX_SDIO=m -CONFIG_MWIFIEX_PCIE=m -CONFIG_MWIFIEX_USB=m -CONFIG_MWL8K=m -CONFIG_WLAN_VENDOR_MEDIATEK=y -CONFIG_MT7601U=m -CONFIG_MT76_CORE=m -CONFIG_MT76_LEDS=y -CONFIG_MT76x2_COMMON=m -# CONFIG_MT76x0U is not set -CONFIG_MT76x2E=m -# CONFIG_MT76x2U is not set -CONFIG_WLAN_VENDOR_RALINK=y -CONFIG_RT2X00=m -CONFIG_RT2400PCI=m -CONFIG_RT2500PCI=m -CONFIG_RT61PCI=m -CONFIG_RT2800PCI=m -CONFIG_RT2800PCI_RT33XX=y -CONFIG_RT2800PCI_RT35XX=y -CONFIG_RT2800PCI_RT53XX=y -CONFIG_RT2800PCI_RT3290=y -CONFIG_RT2500USB=m -CONFIG_RT73USB=m -CONFIG_RT2800USB=m -CONFIG_RT2800USB_RT33XX=y -CONFIG_RT2800USB_RT35XX=y -CONFIG_RT2800USB_RT3573=y -CONFIG_RT2800USB_RT53XX=y -CONFIG_RT2800USB_RT55XX=y 
-CONFIG_RT2800USB_UNKNOWN=y -CONFIG_RT2800_LIB=m -CONFIG_RT2800_LIB_MMIO=m -CONFIG_RT2X00_LIB_MMIO=m -CONFIG_RT2X00_LIB_PCI=m -CONFIG_RT2X00_LIB_USB=m -CONFIG_RT2X00_LIB=m -CONFIG_RT2X00_LIB_FIRMWARE=y -CONFIG_RT2X00_LIB_CRYPTO=y -CONFIG_RT2X00_LIB_LEDS=y -# CONFIG_RT2X00_DEBUG is not set -CONFIG_WLAN_VENDOR_REALTEK=y -CONFIG_RTL8180=m -CONFIG_RTL8187=m -CONFIG_RTL8187_LEDS=y -CONFIG_RTL_CARDS=m -CONFIG_RTL8192CE=m -CONFIG_RTL8192SE=m -CONFIG_RTL8192DE=m -CONFIG_RTL8723AE=m -CONFIG_RTL8723BE=m -CONFIG_RTL8188EE=m -CONFIG_RTL8192EE=m -CONFIG_RTL8821AE=m -CONFIG_RTL8192CU=m -CONFIG_RTLWIFI=m -CONFIG_RTLWIFI_PCI=m -CONFIG_RTLWIFI_USB=m -# CONFIG_RTLWIFI_DEBUG is not set -CONFIG_RTL8192C_COMMON=m -CONFIG_RTL8723_COMMON=m -CONFIG_RTLBTCOEXIST=m -CONFIG_RTL8XXXU=m -CONFIG_RTL8XXXU_UNTESTED=y -CONFIG_WLAN_VENDOR_RSI=y -CONFIG_RSI_91X=m -# CONFIG_RSI_DEBUGFS is not set -CONFIG_RSI_SDIO=m -CONFIG_RSI_USB=m -CONFIG_RSI_COEX=y -CONFIG_WLAN_VENDOR_ST=y -CONFIG_CW1200=m -CONFIG_CW1200_WLAN_SDIO=m -CONFIG_CW1200_WLAN_SPI=m -CONFIG_WLAN_VENDOR_TI=y -CONFIG_WL1251=m -CONFIG_WL1251_SPI=m -CONFIG_WL1251_SDIO=m -CONFIG_WL12XX=m -CONFIG_WL18XX=m -CONFIG_WLCORE=m -CONFIG_WLCORE_SDIO=m -CONFIG_WILINK_PLATFORM_DATA=y -CONFIG_WLAN_VENDOR_ZYDAS=y -CONFIG_USB_ZD1201=m -CONFIG_ZD1211RW=m -# CONFIG_ZD1211RW_DEBUG is not set -CONFIG_WLAN_VENDOR_QUANTENNA=y -CONFIG_QTNFMAC=m -CONFIG_QTNFMAC_PEARL_PCIE=m -CONFIG_PCMCIA_RAYCS=m -CONFIG_PCMCIA_WL3501=m -# CONFIG_MAC80211_HWSIM is not set -CONFIG_USB_NET_RNDIS_WLAN=m - -# -# WiMAX Wireless Broadband devices -# -CONFIG_WIMAX_I2400M=m -CONFIG_WIMAX_I2400M_USB=m -CONFIG_WIMAX_I2400M_DEBUG_LEVEL=8 -CONFIG_WAN=y -CONFIG_LANMEDIA=m -CONFIG_HDLC=m -CONFIG_HDLC_RAW=m -CONFIG_HDLC_RAW_ETH=m -CONFIG_HDLC_CISCO=m -CONFIG_HDLC_FR=m -CONFIG_HDLC_PPP=m -CONFIG_HDLC_X25=m -CONFIG_PCI200SYN=m -CONFIG_WANXL=m -CONFIG_PC300TOO=m -CONFIG_FARSYNC=m -CONFIG_DSCC4=m -CONFIG_DSCC4_PCISYNC=y -CONFIG_DSCC4_PCI_RST=y -CONFIG_DLCI=m -CONFIG_DLCI_MAX=8 -CONFIG_LAPBETHER=m -CONFIG_X25_ASY=m -CONFIG_SBNI=m -CONFIG_SBNI_MULTILINE=y -CONFIG_IEEE802154_DRIVERS=m -CONFIG_IEEE802154_FAKELB=m -CONFIG_IEEE802154_AT86RF230=m -# CONFIG_IEEE802154_AT86RF230_DEBUGFS is not set -CONFIG_IEEE802154_MRF24J40=m -CONFIG_IEEE802154_CC2520=m -CONFIG_IEEE802154_ATUSB=m -CONFIG_IEEE802154_ADF7242=m -CONFIG_IEEE802154_CA8210=m -# CONFIG_IEEE802154_CA8210_DEBUGFS is not set -CONFIG_IEEE802154_MCR20A=m -# CONFIG_IEEE802154_HWSIM is not set -CONFIG_VMXNET3=m -CONFIG_FUJITSU_ES=m -CONFIG_THUNDERBOLT_NET=m -CONFIG_HYPERV_NET=m -CONFIG_NETDEVSIM=m -CONFIG_NET_FAILOVER=m -CONFIG_ISDN=y -CONFIG_ISDN_I4L=m -CONFIG_ISDN_PPP=y -CONFIG_ISDN_PPP_VJ=y -CONFIG_ISDN_MPP=y -CONFIG_IPPP_FILTER=y -CONFIG_ISDN_PPP_BSDCOMP=m -CONFIG_ISDN_AUDIO=y -CONFIG_ISDN_TTY_FAX=y -CONFIG_ISDN_X25=y - -# -# ISDN feature submodules -# -CONFIG_ISDN_DIVERSION=m - -# -# ISDN4Linux hardware drivers -# - -# -# Passive cards -# -CONFIG_ISDN_DRV_HISAX=m - -# -# D-channel protocol features -# -CONFIG_HISAX_EURO=y -CONFIG_DE_AOC=y -# CONFIG_HISAX_NO_SENDCOMPLETE is not set -# CONFIG_HISAX_NO_LLC is not set -# CONFIG_HISAX_NO_KEYPAD is not set -CONFIG_HISAX_1TR6=y -CONFIG_HISAX_NI1=y -CONFIG_HISAX_MAX_CARDS=8 - -# -# HiSax supported cards -# -CONFIG_HISAX_16_3=y -CONFIG_HISAX_TELESPCI=y -CONFIG_HISAX_S0BOX=y -CONFIG_HISAX_FRITZPCI=y -CONFIG_HISAX_AVM_A1_PCMCIA=y -CONFIG_HISAX_ELSA=y -CONFIG_HISAX_DIEHLDIVA=y -CONFIG_HISAX_SEDLBAUER=y -CONFIG_HISAX_NETJET=y -CONFIG_HISAX_NETJET_U=y -CONFIG_HISAX_NICCY=y -CONFIG_HISAX_BKM_A4T=y -CONFIG_HISAX_SCT_QUADRO=y 
-CONFIG_HISAX_GAZEL=y -CONFIG_HISAX_HFC_PCI=y -CONFIG_HISAX_W6692=y -CONFIG_HISAX_HFC_SX=y -CONFIG_HISAX_ENTERNOW_PCI=y -# CONFIG_HISAX_DEBUG is not set - -# -# HiSax PCMCIA card service modules -# -CONFIG_HISAX_SEDLBAUER_CS=m -CONFIG_HISAX_ELSA_CS=m -CONFIG_HISAX_AVM_A1_CS=m -CONFIG_HISAX_TELES_CS=m - -# -# HiSax sub driver modules -# -CONFIG_HISAX_ST5481=m -CONFIG_HISAX_HFCUSB=m -CONFIG_HISAX_HFC4S8S=m -CONFIG_HISAX_FRITZ_PCIPNP=m -CONFIG_ISDN_CAPI=m -CONFIG_CAPI_TRACE=y -CONFIG_ISDN_CAPI_CAPI20=m -CONFIG_ISDN_CAPI_MIDDLEWARE=y -CONFIG_ISDN_CAPI_CAPIDRV=m -# CONFIG_ISDN_CAPI_CAPIDRV_VERBOSE is not set - -# -# CAPI hardware drivers -# -CONFIG_CAPI_AVM=y -CONFIG_ISDN_DRV_AVMB1_B1PCI=m -CONFIG_ISDN_DRV_AVMB1_B1PCIV4=y -CONFIG_ISDN_DRV_AVMB1_B1PCMCIA=m -CONFIG_ISDN_DRV_AVMB1_AVM_CS=m -CONFIG_ISDN_DRV_AVMB1_T1PCI=m -CONFIG_ISDN_DRV_AVMB1_C4=m -CONFIG_CAPI_EICON=y -CONFIG_ISDN_DIVAS=m -CONFIG_ISDN_DIVAS_BRIPCI=y -CONFIG_ISDN_DIVAS_PRIPCI=y -CONFIG_ISDN_DIVAS_DIVACAPI=m -CONFIG_ISDN_DIVAS_USERIDI=m -CONFIG_ISDN_DIVAS_MAINT=m -CONFIG_ISDN_DRV_GIGASET=m -CONFIG_GIGASET_CAPI=y -CONFIG_GIGASET_BASE=m -CONFIG_GIGASET_M105=m -CONFIG_GIGASET_M101=m -# CONFIG_GIGASET_DEBUG is not set -CONFIG_HYSDN=m -CONFIG_HYSDN_CAPI=y -CONFIG_MISDN=m -CONFIG_MISDN_DSP=m -CONFIG_MISDN_L1OIP=m - -# -# mISDN hardware drivers -# -CONFIG_MISDN_HFCPCI=m -CONFIG_MISDN_HFCMULTI=m -CONFIG_MISDN_HFCUSB=m -CONFIG_MISDN_AVMFRITZ=m -CONFIG_MISDN_SPEEDFAX=m -CONFIG_MISDN_INFINEON=m -CONFIG_MISDN_W6692=m -CONFIG_MISDN_NETJET=m -CONFIG_MISDN_IPAC=m -CONFIG_MISDN_ISAR=m -CONFIG_ISDN_HDLC=m -CONFIG_NVM=y -CONFIG_NVM_PBLK=m -# CONFIG_NVM_PBLK_DEBUG is not set - -# -# Input device support -# -CONFIG_INPUT=y -CONFIG_INPUT_LEDS=y -CONFIG_INPUT_FF_MEMLESS=m -CONFIG_INPUT_POLLDEV=m -CONFIG_INPUT_SPARSEKMAP=m -CONFIG_INPUT_MATRIXKMAP=m - -# -# Userland interfaces -# -CONFIG_INPUT_MOUSEDEV=y -CONFIG_INPUT_MOUSEDEV_PSAUX=y -CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 -CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 -CONFIG_INPUT_JOYDEV=m -CONFIG_INPUT_EVDEV=m -# CONFIG_INPUT_EVBUG is not set - -# -# Input Device Drivers -# -CONFIG_INPUT_KEYBOARD=y -CONFIG_KEYBOARD_ADC=m -CONFIG_KEYBOARD_ADP5588=m -CONFIG_KEYBOARD_ADP5589=m -CONFIG_KEYBOARD_ATKBD=y -CONFIG_KEYBOARD_QT1070=m -CONFIG_KEYBOARD_QT2160=m -CONFIG_KEYBOARD_DLINK_DIR685=m -CONFIG_KEYBOARD_LKKBD=m -CONFIG_KEYBOARD_GPIO=m -CONFIG_KEYBOARD_GPIO_POLLED=m -CONFIG_KEYBOARD_TCA6416=m -CONFIG_KEYBOARD_TCA8418=m -CONFIG_KEYBOARD_MATRIX=m -CONFIG_KEYBOARD_LM8323=m -CONFIG_KEYBOARD_LM8333=m -CONFIG_KEYBOARD_MAX7359=m -CONFIG_KEYBOARD_MCS=m -CONFIG_KEYBOARD_MPR121=m -CONFIG_KEYBOARD_NEWTON=m -CONFIG_KEYBOARD_OPENCORES=m -CONFIG_KEYBOARD_SAMSUNG=m -CONFIG_KEYBOARD_STOWAWAY=m -CONFIG_KEYBOARD_SUNKBD=m -CONFIG_KEYBOARD_TM2_TOUCHKEY=m -CONFIG_KEYBOARD_XTKBD=m -CONFIG_KEYBOARD_CROS_EC=m -CONFIG_KEYBOARD_MTK_PMIC=m -CONFIG_INPUT_MOUSE=y -CONFIG_MOUSE_PS2=m -CONFIG_MOUSE_PS2_ALPS=y -CONFIG_MOUSE_PS2_BYD=y -CONFIG_MOUSE_PS2_LOGIPS2PP=y -CONFIG_MOUSE_PS2_SYNAPTICS=y -CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS=y -CONFIG_MOUSE_PS2_CYPRESS=y -CONFIG_MOUSE_PS2_LIFEBOOK=y -CONFIG_MOUSE_PS2_TRACKPOINT=y -CONFIG_MOUSE_PS2_ELANTECH=y -CONFIG_MOUSE_PS2_ELANTECH_SMBUS=y -CONFIG_MOUSE_PS2_SENTELIC=y -CONFIG_MOUSE_PS2_TOUCHKIT=y -CONFIG_MOUSE_PS2_FOCALTECH=y -# CONFIG_MOUSE_PS2_VMMOUSE is not set -CONFIG_MOUSE_PS2_SMBUS=y -CONFIG_MOUSE_SERIAL=m -CONFIG_MOUSE_APPLETOUCH=m -CONFIG_MOUSE_BCM5974=m -CONFIG_MOUSE_CYAPA=m -CONFIG_MOUSE_ELAN_I2C=m -CONFIG_MOUSE_ELAN_I2C_I2C=y -CONFIG_MOUSE_ELAN_I2C_SMBUS=y -CONFIG_MOUSE_VSXXXAA=m -CONFIG_MOUSE_GPIO=m 
-CONFIG_MOUSE_SYNAPTICS_I2C=m -CONFIG_MOUSE_SYNAPTICS_USB=m -CONFIG_INPUT_JOYSTICK=y -CONFIG_JOYSTICK_ANALOG=m -CONFIG_JOYSTICK_A3D=m -CONFIG_JOYSTICK_ADI=m -CONFIG_JOYSTICK_COBRA=m -CONFIG_JOYSTICK_GF2K=m -CONFIG_JOYSTICK_GRIP=m -CONFIG_JOYSTICK_GRIP_MP=m -CONFIG_JOYSTICK_GUILLEMOT=m -CONFIG_JOYSTICK_INTERACT=m -CONFIG_JOYSTICK_SIDEWINDER=m -CONFIG_JOYSTICK_TMDC=m -CONFIG_JOYSTICK_IFORCE=m -CONFIG_JOYSTICK_IFORCE_USB=y -CONFIG_JOYSTICK_IFORCE_232=y -CONFIG_JOYSTICK_WARRIOR=m -CONFIG_JOYSTICK_MAGELLAN=m -CONFIG_JOYSTICK_SPACEORB=m -CONFIG_JOYSTICK_SPACEBALL=m -CONFIG_JOYSTICK_STINGER=m -CONFIG_JOYSTICK_TWIDJOY=m -CONFIG_JOYSTICK_ZHENHUA=m -CONFIG_JOYSTICK_DB9=m -CONFIG_JOYSTICK_GAMECON=m -CONFIG_JOYSTICK_TURBOGRAFX=m -CONFIG_JOYSTICK_AS5011=m -# CONFIG_JOYSTICK_JOYDUMP is not set -CONFIG_JOYSTICK_XPAD=m -CONFIG_JOYSTICK_XPAD_FF=y -CONFIG_JOYSTICK_XPAD_LEDS=y -CONFIG_JOYSTICK_WALKERA0701=m -CONFIG_JOYSTICK_PSXPAD_SPI=m -CONFIG_JOYSTICK_PSXPAD_SPI_FF=y -CONFIG_JOYSTICK_PXRC=m -CONFIG_INPUT_TABLET=y -CONFIG_TABLET_USB_ACECAD=m -CONFIG_TABLET_USB_AIPTEK=m -CONFIG_TABLET_USB_GTCO=m -CONFIG_TABLET_USB_HANWANG=m -CONFIG_TABLET_USB_KBTAB=m -CONFIG_TABLET_USB_PEGASUS=m -CONFIG_TABLET_SERIAL_WACOM4=m -CONFIG_INPUT_TOUCHSCREEN=y -CONFIG_TOUCHSCREEN_PROPERTIES=y -CONFIG_TOUCHSCREEN_ADS7846=m -CONFIG_TOUCHSCREEN_AD7877=m -CONFIG_TOUCHSCREEN_AD7879=m -CONFIG_TOUCHSCREEN_AD7879_I2C=m -CONFIG_TOUCHSCREEN_AD7879_SPI=m -# CONFIG_TOUCHSCREEN_ADC is not set -CONFIG_TOUCHSCREEN_ATMEL_MXT=m -# CONFIG_TOUCHSCREEN_ATMEL_MXT_T37 is not set -CONFIG_TOUCHSCREEN_AUO_PIXCIR=m -CONFIG_TOUCHSCREEN_BU21013=m -# CONFIG_TOUCHSCREEN_BU21029 is not set -CONFIG_TOUCHSCREEN_CHIPONE_ICN8505=m -CONFIG_TOUCHSCREEN_CY8CTMG110=m -CONFIG_TOUCHSCREEN_CYTTSP_CORE=m -CONFIG_TOUCHSCREEN_CYTTSP_I2C=m -CONFIG_TOUCHSCREEN_CYTTSP_SPI=m -CONFIG_TOUCHSCREEN_CYTTSP4_CORE=m -CONFIG_TOUCHSCREEN_CYTTSP4_I2C=m -CONFIG_TOUCHSCREEN_CYTTSP4_SPI=m -CONFIG_TOUCHSCREEN_DA9052=m -CONFIG_TOUCHSCREEN_DYNAPRO=m -CONFIG_TOUCHSCREEN_HAMPSHIRE=m -CONFIG_TOUCHSCREEN_EETI=m -CONFIG_TOUCHSCREEN_EGALAX_SERIAL=m -CONFIG_TOUCHSCREEN_EXC3000=m -CONFIG_TOUCHSCREEN_FUJITSU=m -CONFIG_TOUCHSCREEN_GOODIX=m -CONFIG_TOUCHSCREEN_HIDEEP=m -CONFIG_TOUCHSCREEN_ILI210X=m -CONFIG_TOUCHSCREEN_S6SY761=m -CONFIG_TOUCHSCREEN_GUNZE=m -CONFIG_TOUCHSCREEN_EKTF2127=m -CONFIG_TOUCHSCREEN_ELAN=m -CONFIG_TOUCHSCREEN_ELO=m -CONFIG_TOUCHSCREEN_WACOM_W8001=m -CONFIG_TOUCHSCREEN_WACOM_I2C=m -CONFIG_TOUCHSCREEN_MAX11801=m -CONFIG_TOUCHSCREEN_MCS5000=m -CONFIG_TOUCHSCREEN_MMS114=m -CONFIG_TOUCHSCREEN_MELFAS_MIP4=m -CONFIG_TOUCHSCREEN_MTOUCH=m -CONFIG_TOUCHSCREEN_INEXIO=m -CONFIG_TOUCHSCREEN_MK712=m -CONFIG_TOUCHSCREEN_PENMOUNT=m -CONFIG_TOUCHSCREEN_EDT_FT5X06=m -CONFIG_TOUCHSCREEN_TOUCHRIGHT=m -CONFIG_TOUCHSCREEN_TOUCHWIN=m -CONFIG_TOUCHSCREEN_TI_AM335X_TSC=m -CONFIG_TOUCHSCREEN_UCB1400=m -CONFIG_TOUCHSCREEN_PIXCIR=m -CONFIG_TOUCHSCREEN_WDT87XX_I2C=m -CONFIG_TOUCHSCREEN_WM831X=m -CONFIG_TOUCHSCREEN_WM97XX=m -CONFIG_TOUCHSCREEN_WM9705=y -CONFIG_TOUCHSCREEN_WM9712=y -CONFIG_TOUCHSCREEN_WM9713=y -CONFIG_TOUCHSCREEN_USB_COMPOSITE=m -CONFIG_TOUCHSCREEN_MC13783=m -CONFIG_TOUCHSCREEN_USB_EGALAX=y -CONFIG_TOUCHSCREEN_USB_PANJIT=y -CONFIG_TOUCHSCREEN_USB_3M=y -CONFIG_TOUCHSCREEN_USB_ITM=y -CONFIG_TOUCHSCREEN_USB_ETURBO=y -CONFIG_TOUCHSCREEN_USB_GUNZE=y -CONFIG_TOUCHSCREEN_USB_DMC_TSC10=y -CONFIG_TOUCHSCREEN_USB_IRTOUCH=y -CONFIG_TOUCHSCREEN_USB_IDEALTEK=y -CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH=y -CONFIG_TOUCHSCREEN_USB_GOTOP=y -CONFIG_TOUCHSCREEN_USB_JASTEC=y -CONFIG_TOUCHSCREEN_USB_ELO=y 
-CONFIG_TOUCHSCREEN_USB_E2I=y -CONFIG_TOUCHSCREEN_USB_ZYTRONIC=y -CONFIG_TOUCHSCREEN_USB_ETT_TC45USB=y -CONFIG_TOUCHSCREEN_USB_NEXIO=y -CONFIG_TOUCHSCREEN_USB_EASYTOUCH=y -CONFIG_TOUCHSCREEN_TOUCHIT213=m -CONFIG_TOUCHSCREEN_TSC_SERIO=m -CONFIG_TOUCHSCREEN_TSC200X_CORE=m -CONFIG_TOUCHSCREEN_TSC2004=m -CONFIG_TOUCHSCREEN_TSC2005=m -CONFIG_TOUCHSCREEN_TSC2007=m -CONFIG_TOUCHSCREEN_TSC2007_IIO=y -CONFIG_TOUCHSCREEN_PCAP=m -CONFIG_TOUCHSCREEN_RM_TS=m -CONFIG_TOUCHSCREEN_SILEAD=m -CONFIG_TOUCHSCREEN_SIS_I2C=m -CONFIG_TOUCHSCREEN_ST1232=m -CONFIG_TOUCHSCREEN_STMFTS=m -CONFIG_TOUCHSCREEN_SUR40=m -CONFIG_TOUCHSCREEN_SURFACE3_SPI=m -CONFIG_TOUCHSCREEN_SX8654=m -CONFIG_TOUCHSCREEN_TPS6507X=m -CONFIG_TOUCHSCREEN_ZET6223=m -CONFIG_TOUCHSCREEN_ZFORCE=m -CONFIG_TOUCHSCREEN_ROHM_BU21023=m -CONFIG_INPUT_MISC=y -CONFIG_INPUT_88PM80X_ONKEY=m -CONFIG_INPUT_AD714X=m -CONFIG_INPUT_AD714X_I2C=m -CONFIG_INPUT_AD714X_SPI=m -CONFIG_INPUT_ARIZONA_HAPTICS=m -CONFIG_INPUT_BMA150=m -CONFIG_INPUT_E3X0_BUTTON=m -CONFIG_INPUT_PCSPKR=m -CONFIG_INPUT_MAX77693_HAPTIC=m -CONFIG_INPUT_MC13783_PWRBUTTON=m -CONFIG_INPUT_MMA8450=m -CONFIG_INPUT_APANEL=m -CONFIG_INPUT_GP2A=m -CONFIG_INPUT_GPIO_BEEPER=m -CONFIG_INPUT_GPIO_DECODER=m -CONFIG_INPUT_ATLAS_BTNS=m -CONFIG_INPUT_ATI_REMOTE2=m -CONFIG_INPUT_KEYSPAN_REMOTE=m -CONFIG_INPUT_KXTJ9=m -CONFIG_INPUT_KXTJ9_POLLED_MODE=y -CONFIG_INPUT_POWERMATE=m -CONFIG_INPUT_YEALINK=m -CONFIG_INPUT_CM109=m -CONFIG_INPUT_REGULATOR_HAPTIC=m -CONFIG_INPUT_RETU_PWRBUTTON=m -CONFIG_INPUT_AXP20X_PEK=m -CONFIG_INPUT_UINPUT=m -CONFIG_INPUT_PCF50633_PMU=m -CONFIG_INPUT_PCF8574=m -CONFIG_INPUT_PWM_BEEPER=m -CONFIG_INPUT_PWM_VIBRA=m -CONFIG_INPUT_GPIO_ROTARY_ENCODER=m -CONFIG_INPUT_DA9052_ONKEY=m -CONFIG_INPUT_DA9063_ONKEY=m -CONFIG_INPUT_WM831X_ON=m -CONFIG_INPUT_PCAP=m -CONFIG_INPUT_ADXL34X=m -CONFIG_INPUT_ADXL34X_I2C=m -CONFIG_INPUT_ADXL34X_SPI=m -CONFIG_INPUT_IMS_PCU=m -CONFIG_INPUT_CMA3000=m -CONFIG_INPUT_CMA3000_I2C=m -CONFIG_INPUT_IDEAPAD_SLIDEBAR=m -CONFIG_INPUT_SOC_BUTTON_ARRAY=m -CONFIG_INPUT_DRV260X_HAPTICS=m -CONFIG_INPUT_DRV2665_HAPTICS=m -CONFIG_INPUT_DRV2667_HAPTICS=m -CONFIG_INPUT_RAVE_SP_PWRBUTTON=m -CONFIG_RMI4_CORE=m -CONFIG_RMI4_I2C=m -CONFIG_RMI4_SPI=m -CONFIG_RMI4_SMB=m -CONFIG_RMI4_F03=y -CONFIG_RMI4_F03_SERIO=m -CONFIG_RMI4_2D_SENSOR=y -CONFIG_RMI4_F11=y -CONFIG_RMI4_F12=y -CONFIG_RMI4_F30=y -CONFIG_RMI4_F34=y -CONFIG_RMI4_F54=y -CONFIG_RMI4_F55=y - -# -# Hardware I/O ports -# -CONFIG_SERIO=y -CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y -CONFIG_SERIO_I8042=y -CONFIG_SERIO_SERPORT=m -CONFIG_SERIO_CT82C710=m -CONFIG_SERIO_PARKBD=m -CONFIG_SERIO_PCIPS2=m -CONFIG_SERIO_LIBPS2=y -CONFIG_SERIO_RAW=m -CONFIG_SERIO_ALTERA_PS2=m -CONFIG_SERIO_PS2MULT=m -CONFIG_SERIO_ARC_PS2=m -CONFIG_HYPERV_KEYBOARD=m -CONFIG_SERIO_GPIO_PS2=m -CONFIG_USERIO=m -CONFIG_GAMEPORT=m -CONFIG_GAMEPORT_NS558=m -CONFIG_GAMEPORT_L4=m -CONFIG_GAMEPORT_EMU10K1=m -CONFIG_GAMEPORT_FM801=m - -# -# Character devices -# -CONFIG_TTY=y -CONFIG_VT=y -CONFIG_CONSOLE_TRANSLATIONS=y -CONFIG_VT_CONSOLE=y -CONFIG_VT_CONSOLE_SLEEP=y -CONFIG_HW_CONSOLE=y -CONFIG_VT_HW_CONSOLE_BINDING=y -CONFIG_UNIX98_PTYS=y -CONFIG_LEGACY_PTYS=y -CONFIG_LEGACY_PTY_COUNT=256 -CONFIG_SERIAL_NONSTANDARD=y -CONFIG_ROCKETPORT=m -CONFIG_CYCLADES=m -CONFIG_CYZ_INTR=y -CONFIG_MOXA_INTELLIO=m -CONFIG_MOXA_SMARTIO=m -CONFIG_SYNCLINK=m -CONFIG_SYNCLINKMP=m -CONFIG_SYNCLINK_GT=m -CONFIG_NOZOMI=m -CONFIG_ISI=m -CONFIG_N_HDLC=m -CONFIG_N_GSM=m -CONFIG_TRACE_ROUTER=m -CONFIG_TRACE_SINK=m -CONFIG_DEVMEM=y -# CONFIG_DEVKMEM is not set - -# -# Serial drivers -# 
-CONFIG_SERIAL_EARLYCON=y -CONFIG_SERIAL_8250=y -# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set -CONFIG_SERIAL_8250_PNP=y -CONFIG_SERIAL_8250_FINTEK=y -CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_SERIAL_8250_DMA=y -CONFIG_SERIAL_8250_PCI=y -CONFIG_SERIAL_8250_EXAR=y -CONFIG_SERIAL_8250_CS=m -CONFIG_SERIAL_8250_MEN_MCB=m -CONFIG_SERIAL_8250_NR_UARTS=4 -CONFIG_SERIAL_8250_RUNTIME_UARTS=4 -CONFIG_SERIAL_8250_EXTENDED=y -CONFIG_SERIAL_8250_MANY_PORTS=y -CONFIG_SERIAL_8250_SHARE_IRQ=y -CONFIG_SERIAL_8250_DETECT_IRQ=y -CONFIG_SERIAL_8250_RSA=y -CONFIG_SERIAL_8250_DW=m -CONFIG_SERIAL_8250_RT288X=y -CONFIG_SERIAL_8250_LPSS=y -CONFIG_SERIAL_8250_MID=y -CONFIG_SERIAL_8250_MOXA=m - -# -# Non-8250 serial port support -# -CONFIG_SERIAL_MAX3100=m -CONFIG_SERIAL_MAX310X=y -CONFIG_SERIAL_UARTLITE=m -CONFIG_SERIAL_UARTLITE_NR_UARTS=1 -CONFIG_SERIAL_CORE=y -CONFIG_SERIAL_CORE_CONSOLE=y -CONFIG_SERIAL_JSM=m -CONFIG_SERIAL_SCCNXP=m -CONFIG_SERIAL_SC16IS7XX_CORE=m -CONFIG_SERIAL_SC16IS7XX=m -CONFIG_SERIAL_SC16IS7XX_I2C=y -CONFIG_SERIAL_SC16IS7XX_SPI=y -CONFIG_SERIAL_ALTERA_JTAGUART=m -CONFIG_SERIAL_ALTERA_UART=m -CONFIG_SERIAL_ALTERA_UART_MAXPORTS=4 -CONFIG_SERIAL_ALTERA_UART_BAUDRATE=115200 -CONFIG_SERIAL_IFX6X60=m -CONFIG_SERIAL_ARC=m -CONFIG_SERIAL_ARC_NR_PORTS=1 -CONFIG_SERIAL_RP2=m -CONFIG_SERIAL_RP2_NR_UARTS=32 -CONFIG_SERIAL_FSL_LPUART=m -CONFIG_SERIAL_MEN_Z135=m -CONFIG_SERIAL_DEV_BUS=m -CONFIG_PRINTER=m -CONFIG_LP_CONSOLE=y -CONFIG_PPDEV=m -CONFIG_HVC_DRIVER=y -CONFIG_VIRTIO_CONSOLE=m -CONFIG_IPMI_HANDLER=m -CONFIG_IPMI_DMI_DECODE=y -CONFIG_IPMI_PANIC_EVENT=y -CONFIG_IPMI_PANIC_STRING=y -CONFIG_IPMI_DEVICE_INTERFACE=m -CONFIG_IPMI_SI=m -CONFIG_IPMI_SSIF=m -CONFIG_IPMI_WATCHDOG=m -CONFIG_IPMI_POWEROFF=m -CONFIG_HW_RANDOM=m -CONFIG_HW_RANDOM_TIMERIOMEM=m -CONFIG_HW_RANDOM_INTEL=m -CONFIG_HW_RANDOM_AMD=m -CONFIG_HW_RANDOM_VIA=m -CONFIG_HW_RANDOM_VIRTIO=m -CONFIG_NVRAM=m -CONFIG_R3964=m -CONFIG_APPLICOM=m - -# -# PCMCIA character devices -# -CONFIG_SYNCLINK_CS=m -CONFIG_CARDMAN_4000=m -CONFIG_CARDMAN_4040=m -CONFIG_SCR24X=m -CONFIG_IPWIRELESS=m -CONFIG_MWAVE=m -CONFIG_RAW_DRIVER=m -CONFIG_MAX_RAW_DEVS=256 -CONFIG_HPET=y -CONFIG_HPET_MMAP=y -CONFIG_HPET_MMAP_DEFAULT=y -CONFIG_HANGCHECK_TIMER=m -CONFIG_TCG_TPM=m -CONFIG_HW_RANDOM_TPM=y -CONFIG_TCG_TIS_CORE=m -CONFIG_TCG_TIS=m -CONFIG_TCG_TIS_SPI=m -CONFIG_TCG_TIS_I2C_ATMEL=m -CONFIG_TCG_TIS_I2C_INFINEON=m -CONFIG_TCG_TIS_I2C_NUVOTON=m -CONFIG_TCG_NSC=m -CONFIG_TCG_ATMEL=m -CONFIG_TCG_INFINEON=m -CONFIG_TCG_CRB=m -CONFIG_TCG_VTPM_PROXY=m -CONFIG_TCG_TIS_ST33ZP24=m -CONFIG_TCG_TIS_ST33ZP24_I2C=m -CONFIG_TCG_TIS_ST33ZP24_SPI=m -CONFIG_TELCLOCK=m -CONFIG_DEVPORT=y -CONFIG_XILLYBUS=m -CONFIG_XILLYBUS_PCIE=m -# CONFIG_RANDOM_TRUST_CPU is not set - -# -# I2C support -# -CONFIG_I2C=m -CONFIG_I2C_BOARDINFO=y -CONFIG_I2C_COMPAT=y -CONFIG_I2C_CHARDEV=m -CONFIG_I2C_MUX=m - -# -# Multiplexer I2C Chip support -# -CONFIG_I2C_MUX_GPIO=m -CONFIG_I2C_MUX_LTC4306=m -CONFIG_I2C_MUX_PCA9541=m -CONFIG_I2C_MUX_PCA954x=m -CONFIG_I2C_MUX_REG=m -CONFIG_I2C_MUX_MLXCPLD=m -CONFIG_I2C_HELPER_AUTO=y -CONFIG_I2C_SMBUS=m -CONFIG_I2C_ALGOBIT=m -CONFIG_I2C_ALGOPCA=m - -# -# I2C Hardware Bus support -# - -# -# PC SMBus host controller drivers -# -CONFIG_I2C_ALI1535=m -CONFIG_I2C_ALI1563=m -CONFIG_I2C_ALI15X3=m -CONFIG_I2C_AMD756=m -CONFIG_I2C_AMD756_S4882=m -CONFIG_I2C_AMD8111=m -CONFIG_I2C_I801=m -CONFIG_I2C_ISCH=m -CONFIG_I2C_ISMT=m -CONFIG_I2C_PIIX4=m -CONFIG_I2C_NFORCE2=m -CONFIG_I2C_NFORCE2_S4985=m -CONFIG_I2C_SIS5595=m -CONFIG_I2C_SIS630=m -CONFIG_I2C_SIS96X=m -CONFIG_I2C_VIA=m 
-CONFIG_I2C_VIAPRO=m - -# -# ACPI drivers -# -CONFIG_I2C_SCMI=m - -# -# I2C system bus drivers (mostly embedded / system-on-chip) -# -CONFIG_I2C_CBUS_GPIO=m -CONFIG_I2C_DESIGNWARE_CORE=m -CONFIG_I2C_DESIGNWARE_PLATFORM=m -# CONFIG_I2C_DESIGNWARE_SLAVE is not set -CONFIG_I2C_DESIGNWARE_PCI=m -# CONFIG_I2C_DESIGNWARE_BAYTRAIL is not set -CONFIG_I2C_EMEV2=m -CONFIG_I2C_GPIO=m -# CONFIG_I2C_GPIO_FAULT_INJECTOR is not set -CONFIG_I2C_KEMPLD=m -CONFIG_I2C_OCORES=m -CONFIG_I2C_PCA_PLATFORM=m -CONFIG_I2C_SIMTEC=m -CONFIG_I2C_XILINX=m - -# -# External I2C/SMBus adapter drivers -# -CONFIG_I2C_DIOLAN_U2C=m -CONFIG_I2C_DLN2=m -CONFIG_I2C_PARPORT=m -CONFIG_I2C_PARPORT_LIGHT=m -CONFIG_I2C_ROBOTFUZZ_OSIF=m -CONFIG_I2C_TAOS_EVM=m -CONFIG_I2C_TINY_USB=m -CONFIG_I2C_VIPERBOARD=m - -# -# Other I2C/SMBus bus drivers -# -CONFIG_I2C_MLXCPLD=m -CONFIG_I2C_CROS_EC_TUNNEL=m -# CONFIG_I2C_STUB is not set -CONFIG_I2C_SLAVE=y -CONFIG_I2C_SLAVE_EEPROM=m -# CONFIG_I2C_DEBUG_CORE is not set -# CONFIG_I2C_DEBUG_ALGO is not set -# CONFIG_I2C_DEBUG_BUS is not set -CONFIG_SPI=y -# CONFIG_SPI_DEBUG is not set -CONFIG_SPI_MASTER=y -CONFIG_SPI_MEM=y - -# -# SPI Master Controller Drivers -# -CONFIG_SPI_ALTERA=m -CONFIG_SPI_AXI_SPI_ENGINE=m -CONFIG_SPI_BITBANG=m -CONFIG_SPI_BUTTERFLY=m -CONFIG_SPI_CADENCE=m -CONFIG_SPI_DESIGNWARE=m -CONFIG_SPI_DW_PCI=m -CONFIG_SPI_DW_MID_DMA=y -CONFIG_SPI_DW_MMIO=m -CONFIG_SPI_DLN2=m -CONFIG_SPI_GPIO=m -CONFIG_SPI_LM70_LLP=m -CONFIG_SPI_OC_TINY=m -CONFIG_SPI_PXA2XX=m -CONFIG_SPI_PXA2XX_PCI=m -CONFIG_SPI_ROCKCHIP=m -CONFIG_SPI_SC18IS602=m -CONFIG_SPI_XCOMM=m -CONFIG_SPI_XILINX=m -CONFIG_SPI_ZYNQMP_GQSPI=m - -# -# SPI Protocol Masters -# -CONFIG_SPI_SPIDEV=m -CONFIG_SPI_LOOPBACK_TEST=m -CONFIG_SPI_TLE62X0=m -CONFIG_SPI_SLAVE=y -CONFIG_SPI_SLAVE_TIME=m -CONFIG_SPI_SLAVE_SYSTEM_CONTROL=m -CONFIG_SPMI=m -CONFIG_HSI=m -CONFIG_HSI_BOARDINFO=y - -# -# HSI controllers -# - -# -# HSI clients -# -CONFIG_HSI_CHAR=m -CONFIG_PPS=y -# CONFIG_PPS_DEBUG is not set -# CONFIG_NTP_PPS is not set - -# -# PPS clients support -# -# CONFIG_PPS_CLIENT_KTIMER is not set -CONFIG_PPS_CLIENT_LDISC=m -CONFIG_PPS_CLIENT_PARPORT=m -CONFIG_PPS_CLIENT_GPIO=m - -# -# PPS generators support -# - -# -# PTP clock support -# -CONFIG_PTP_1588_CLOCK=y -CONFIG_DP83640_PHY=m -CONFIG_PTP_1588_CLOCK_KVM=m -CONFIG_PINCTRL=y -CONFIG_PINMUX=y -CONFIG_PINCONF=y -CONFIG_GENERIC_PINCONF=y -# CONFIG_DEBUG_PINCTRL is not set -CONFIG_PINCTRL_AMD=m -CONFIG_PINCTRL_MCP23S08=m -CONFIG_PINCTRL_BAYTRAIL=y -CONFIG_PINCTRL_CHERRYVIEW=m -CONFIG_PINCTRL_INTEL=m -CONFIG_PINCTRL_BROXTON=m -CONFIG_PINCTRL_CANNONLAKE=m -CONFIG_PINCTRL_CEDARFORK=m -CONFIG_PINCTRL_DENVERTON=m -CONFIG_PINCTRL_GEMINILAKE=m -# CONFIG_PINCTRL_ICELAKE is not set -CONFIG_PINCTRL_LEWISBURG=m -CONFIG_PINCTRL_SUNRISEPOINT=m -CONFIG_GPIOLIB=y -CONFIG_GPIOLIB_FASTPATH_LIMIT=512 -CONFIG_GPIO_ACPI=y -CONFIG_GPIOLIB_IRQCHIP=y -# CONFIG_DEBUG_GPIO is not set -CONFIG_GPIO_SYSFS=y -CONFIG_GPIO_GENERIC=m -CONFIG_GPIO_MAX730X=m - -# -# Memory mapped GPIO drivers -# -CONFIG_GPIO_AMDPT=m -CONFIG_GPIO_DWAPB=m -CONFIG_GPIO_EXAR=m -CONFIG_GPIO_GENERIC_PLATFORM=m -CONFIG_GPIO_ICH=m -CONFIG_GPIO_LYNXPOINT=y -CONFIG_GPIO_MB86S7X=m -CONFIG_GPIO_MENZ127=m -CONFIG_GPIO_MOCKUP=m -CONFIG_GPIO_VX855=m - -# -# Port-mapped I/O GPIO drivers -# -CONFIG_GPIO_F7188X=m -CONFIG_GPIO_IT87=m -CONFIG_GPIO_SCH=m -CONFIG_GPIO_SCH311X=m -CONFIG_GPIO_WINBOND=m -CONFIG_GPIO_WS16C48=m - -# -# I2C GPIO expanders -# -CONFIG_GPIO_ADP5588=m -CONFIG_GPIO_MAX7300=m -CONFIG_GPIO_MAX732X=m -CONFIG_GPIO_PCA953X=m -CONFIG_GPIO_PCF857X=m 
-CONFIG_GPIO_TPIC2810=m - -# -# MFD GPIO expanders -# -CONFIG_GPIO_ARIZONA=m -CONFIG_GPIO_BD9571MWV=m -CONFIG_GPIO_DA9052=m -CONFIG_GPIO_DLN2=m -CONFIG_GPIO_JANZ_TTL=m -CONFIG_GPIO_KEMPLD=m -CONFIG_GPIO_LP3943=m -CONFIG_GPIO_LP873X=m -CONFIG_GPIO_TPS65086=m -CONFIG_GPIO_TPS65912=m -CONFIG_GPIO_UCB1400=m -CONFIG_GPIO_WHISKEY_COVE=m -CONFIG_GPIO_WM831X=m -CONFIG_GPIO_WM8994=m - -# -# PCI GPIO expanders -# -CONFIG_GPIO_AMD8111=m -CONFIG_GPIO_ML_IOH=m -CONFIG_GPIO_PCI_IDIO_16=m -CONFIG_GPIO_PCIE_IDIO_24=m -CONFIG_GPIO_RDC321X=m - -# -# SPI GPIO expanders -# -CONFIG_GPIO_MAX3191X=m -CONFIG_GPIO_MAX7301=m -CONFIG_GPIO_MC33880=m -CONFIG_GPIO_PISOSR=m -CONFIG_GPIO_XRA1403=m - -# -# USB GPIO expanders -# -CONFIG_GPIO_VIPERBOARD=m -CONFIG_W1=m -CONFIG_W1_CON=y - -# -# 1-wire Bus Masters -# -CONFIG_W1_MASTER_MATROX=m -CONFIG_W1_MASTER_DS2490=m -CONFIG_W1_MASTER_DS2482=m -CONFIG_W1_MASTER_DS1WM=m -CONFIG_W1_MASTER_GPIO=m - -# -# 1-wire Slaves -# -CONFIG_W1_SLAVE_THERM=m -CONFIG_W1_SLAVE_SMEM=m -# CONFIG_W1_SLAVE_DS2405 is not set -CONFIG_W1_SLAVE_DS2408=m -# CONFIG_W1_SLAVE_DS2408_READBACK is not set -CONFIG_W1_SLAVE_DS2413=m -CONFIG_W1_SLAVE_DS2406=m -CONFIG_W1_SLAVE_DS2423=m -CONFIG_W1_SLAVE_DS2805=m -CONFIG_W1_SLAVE_DS2431=m -CONFIG_W1_SLAVE_DS2433=m -CONFIG_W1_SLAVE_DS2433_CRC=y -CONFIG_W1_SLAVE_DS2438=m -CONFIG_W1_SLAVE_DS2780=m -CONFIG_W1_SLAVE_DS2781=m -CONFIG_W1_SLAVE_DS28E04=m -CONFIG_W1_SLAVE_DS28E17=m -CONFIG_POWER_AVS=y -CONFIG_POWER_RESET=y -# CONFIG_POWER_RESET_RESTART is not set -CONFIG_POWER_SUPPLY=y -# CONFIG_POWER_SUPPLY_DEBUG is not set -CONFIG_PDA_POWER=m -CONFIG_GENERIC_ADC_BATTERY=m -CONFIG_WM831X_BACKUP=m -CONFIG_WM831X_POWER=m -# CONFIG_TEST_POWER is not set -# CONFIG_CHARGER_ADP5061 is not set -CONFIG_BATTERY_DS2760=m -CONFIG_BATTERY_DS2780=m -CONFIG_BATTERY_DS2781=m -CONFIG_BATTERY_DS2782=m -CONFIG_BATTERY_SBS=m -CONFIG_CHARGER_SBS=m -CONFIG_MANAGER_SBS=m -CONFIG_BATTERY_BQ27XXX=m -CONFIG_BATTERY_BQ27XXX_I2C=m -CONFIG_BATTERY_BQ27XXX_HDQ=m -# CONFIG_BATTERY_BQ27XXX_DT_UPDATES_NVM is not set -CONFIG_BATTERY_DA9052=m -CONFIG_CHARGER_DA9150=m -CONFIG_BATTERY_DA9150=m -CONFIG_CHARGER_AXP20X=m -CONFIG_BATTERY_AXP20X=m -CONFIG_AXP20X_POWER=m -CONFIG_AXP288_CHARGER=m -CONFIG_AXP288_FUEL_GAUGE=m -CONFIG_BATTERY_MAX17040=m -CONFIG_BATTERY_MAX17042=m -CONFIG_BATTERY_MAX1721X=m -CONFIG_CHARGER_PCF50633=m -CONFIG_CHARGER_ISP1704=m -CONFIG_CHARGER_MAX8903=m -CONFIG_CHARGER_LP8727=m -CONFIG_CHARGER_GPIO=m -CONFIG_CHARGER_MANAGER=y -CONFIG_CHARGER_LTC3651=m -CONFIG_CHARGER_MAX14577=m -CONFIG_CHARGER_MAX77693=m -CONFIG_CHARGER_BQ2415X=m -CONFIG_CHARGER_BQ24190=m -CONFIG_CHARGER_BQ24257=m -CONFIG_CHARGER_BQ24735=m -CONFIG_CHARGER_BQ25890=m -CONFIG_CHARGER_SMB347=m -CONFIG_BATTERY_GAUGE_LTC2941=m -CONFIG_BATTERY_RT5033=m -CONFIG_CHARGER_RT9455=m -# CONFIG_CHARGER_CROS_USBPD is not set -CONFIG_HWMON=m -CONFIG_HWMON_VID=m -# CONFIG_HWMON_DEBUG_CHIP is not set - -# -# Native drivers -# -CONFIG_SENSORS_ABITUGURU=m -CONFIG_SENSORS_ABITUGURU3=m -CONFIG_SENSORS_AD7314=m -CONFIG_SENSORS_AD7414=m -CONFIG_SENSORS_AD7418=m -CONFIG_SENSORS_ADM1021=m -CONFIG_SENSORS_ADM1025=m -CONFIG_SENSORS_ADM1026=m -CONFIG_SENSORS_ADM1029=m -CONFIG_SENSORS_ADM1031=m -CONFIG_SENSORS_ADM9240=m -CONFIG_SENSORS_ADT7X10=m -CONFIG_SENSORS_ADT7310=m -CONFIG_SENSORS_ADT7410=m -CONFIG_SENSORS_ADT7411=m -CONFIG_SENSORS_ADT7462=m -CONFIG_SENSORS_ADT7470=m -CONFIG_SENSORS_ADT7475=m -CONFIG_SENSORS_ASC7621=m -CONFIG_SENSORS_K8TEMP=m -CONFIG_SENSORS_K10TEMP=m -CONFIG_SENSORS_FAM15H_POWER=m -CONFIG_SENSORS_APPLESMC=m 
-CONFIG_SENSORS_ASB100=m -CONFIG_SENSORS_ASPEED=m -CONFIG_SENSORS_ATXP1=m -CONFIG_SENSORS_DS620=m -CONFIG_SENSORS_DS1621=m -CONFIG_SENSORS_DELL_SMM=m -CONFIG_SENSORS_DA9052_ADC=m -CONFIG_SENSORS_I5K_AMB=m -CONFIG_SENSORS_F71805F=m -CONFIG_SENSORS_F71882FG=m -CONFIG_SENSORS_F75375S=m -CONFIG_SENSORS_MC13783_ADC=m -CONFIG_SENSORS_FSCHMD=m -CONFIG_SENSORS_FTSTEUTATES=m -CONFIG_SENSORS_GL518SM=m -CONFIG_SENSORS_GL520SM=m -CONFIG_SENSORS_G760A=m -CONFIG_SENSORS_G762=m -CONFIG_SENSORS_HIH6130=m -CONFIG_SENSORS_IBMAEM=m -CONFIG_SENSORS_IBMPEX=m -CONFIG_SENSORS_IIO_HWMON=m -CONFIG_SENSORS_I5500=m -CONFIG_SENSORS_CORETEMP=m -CONFIG_SENSORS_IT87=m -CONFIG_SENSORS_JC42=m -CONFIG_SENSORS_POWR1220=m -CONFIG_SENSORS_LINEAGE=m -CONFIG_SENSORS_LTC2945=m -CONFIG_SENSORS_LTC2990=m -CONFIG_SENSORS_LTC4151=m -CONFIG_SENSORS_LTC4215=m -CONFIG_SENSORS_LTC4222=m -CONFIG_SENSORS_LTC4245=m -CONFIG_SENSORS_LTC4260=m -CONFIG_SENSORS_LTC4261=m -CONFIG_SENSORS_MAX1111=m -CONFIG_SENSORS_MAX16065=m -CONFIG_SENSORS_MAX1619=m -CONFIG_SENSORS_MAX1668=m -CONFIG_SENSORS_MAX197=m -CONFIG_SENSORS_MAX31722=m -CONFIG_SENSORS_MAX6621=m -CONFIG_SENSORS_MAX6639=m -CONFIG_SENSORS_MAX6642=m -CONFIG_SENSORS_MAX6650=m -CONFIG_SENSORS_MAX6697=m -CONFIG_SENSORS_MAX31790=m -CONFIG_SENSORS_MCP3021=m -# CONFIG_SENSORS_MLXREG_FAN is not set -CONFIG_SENSORS_TC654=m -CONFIG_SENSORS_MENF21BMC_HWMON=m -CONFIG_SENSORS_ADCXX=m -CONFIG_SENSORS_LM63=m -CONFIG_SENSORS_LM70=m -CONFIG_SENSORS_LM73=m -CONFIG_SENSORS_LM75=m -CONFIG_SENSORS_LM77=m -CONFIG_SENSORS_LM78=m -CONFIG_SENSORS_LM80=m -CONFIG_SENSORS_LM83=m -CONFIG_SENSORS_LM85=m -CONFIG_SENSORS_LM87=m -CONFIG_SENSORS_LM90=m -CONFIG_SENSORS_LM92=m -CONFIG_SENSORS_LM93=m -CONFIG_SENSORS_LM95234=m -CONFIG_SENSORS_LM95241=m -CONFIG_SENSORS_LM95245=m -CONFIG_SENSORS_PC87360=m -CONFIG_SENSORS_PC87427=m -CONFIG_SENSORS_NTC_THERMISTOR=m -CONFIG_SENSORS_NCT6683=m -CONFIG_SENSORS_NCT6775=m -CONFIG_SENSORS_NCT7802=m -CONFIG_SENSORS_NCT7904=m -# CONFIG_SENSORS_NPCM7XX is not set -CONFIG_SENSORS_PCF8591=m -CONFIG_PMBUS=m -CONFIG_SENSORS_PMBUS=m -CONFIG_SENSORS_ADM1275=m -CONFIG_SENSORS_IBM_CFFPS=m -CONFIG_SENSORS_IR35221=m -CONFIG_SENSORS_LM25066=m -CONFIG_SENSORS_LTC2978=m -CONFIG_SENSORS_LTC2978_REGULATOR=y -CONFIG_SENSORS_LTC3815=m -CONFIG_SENSORS_MAX16064=m -CONFIG_SENSORS_MAX20751=m -CONFIG_SENSORS_MAX31785=m -CONFIG_SENSORS_MAX34440=m -CONFIG_SENSORS_MAX8688=m -CONFIG_SENSORS_TPS40422=m -CONFIG_SENSORS_TPS53679=m -CONFIG_SENSORS_UCD9000=m -CONFIG_SENSORS_UCD9200=m -CONFIG_SENSORS_ZL6100=m -CONFIG_SENSORS_SHT15=m -CONFIG_SENSORS_SHT21=m -CONFIG_SENSORS_SHT3x=m -CONFIG_SENSORS_SHTC1=m -CONFIG_SENSORS_SIS5595=m -CONFIG_SENSORS_DME1737=m -CONFIG_SENSORS_EMC1403=m -CONFIG_SENSORS_EMC2103=m -CONFIG_SENSORS_EMC6W201=m -CONFIG_SENSORS_SMSC47M1=m -CONFIG_SENSORS_SMSC47M192=m -CONFIG_SENSORS_SMSC47B397=m -CONFIG_SENSORS_SCH56XX_COMMON=m -CONFIG_SENSORS_SCH5627=m -CONFIG_SENSORS_SCH5636=m -CONFIG_SENSORS_STTS751=m -CONFIG_SENSORS_SMM665=m -CONFIG_SENSORS_ADC128D818=m -CONFIG_SENSORS_ADS1015=m -CONFIG_SENSORS_ADS7828=m -CONFIG_SENSORS_ADS7871=m -CONFIG_SENSORS_AMC6821=m -CONFIG_SENSORS_INA209=m -CONFIG_SENSORS_INA2XX=m -CONFIG_SENSORS_INA3221=m -CONFIG_SENSORS_TC74=m -CONFIG_SENSORS_THMC50=m -CONFIG_SENSORS_TMP102=m -CONFIG_SENSORS_TMP103=m -CONFIG_SENSORS_TMP108=m -CONFIG_SENSORS_TMP401=m -CONFIG_SENSORS_TMP421=m -CONFIG_SENSORS_VIA_CPUTEMP=m -CONFIG_SENSORS_VIA686A=m -CONFIG_SENSORS_VT1211=m -CONFIG_SENSORS_VT8231=m -CONFIG_SENSORS_W83773G=m -CONFIG_SENSORS_W83781D=m -CONFIG_SENSORS_W83791D=m 
-CONFIG_SENSORS_W83792D=m -CONFIG_SENSORS_W83793=m -CONFIG_SENSORS_W83795=m -# CONFIG_SENSORS_W83795_FANCTRL is not set -CONFIG_SENSORS_W83L785TS=m -CONFIG_SENSORS_W83L786NG=m -CONFIG_SENSORS_W83627HF=m -CONFIG_SENSORS_W83627EHF=m -CONFIG_SENSORS_WM831X=m -CONFIG_SENSORS_XGENE=m - -# -# ACPI drivers -# -CONFIG_SENSORS_ACPI_POWER=m -CONFIG_SENSORS_ATK0110=m -CONFIG_THERMAL=y -# CONFIG_THERMAL_STATISTICS is not set -CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 -CONFIG_THERMAL_WRITABLE_TRIPS=y -CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y -# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set -# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set -# CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR is not set -CONFIG_THERMAL_GOV_FAIR_SHARE=y -CONFIG_THERMAL_GOV_STEP_WISE=y -CONFIG_THERMAL_GOV_BANG_BANG=y -CONFIG_THERMAL_GOV_USER_SPACE=y -# CONFIG_THERMAL_GOV_POWER_ALLOCATOR is not set -CONFIG_CLOCK_THERMAL=y -CONFIG_DEVFREQ_THERMAL=y -# CONFIG_THERMAL_EMULATION is not set -CONFIG_INTEL_POWERCLAMP=m -CONFIG_X86_PKG_TEMP_THERMAL=m -CONFIG_INTEL_SOC_DTS_IOSF_CORE=m -CONFIG_INTEL_SOC_DTS_THERMAL=m - -# -# ACPI INT340X thermal drivers -# -CONFIG_INT340X_THERMAL=m -CONFIG_ACPI_THERMAL_REL=m -CONFIG_INT3406_THERMAL=m -CONFIG_INTEL_BXT_PMIC_THERMAL=m -CONFIG_INTEL_PCH_THERMAL=m -CONFIG_GENERIC_ADC_THERMAL=m -CONFIG_WATCHDOG=y -CONFIG_WATCHDOG_CORE=y -# CONFIG_WATCHDOG_NOWAYOUT is not set -CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y -# CONFIG_WATCHDOG_SYSFS is not set - -# -# Watchdog Device Drivers -# -CONFIG_SOFT_WATCHDOG=m -# CONFIG_SOFT_WATCHDOG_PRETIMEOUT is not set -CONFIG_DA9052_WATCHDOG=m -CONFIG_DA9063_WATCHDOG=m -CONFIG_DA9062_WATCHDOG=m -CONFIG_MENF21BMC_WATCHDOG=m -# CONFIG_MENZ069_WATCHDOG is not set -CONFIG_WDAT_WDT=m -CONFIG_WM831X_WATCHDOG=m -CONFIG_XILINX_WATCHDOG=m -CONFIG_ZIIRAVE_WATCHDOG=m -CONFIG_RAVE_SP_WATCHDOG=m -CONFIG_CADENCE_WATCHDOG=m -CONFIG_DW_WATCHDOG=m -CONFIG_MAX63XX_WATCHDOG=m -CONFIG_RETU_WATCHDOG=m -CONFIG_ACQUIRE_WDT=m -CONFIG_ADVANTECH_WDT=m -CONFIG_ALIM1535_WDT=m -CONFIG_ALIM7101_WDT=m -CONFIG_EBC_C384_WDT=m -CONFIG_F71808E_WDT=m -# CONFIG_SP5100_TCO is not set -CONFIG_SBC_FITPC2_WATCHDOG=m -CONFIG_EUROTECH_WDT=m -CONFIG_IB700_WDT=m -CONFIG_IBMASR=m -CONFIG_WAFER_WDT=m -CONFIG_I6300ESB_WDT=m -CONFIG_IE6XX_WDT=m -CONFIG_ITCO_WDT=m -CONFIG_ITCO_VENDOR_SUPPORT=y -CONFIG_IT8712F_WDT=m -CONFIG_IT87_WDT=m -CONFIG_HP_WATCHDOG=m -CONFIG_KEMPLD_WDT=m -CONFIG_HPWDT_NMI_DECODING=y -CONFIG_SC1200_WDT=m -CONFIG_PC87413_WDT=m -CONFIG_NV_TCO=m -CONFIG_60XX_WDT=m -CONFIG_CPU5_WDT=m -CONFIG_SMSC_SCH311X_WDT=m -CONFIG_SMSC37B787_WDT=m -CONFIG_VIA_WDT=m -CONFIG_W83627HF_WDT=m -CONFIG_W83877F_WDT=m -CONFIG_W83977F_WDT=m -CONFIG_MACHZ_WDT=m -CONFIG_SBC_EPX_C3_WATCHDOG=m -CONFIG_INTEL_MEI_WDT=m -CONFIG_NI903X_WDT=m -CONFIG_NIC7018_WDT=m -CONFIG_MEN_A21_WDT=m - -# -# PCI-based Watchdog Cards -# -CONFIG_PCIPCWATCHDOG=m -CONFIG_WDTPCI=m - -# -# USB-based Watchdog Cards -# -CONFIG_USBPCWATCHDOG=m - -# -# Watchdog Pretimeout Governors -# -CONFIG_WATCHDOG_PRETIMEOUT_GOV=y -# CONFIG_WATCHDOG_PRETIMEOUT_DEFAULT_GOV_NOOP is not set -CONFIG_WATCHDOG_PRETIMEOUT_DEFAULT_GOV_PANIC=y -CONFIG_WATCHDOG_PRETIMEOUT_GOV_NOOP=m -CONFIG_WATCHDOG_PRETIMEOUT_GOV_PANIC=y -CONFIG_SSB_POSSIBLE=y -CONFIG_SSB=m -CONFIG_SSB_SPROM=y -CONFIG_SSB_BLOCKIO=y -CONFIG_SSB_PCIHOST_POSSIBLE=y -CONFIG_SSB_PCIHOST=y -CONFIG_SSB_B43_PCI_BRIDGE=y -CONFIG_SSB_PCMCIAHOST_POSSIBLE=y -CONFIG_SSB_PCMCIAHOST=y -CONFIG_SSB_SDIOHOST_POSSIBLE=y -CONFIG_SSB_SDIOHOST=y -CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y -CONFIG_SSB_DRIVER_PCICORE=y -CONFIG_SSB_DRIVER_GPIO=y 
-CONFIG_BCMA_POSSIBLE=y -CONFIG_BCMA=m -CONFIG_BCMA_BLOCKIO=y -CONFIG_BCMA_HOST_PCI_POSSIBLE=y -CONFIG_BCMA_HOST_PCI=y -CONFIG_BCMA_HOST_SOC=y -CONFIG_BCMA_DRIVER_PCI=y -CONFIG_BCMA_SFLASH=y -CONFIG_BCMA_DRIVER_GMAC_CMN=y -CONFIG_BCMA_DRIVER_GPIO=y -# CONFIG_BCMA_DEBUG is not set - -# -# Multifunction device drivers -# -CONFIG_MFD_CORE=y -CONFIG_MFD_BCM590XX=m -CONFIG_MFD_BD9571MWV=m -CONFIG_MFD_AXP20X=m -CONFIG_MFD_AXP20X_I2C=m -CONFIG_MFD_CROS_EC=m -# CONFIG_MFD_CROS_EC_CHARDEV is not set -# CONFIG_MFD_MADERA is not set -CONFIG_PMIC_DA9052=y -CONFIG_MFD_DA9052_SPI=y -CONFIG_MFD_DA9062=m -CONFIG_MFD_DA9063=m -CONFIG_MFD_DA9150=m -CONFIG_MFD_DLN2=m -CONFIG_MFD_MC13XXX=m -CONFIG_MFD_MC13XXX_SPI=m -CONFIG_MFD_MC13XXX_I2C=m -CONFIG_HTC_PASIC3=m -CONFIG_MFD_INTEL_QUARK_I2C_GPIO=m -CONFIG_LPC_ICH=m -CONFIG_LPC_SCH=m -CONFIG_INTEL_SOC_PMIC_BXTWC=m -CONFIG_INTEL_SOC_PMIC_CHTDC_TI=m -CONFIG_MFD_INTEL_LPSS=m -CONFIG_MFD_INTEL_LPSS_ACPI=m -CONFIG_MFD_INTEL_LPSS_PCI=m -CONFIG_MFD_JANZ_CMODIO=m -CONFIG_MFD_KEMPLD=m -CONFIG_MFD_88PM800=m -CONFIG_MFD_88PM805=m -CONFIG_MFD_MAX14577=m -CONFIG_MFD_MAX77693=m -CONFIG_MFD_MAX8907=m -CONFIG_MFD_MT6397=m -CONFIG_MFD_MENF21BMC=m -CONFIG_EZX_PCAP=y -CONFIG_MFD_VIPERBOARD=m -CONFIG_MFD_RETU=m -CONFIG_MFD_PCF50633=m -CONFIG_PCF50633_ADC=m -CONFIG_PCF50633_GPIO=m -CONFIG_UCB1400_CORE=m -CONFIG_MFD_RDC321X=m -CONFIG_MFD_RT5033=m -CONFIG_MFD_SI476X_CORE=m -CONFIG_MFD_SM501=m -CONFIG_MFD_SM501_GPIO=y -CONFIG_MFD_SKY81452=m -CONFIG_ABX500_CORE=y -CONFIG_MFD_SYSCON=y -CONFIG_MFD_TI_AM335X_TSCADC=m -CONFIG_MFD_LP3943=m -CONFIG_MFD_TI_LMU=m -CONFIG_TPS6105X=m -CONFIG_TPS65010=m -CONFIG_TPS6507X=m -CONFIG_MFD_TPS65086=m -CONFIG_MFD_TI_LP873X=m -CONFIG_MFD_TPS65912=y -CONFIG_MFD_TPS65912_I2C=m -CONFIG_MFD_TPS65912_SPI=y -CONFIG_MFD_WL1273_CORE=m -CONFIG_MFD_LM3533=m -CONFIG_MFD_VX855=m -CONFIG_MFD_ARIZONA=y -CONFIG_MFD_ARIZONA_I2C=m -CONFIG_MFD_ARIZONA_SPI=m -CONFIG_MFD_CS47L24=y -CONFIG_MFD_WM5102=y -CONFIG_MFD_WM5110=y -CONFIG_MFD_WM8997=y -CONFIG_MFD_WM8998=y -CONFIG_MFD_WM831X=y -CONFIG_MFD_WM831X_SPI=y -CONFIG_MFD_WM8994=m -CONFIG_RAVE_SP_CORE=m -CONFIG_REGULATOR=y -# CONFIG_REGULATOR_DEBUG is not set -CONFIG_REGULATOR_FIXED_VOLTAGE=m -CONFIG_REGULATOR_VIRTUAL_CONSUMER=m -CONFIG_REGULATOR_USERSPACE_CONSUMER=m -CONFIG_REGULATOR_88PG86X=m -CONFIG_REGULATOR_88PM800=m -CONFIG_REGULATOR_ACT8865=m -CONFIG_REGULATOR_AD5398=m -CONFIG_REGULATOR_ANATOP=m -CONFIG_REGULATOR_ARIZONA_LDO1=m -CONFIG_REGULATOR_ARIZONA_MICSUPP=m -CONFIG_REGULATOR_AXP20X=m -CONFIG_REGULATOR_BCM590XX=m -CONFIG_REGULATOR_BD9571MWV=m -CONFIG_REGULATOR_DA9052=m -CONFIG_REGULATOR_DA9062=m -CONFIG_REGULATOR_DA9063=m -CONFIG_REGULATOR_DA9210=m -CONFIG_REGULATOR_DA9211=m -CONFIG_REGULATOR_FAN53555=m -CONFIG_REGULATOR_GPIO=m -CONFIG_REGULATOR_ISL9305=m -CONFIG_REGULATOR_ISL6271A=m -CONFIG_REGULATOR_LM363X=m -CONFIG_REGULATOR_LP3971=m -CONFIG_REGULATOR_LP3972=m -CONFIG_REGULATOR_LP872X=m -CONFIG_REGULATOR_LP8755=m -CONFIG_REGULATOR_LTC3589=m -CONFIG_REGULATOR_LTC3676=m -CONFIG_REGULATOR_MAX14577=m -CONFIG_REGULATOR_MAX1586=m -CONFIG_REGULATOR_MAX8649=m -CONFIG_REGULATOR_MAX8660=m -CONFIG_REGULATOR_MAX8907=m -CONFIG_REGULATOR_MAX8952=m -CONFIG_REGULATOR_MAX77693=m -CONFIG_REGULATOR_MC13XXX_CORE=m -CONFIG_REGULATOR_MC13783=m -CONFIG_REGULATOR_MC13892=m -CONFIG_REGULATOR_MT6311=m -CONFIG_REGULATOR_MT6323=m -CONFIG_REGULATOR_MT6397=m -CONFIG_REGULATOR_PCAP=m -CONFIG_REGULATOR_PCF50633=m -CONFIG_REGULATOR_PFUZE100=m -CONFIG_REGULATOR_PV88060=m -CONFIG_REGULATOR_PV88080=m -CONFIG_REGULATOR_PV88090=m 
-CONFIG_REGULATOR_PWM=m -CONFIG_REGULATOR_QCOM_SPMI=m -CONFIG_REGULATOR_RT5033=m -CONFIG_REGULATOR_SKY81452=m -CONFIG_REGULATOR_TPS51632=m -CONFIG_REGULATOR_TPS6105X=m -CONFIG_REGULATOR_TPS62360=m -CONFIG_REGULATOR_TPS65023=m -CONFIG_REGULATOR_TPS6507X=m -CONFIG_REGULATOR_TPS65086=m -CONFIG_REGULATOR_TPS65132=m -CONFIG_REGULATOR_TPS6524X=m -CONFIG_REGULATOR_TPS65912=m -CONFIG_REGULATOR_WM831X=m -CONFIG_REGULATOR_WM8994=m -CONFIG_CEC_CORE=m -CONFIG_CEC_NOTIFIER=y -CONFIG_CEC_PIN=y -CONFIG_RC_CORE=m -CONFIG_RC_MAP=m -CONFIG_LIRC=y -CONFIG_RC_DECODERS=y -CONFIG_IR_NEC_DECODER=m -CONFIG_IR_RC5_DECODER=m -CONFIG_IR_RC6_DECODER=m -CONFIG_IR_JVC_DECODER=m -CONFIG_IR_SONY_DECODER=m -CONFIG_IR_SANYO_DECODER=m -CONFIG_IR_SHARP_DECODER=m -CONFIG_IR_MCE_KBD_DECODER=m -CONFIG_IR_XMP_DECODER=m -CONFIG_IR_IMON_DECODER=m -CONFIG_RC_DEVICES=y -CONFIG_RC_ATI_REMOTE=m -CONFIG_IR_ENE=m -CONFIG_IR_IMON=m -CONFIG_IR_IMON_RAW=m -CONFIG_IR_MCEUSB=m -CONFIG_IR_ITE_CIR=m -CONFIG_IR_FINTEK=m -CONFIG_IR_NUVOTON=m -CONFIG_IR_REDRAT3=m -CONFIG_IR_STREAMZAP=m -CONFIG_IR_WINBOND_CIR=m -CONFIG_IR_IGORPLUGUSB=m -CONFIG_IR_IGUANA=m -CONFIG_IR_TTUSBIR=m -CONFIG_RC_LOOPBACK=m -CONFIG_IR_SERIAL=m -CONFIG_IR_SERIAL_TRANSMITTER=y -CONFIG_IR_SIR=m -CONFIG_MEDIA_SUPPORT=m - -# -# Multimedia core support -# -CONFIG_MEDIA_CAMERA_SUPPORT=y -CONFIG_MEDIA_ANALOG_TV_SUPPORT=y -CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y -CONFIG_MEDIA_RADIO_SUPPORT=y -CONFIG_MEDIA_SDR_SUPPORT=y -CONFIG_MEDIA_CEC_SUPPORT=y -CONFIG_MEDIA_CEC_RC=y -# CONFIG_CEC_PIN_ERROR_INJ is not set -CONFIG_MEDIA_CONTROLLER=y -CONFIG_MEDIA_CONTROLLER_DVB=y -CONFIG_VIDEO_DEV=m -CONFIG_VIDEO_V4L2_SUBDEV_API=y -CONFIG_VIDEO_V4L2=m -# CONFIG_VIDEO_ADV_DEBUG is not set -# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set -CONFIG_VIDEO_PCI_SKELETON=m -CONFIG_VIDEO_TUNER=m -CONFIG_V4L2_MEM2MEM_DEV=m -CONFIG_V4L2_FLASH_LED_CLASS=m -CONFIG_V4L2_FWNODE=m -CONFIG_VIDEOBUF_GEN=m -CONFIG_VIDEOBUF_DMA_SG=m -CONFIG_VIDEOBUF_VMALLOC=m -CONFIG_DVB_CORE=m -# CONFIG_DVB_MMAP is not set -CONFIG_DVB_NET=y -CONFIG_TTPCI_EEPROM=m -CONFIG_DVB_MAX_ADAPTERS=8 -CONFIG_DVB_DYNAMIC_MINORS=y -# CONFIG_DVB_DEMUX_SECTION_LOSS_LOG is not set -# CONFIG_DVB_ULE_DEBUG is not set - -# -# Media drivers -# -CONFIG_MEDIA_USB_SUPPORT=y - -# -# Webcam devices -# -CONFIG_USB_VIDEO_CLASS=m -CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y -CONFIG_USB_GSPCA=m -CONFIG_USB_M5602=m -CONFIG_USB_STV06XX=m -CONFIG_USB_GL860=m -CONFIG_USB_GSPCA_BENQ=m -CONFIG_USB_GSPCA_CONEX=m -CONFIG_USB_GSPCA_CPIA1=m -CONFIG_USB_GSPCA_DTCS033=m -CONFIG_USB_GSPCA_ETOMS=m -CONFIG_USB_GSPCA_FINEPIX=m -CONFIG_USB_GSPCA_JEILINJ=m -CONFIG_USB_GSPCA_JL2005BCD=m -CONFIG_USB_GSPCA_KINECT=m -CONFIG_USB_GSPCA_KONICA=m -CONFIG_USB_GSPCA_MARS=m -CONFIG_USB_GSPCA_MR97310A=m -CONFIG_USB_GSPCA_NW80X=m -CONFIG_USB_GSPCA_OV519=m -CONFIG_USB_GSPCA_OV534=m -CONFIG_USB_GSPCA_OV534_9=m -CONFIG_USB_GSPCA_PAC207=m -CONFIG_USB_GSPCA_PAC7302=m -CONFIG_USB_GSPCA_PAC7311=m -CONFIG_USB_GSPCA_SE401=m -CONFIG_USB_GSPCA_SN9C2028=m -CONFIG_USB_GSPCA_SN9C20X=m -CONFIG_USB_GSPCA_SONIXB=m -CONFIG_USB_GSPCA_SONIXJ=m -CONFIG_USB_GSPCA_SPCA500=m -CONFIG_USB_GSPCA_SPCA501=m -CONFIG_USB_GSPCA_SPCA505=m -CONFIG_USB_GSPCA_SPCA506=m -CONFIG_USB_GSPCA_SPCA508=m -CONFIG_USB_GSPCA_SPCA561=m -CONFIG_USB_GSPCA_SPCA1528=m -CONFIG_USB_GSPCA_SQ905=m -CONFIG_USB_GSPCA_SQ905C=m -CONFIG_USB_GSPCA_SQ930X=m -CONFIG_USB_GSPCA_STK014=m -CONFIG_USB_GSPCA_STK1135=m -CONFIG_USB_GSPCA_STV0680=m -CONFIG_USB_GSPCA_SUNPLUS=m -CONFIG_USB_GSPCA_T613=m -CONFIG_USB_GSPCA_TOPRO=m -CONFIG_USB_GSPCA_TOUPTEK=m -CONFIG_USB_GSPCA_TV8532=m 
-CONFIG_USB_GSPCA_VC032X=m -CONFIG_USB_GSPCA_VICAM=m -CONFIG_USB_GSPCA_XIRLINK_CIT=m -CONFIG_USB_GSPCA_ZC3XX=m -CONFIG_USB_PWC=m -# CONFIG_USB_PWC_DEBUG is not set -CONFIG_USB_PWC_INPUT_EVDEV=y -CONFIG_VIDEO_CPIA2=m -CONFIG_USB_ZR364XX=m -CONFIG_USB_STKWEBCAM=m -CONFIG_USB_S2255=m -CONFIG_VIDEO_USBTV=m - -# -# Analog TV USB devices -# -CONFIG_VIDEO_PVRUSB2=m -CONFIG_VIDEO_PVRUSB2_SYSFS=y -CONFIG_VIDEO_PVRUSB2_DVB=y -# CONFIG_VIDEO_PVRUSB2_DEBUGIFC is not set -CONFIG_VIDEO_HDPVR=m -CONFIG_VIDEO_USBVISION=m -CONFIG_VIDEO_STK1160_COMMON=m -CONFIG_VIDEO_STK1160=m -CONFIG_VIDEO_GO7007=m -CONFIG_VIDEO_GO7007_USB=m -CONFIG_VIDEO_GO7007_LOADER=m -CONFIG_VIDEO_GO7007_USB_S2250_BOARD=m - -# -# Analog/digital TV USB devices -# -CONFIG_VIDEO_AU0828=m -CONFIG_VIDEO_AU0828_V4L2=y -CONFIG_VIDEO_AU0828_RC=y -CONFIG_VIDEO_CX231XX=m -CONFIG_VIDEO_CX231XX_RC=y -CONFIG_VIDEO_CX231XX_ALSA=m -CONFIG_VIDEO_CX231XX_DVB=m -CONFIG_VIDEO_TM6000=m -CONFIG_VIDEO_TM6000_ALSA=m -CONFIG_VIDEO_TM6000_DVB=m - -# -# Digital TV USB devices -# -CONFIG_DVB_USB=m -# CONFIG_DVB_USB_DEBUG is not set -CONFIG_DVB_USB_DIB3000MC=m -CONFIG_DVB_USB_A800=m -CONFIG_DVB_USB_DIBUSB_MB=m -CONFIG_DVB_USB_DIBUSB_MB_FAULTY=y -CONFIG_DVB_USB_DIBUSB_MC=m -CONFIG_DVB_USB_DIB0700=m -CONFIG_DVB_USB_UMT_010=m -CONFIG_DVB_USB_CXUSB=m -CONFIG_DVB_USB_M920X=m -CONFIG_DVB_USB_DIGITV=m -CONFIG_DVB_USB_VP7045=m -CONFIG_DVB_USB_VP702X=m -CONFIG_DVB_USB_GP8PSK=m -CONFIG_DVB_USB_NOVA_T_USB2=m -CONFIG_DVB_USB_TTUSB2=m -CONFIG_DVB_USB_DTT200U=m -CONFIG_DVB_USB_OPERA1=m -CONFIG_DVB_USB_AF9005=m -CONFIG_DVB_USB_AF9005_REMOTE=m -CONFIG_DVB_USB_PCTV452E=m -CONFIG_DVB_USB_DW2102=m -CONFIG_DVB_USB_CINERGY_T2=m -CONFIG_DVB_USB_DTV5100=m -CONFIG_DVB_USB_AZ6027=m -CONFIG_DVB_USB_TECHNISAT_USB2=m -CONFIG_DVB_USB_V2=m -CONFIG_DVB_USB_AF9015=m -CONFIG_DVB_USB_AF9035=m -CONFIG_DVB_USB_ANYSEE=m -CONFIG_DVB_USB_AU6610=m -CONFIG_DVB_USB_AZ6007=m -CONFIG_DVB_USB_CE6230=m -CONFIG_DVB_USB_EC168=m -CONFIG_DVB_USB_GL861=m -CONFIG_DVB_USB_LME2510=m -CONFIG_DVB_USB_MXL111SF=m -CONFIG_DVB_USB_RTL28XXU=m -CONFIG_DVB_USB_DVBSKY=m -CONFIG_DVB_USB_ZD1301=m -CONFIG_DVB_TTUSB_BUDGET=m -CONFIG_DVB_TTUSB_DEC=m -CONFIG_SMS_USB_DRV=m -CONFIG_DVB_B2C2_FLEXCOP_USB=m -# CONFIG_DVB_B2C2_FLEXCOP_USB_DEBUG is not set -CONFIG_DVB_AS102=m - -# -# Webcam, TV (analog/digital) USB devices -# -CONFIG_VIDEO_EM28XX=m -CONFIG_VIDEO_EM28XX_V4L2=m -CONFIG_VIDEO_EM28XX_ALSA=m -CONFIG_VIDEO_EM28XX_DVB=m -CONFIG_VIDEO_EM28XX_RC=m - -# -# Software defined radio USB devices -# -CONFIG_USB_AIRSPY=m -CONFIG_USB_HACKRF=m -CONFIG_USB_MSI2500=m - -# -# USB HDMI CEC adapters -# -CONFIG_USB_PULSE8_CEC=m -CONFIG_USB_RAINSHADOW_CEC=m -CONFIG_MEDIA_PCI_SUPPORT=y - -# -# Media capture support -# -CONFIG_VIDEO_MEYE=m -CONFIG_VIDEO_SOLO6X10=m -CONFIG_VIDEO_TW5864=m -CONFIG_VIDEO_TW68=m -CONFIG_VIDEO_TW686X=m - -# -# Media capture/analog TV support -# -CONFIG_VIDEO_IVTV=m -# CONFIG_VIDEO_IVTV_DEPRECATED_IOCTLS is not set -CONFIG_VIDEO_IVTV_ALSA=m -CONFIG_VIDEO_FB_IVTV=m -CONFIG_VIDEO_HEXIUM_GEMINI=m -CONFIG_VIDEO_HEXIUM_ORION=m -CONFIG_VIDEO_MXB=m -CONFIG_VIDEO_DT3155=m - -# -# Media capture/analog/hybrid TV support -# -CONFIG_VIDEO_CX18=m -CONFIG_VIDEO_CX18_ALSA=m -CONFIG_VIDEO_CX23885=m -CONFIG_MEDIA_ALTERA_CI=m -CONFIG_VIDEO_CX25821=m -CONFIG_VIDEO_CX25821_ALSA=m -CONFIG_VIDEO_CX88=m -CONFIG_VIDEO_CX88_ALSA=m -CONFIG_VIDEO_CX88_BLACKBIRD=m -CONFIG_VIDEO_CX88_DVB=m -CONFIG_VIDEO_CX88_ENABLE_VP3054=y -CONFIG_VIDEO_CX88_VP3054=m -CONFIG_VIDEO_CX88_MPEG=m -CONFIG_VIDEO_BT848=m -CONFIG_DVB_BT8XX=m -CONFIG_VIDEO_SAA7134=m 
-CONFIG_VIDEO_SAA7134_ALSA=m -CONFIG_VIDEO_SAA7134_RC=y -CONFIG_VIDEO_SAA7134_DVB=m -CONFIG_VIDEO_SAA7134_GO7007=m -CONFIG_VIDEO_SAA7164=m -# CONFIG_VIDEO_COBALT is not set - -# -# Media digital TV PCI Adapters -# -CONFIG_DVB_AV7110_IR=y -CONFIG_DVB_AV7110=m -CONFIG_DVB_AV7110_OSD=y -CONFIG_DVB_BUDGET_CORE=m -CONFIG_DVB_BUDGET=m -CONFIG_DVB_BUDGET_CI=m -CONFIG_DVB_BUDGET_AV=m -CONFIG_DVB_BUDGET_PATCH=m -CONFIG_DVB_B2C2_FLEXCOP_PCI=m -# CONFIG_DVB_B2C2_FLEXCOP_PCI_DEBUG is not set -CONFIG_DVB_PLUTO2=m -CONFIG_DVB_DM1105=m -CONFIG_DVB_PT1=m -CONFIG_DVB_PT3=m -CONFIG_MANTIS_CORE=m -CONFIG_DVB_MANTIS=m -CONFIG_DVB_HOPPER=m -CONFIG_DVB_NGENE=m -CONFIG_DVB_DDBRIDGE=m -# CONFIG_DVB_DDBRIDGE_MSIENABLE is not set -CONFIG_DVB_SMIPCIE=m -CONFIG_DVB_NETUP_UNIDVB=m -CONFIG_VIDEO_IPU3_CIO2=m -CONFIG_V4L_PLATFORM_DRIVERS=y -CONFIG_VIDEO_CAFE_CCIC=m -CONFIG_VIDEO_CADENCE=y -CONFIG_VIDEO_CADENCE_CSI2RX=m -CONFIG_VIDEO_CADENCE_CSI2TX=m -CONFIG_SOC_CAMERA=m -CONFIG_SOC_CAMERA_PLATFORM=m -CONFIG_V4L_MEM2MEM_DRIVERS=y -CONFIG_VIDEO_MEM2MEM_DEINTERLACE=m -CONFIG_VIDEO_SH_VEU=m -# CONFIG_V4L_TEST_DRIVERS is not set -CONFIG_DVB_PLATFORM_DRIVERS=y -CONFIG_CEC_PLATFORM_DRIVERS=y -# CONFIG_VIDEO_CROS_EC_CEC is not set -CONFIG_CEC_GPIO=m -CONFIG_SDR_PLATFORM_DRIVERS=y - -# -# Supported MMC/SDIO adapters -# -CONFIG_SMS_SDIO_DRV=m -CONFIG_RADIO_ADAPTERS=y -CONFIG_RADIO_TEA575X=m -CONFIG_RADIO_SI470X=m -CONFIG_USB_SI470X=m -CONFIG_I2C_SI470X=m -CONFIG_RADIO_SI4713=m -CONFIG_USB_SI4713=m -CONFIG_PLATFORM_SI4713=m -CONFIG_I2C_SI4713=m -CONFIG_RADIO_SI476X=m -CONFIG_USB_MR800=m -CONFIG_USB_DSBR=m -CONFIG_RADIO_MAXIRADIO=m -CONFIG_RADIO_SHARK=m -CONFIG_RADIO_SHARK2=m -CONFIG_USB_KEENE=m -CONFIG_USB_RAREMONO=m -CONFIG_USB_MA901=m -CONFIG_RADIO_TEA5764=m -CONFIG_RADIO_SAA7706H=m -CONFIG_RADIO_TEF6862=m -CONFIG_RADIO_WL1273=m - -# -# Texas Instruments WL128x FM driver (ST based) -# -CONFIG_RADIO_WL128X=m - -# -# Supported FireWire (IEEE 1394) Adapters -# -CONFIG_DVB_FIREDTV=m -CONFIG_DVB_FIREDTV_INPUT=y -CONFIG_MEDIA_COMMON_OPTIONS=y - -# -# common driver options -# -CONFIG_VIDEO_CX2341X=m -CONFIG_VIDEO_TVEEPROM=m -CONFIG_CYPRESS_FIRMWARE=m -CONFIG_VIDEOBUF2_CORE=m -CONFIG_VIDEOBUF2_V4L2=m -CONFIG_VIDEOBUF2_MEMOPS=m -CONFIG_VIDEOBUF2_DMA_CONTIG=m -CONFIG_VIDEOBUF2_VMALLOC=m -CONFIG_VIDEOBUF2_DMA_SG=m -CONFIG_VIDEOBUF2_DVB=m -CONFIG_DVB_B2C2_FLEXCOP=m -CONFIG_VIDEO_SAA7146=m -CONFIG_VIDEO_SAA7146_VV=m -CONFIG_SMS_SIANO_MDTV=m -CONFIG_SMS_SIANO_RC=y -# CONFIG_SMS_SIANO_DEBUGFS is not set - -# -# Media ancillary drivers (tuners, sensors, i2c, spi, frontends) -# -CONFIG_MEDIA_SUBDRV_AUTOSELECT=y -CONFIG_MEDIA_ATTACH=y -CONFIG_VIDEO_IR_I2C=m - -# -# Audio decoders, processors and mixers -# -CONFIG_VIDEO_TVAUDIO=m -CONFIG_VIDEO_TDA7432=m -CONFIG_VIDEO_TDA9840=m -CONFIG_VIDEO_TEA6415C=m -CONFIG_VIDEO_TEA6420=m -CONFIG_VIDEO_MSP3400=m -CONFIG_VIDEO_CS3308=m -CONFIG_VIDEO_CS5345=m -CONFIG_VIDEO_CS53L32A=m -CONFIG_VIDEO_UDA1342=m -CONFIG_VIDEO_WM8775=m -CONFIG_VIDEO_WM8739=m -CONFIG_VIDEO_VP27SMPX=m -CONFIG_VIDEO_SONY_BTF_MPX=m - -# -# RDS decoders -# -CONFIG_VIDEO_SAA6588=m - -# -# Video decoders -# -CONFIG_VIDEO_BT819=m -CONFIG_VIDEO_BT856=m -CONFIG_VIDEO_BT866=m -CONFIG_VIDEO_KS0127=m -CONFIG_VIDEO_SAA7110=m -CONFIG_VIDEO_SAA711X=m -CONFIG_VIDEO_TVP5150=m -CONFIG_VIDEO_TW2804=m -CONFIG_VIDEO_TW9903=m -CONFIG_VIDEO_TW9906=m -CONFIG_VIDEO_VPX3220=m - -# -# Video and audio decoders -# -CONFIG_VIDEO_SAA717X=m -CONFIG_VIDEO_CX25840=m - -# -# Video encoders -# -CONFIG_VIDEO_SAA7127=m -CONFIG_VIDEO_SAA7185=m -CONFIG_VIDEO_ADV7170=m 
-CONFIG_VIDEO_ADV7175=m - -# -# Camera sensor devices -# -CONFIG_VIDEO_OV2640=m -CONFIG_VIDEO_OV7640=m -CONFIG_VIDEO_OV7670=m -CONFIG_VIDEO_MT9M111=m -CONFIG_VIDEO_MT9V011=m - -# -# Flash devices -# - -# -# Video improvement chips -# -CONFIG_VIDEO_UPD64031A=m -CONFIG_VIDEO_UPD64083=m - -# -# Audio/Video compression chips -# -CONFIG_VIDEO_SAA6752HS=m - -# -# SDR tuner chips -# - -# -# Miscellaneous helper chips -# -CONFIG_VIDEO_M52790=m - -# -# Sensors used on soc_camera driver -# - -# -# soc_camera sensor drivers -# -CONFIG_SOC_CAMERA_MT9M001=m -CONFIG_SOC_CAMERA_MT9M111=m -CONFIG_SOC_CAMERA_MT9T112=m -CONFIG_SOC_CAMERA_MT9V022=m -CONFIG_SOC_CAMERA_OV5642=m -CONFIG_SOC_CAMERA_OV772X=m -CONFIG_SOC_CAMERA_OV9640=m -CONFIG_SOC_CAMERA_OV9740=m -CONFIG_SOC_CAMERA_RJ54N1=m -CONFIG_SOC_CAMERA_TW9910=m - -# -# Media SPI Adapters -# -CONFIG_CXD2880_SPI_DRV=m -CONFIG_MEDIA_TUNER=m -CONFIG_MEDIA_TUNER_SIMPLE=m -CONFIG_MEDIA_TUNER_TDA18250=m -CONFIG_MEDIA_TUNER_TDA8290=m -CONFIG_MEDIA_TUNER_TDA827X=m -CONFIG_MEDIA_TUNER_TDA18271=m -CONFIG_MEDIA_TUNER_TDA9887=m -CONFIG_MEDIA_TUNER_TEA5761=m -CONFIG_MEDIA_TUNER_TEA5767=m -CONFIG_MEDIA_TUNER_MSI001=m -CONFIG_MEDIA_TUNER_MT20XX=m -CONFIG_MEDIA_TUNER_MT2060=m -CONFIG_MEDIA_TUNER_MT2063=m -CONFIG_MEDIA_TUNER_MT2266=m -CONFIG_MEDIA_TUNER_MT2131=m -CONFIG_MEDIA_TUNER_QT1010=m -CONFIG_MEDIA_TUNER_XC2028=m -CONFIG_MEDIA_TUNER_XC5000=m -CONFIG_MEDIA_TUNER_XC4000=m -CONFIG_MEDIA_TUNER_MXL5005S=m -CONFIG_MEDIA_TUNER_MXL5007T=m -CONFIG_MEDIA_TUNER_MC44S803=m -CONFIG_MEDIA_TUNER_MAX2165=m -CONFIG_MEDIA_TUNER_TDA18218=m -CONFIG_MEDIA_TUNER_FC0011=m -CONFIG_MEDIA_TUNER_FC0012=m -CONFIG_MEDIA_TUNER_FC0013=m -CONFIG_MEDIA_TUNER_TDA18212=m -CONFIG_MEDIA_TUNER_E4000=m -CONFIG_MEDIA_TUNER_FC2580=m -CONFIG_MEDIA_TUNER_M88RS6000T=m -CONFIG_MEDIA_TUNER_TUA9001=m -CONFIG_MEDIA_TUNER_SI2157=m -CONFIG_MEDIA_TUNER_IT913X=m -CONFIG_MEDIA_TUNER_R820T=m -CONFIG_MEDIA_TUNER_MXL301RF=m -CONFIG_MEDIA_TUNER_QM1D1C0042=m -CONFIG_MEDIA_TUNER_QM1D1B0004=m - -# -# Multistandard (satellite) frontends -# -CONFIG_DVB_STB0899=m -CONFIG_DVB_STB6100=m -CONFIG_DVB_STV090x=m -CONFIG_DVB_STV0910=m -CONFIG_DVB_STV6110x=m -CONFIG_DVB_STV6111=m -CONFIG_DVB_MXL5XX=m -CONFIG_DVB_M88DS3103=m - -# -# Multistandard (cable + terrestrial) frontends -# -CONFIG_DVB_DRXK=m -CONFIG_DVB_TDA18271C2DD=m -CONFIG_DVB_SI2165=m -CONFIG_DVB_MN88472=m -CONFIG_DVB_MN88473=m - -# -# DVB-S (satellite) frontends -# -CONFIG_DVB_CX24110=m -CONFIG_DVB_CX24123=m -CONFIG_DVB_MT312=m -CONFIG_DVB_ZL10036=m -CONFIG_DVB_ZL10039=m -CONFIG_DVB_S5H1420=m -CONFIG_DVB_STV0288=m -CONFIG_DVB_STB6000=m -CONFIG_DVB_STV0299=m -CONFIG_DVB_STV6110=m -CONFIG_DVB_STV0900=m -CONFIG_DVB_TDA8083=m -CONFIG_DVB_TDA10086=m -CONFIG_DVB_TDA8261=m -CONFIG_DVB_VES1X93=m -CONFIG_DVB_TUNER_ITD1000=m -CONFIG_DVB_TUNER_CX24113=m -CONFIG_DVB_TDA826X=m -CONFIG_DVB_TUA6100=m -CONFIG_DVB_CX24116=m -CONFIG_DVB_CX24117=m -CONFIG_DVB_CX24120=m -CONFIG_DVB_SI21XX=m -CONFIG_DVB_TS2020=m -CONFIG_DVB_DS3000=m -CONFIG_DVB_MB86A16=m -CONFIG_DVB_TDA10071=m - -# -# DVB-T (terrestrial) frontends -# -CONFIG_DVB_SP8870=m -CONFIG_DVB_SP887X=m -CONFIG_DVB_CX22700=m -CONFIG_DVB_CX22702=m -CONFIG_DVB_DRXD=m -CONFIG_DVB_L64781=m -CONFIG_DVB_TDA1004X=m -CONFIG_DVB_NXT6000=m -CONFIG_DVB_MT352=m -CONFIG_DVB_ZL10353=m -CONFIG_DVB_DIB3000MB=m -CONFIG_DVB_DIB3000MC=m -CONFIG_DVB_DIB7000M=m -CONFIG_DVB_DIB7000P=m -CONFIG_DVB_TDA10048=m -CONFIG_DVB_AF9013=m -CONFIG_DVB_EC100=m -CONFIG_DVB_STV0367=m -CONFIG_DVB_CXD2820R=m -CONFIG_DVB_CXD2841ER=m -CONFIG_DVB_RTL2830=m -CONFIG_DVB_RTL2832=m 
-CONFIG_DVB_RTL2832_SDR=m -CONFIG_DVB_SI2168=m -CONFIG_DVB_AS102_FE=m -CONFIG_DVB_ZD1301_DEMOD=m -CONFIG_DVB_GP8PSK_FE=m - -# -# DVB-C (cable) frontends -# -CONFIG_DVB_VES1820=m -CONFIG_DVB_TDA10021=m -CONFIG_DVB_TDA10023=m -CONFIG_DVB_STV0297=m - -# -# ATSC (North American/Korean Terrestrial/Cable DTV) frontends -# -CONFIG_DVB_NXT200X=m -CONFIG_DVB_OR51211=m -CONFIG_DVB_OR51132=m -CONFIG_DVB_BCM3510=m -CONFIG_DVB_LGDT330X=m -CONFIG_DVB_LGDT3305=m -CONFIG_DVB_LGDT3306A=m -CONFIG_DVB_LG2160=m -CONFIG_DVB_S5H1409=m -CONFIG_DVB_AU8522=m -CONFIG_DVB_AU8522_DTV=m -CONFIG_DVB_AU8522_V4L=m -CONFIG_DVB_S5H1411=m - -# -# ISDB-T (terrestrial) frontends -# -CONFIG_DVB_S921=m -CONFIG_DVB_DIB8000=m -CONFIG_DVB_MB86A20S=m - -# -# ISDB-S (satellite) & ISDB-T (terrestrial) frontends -# -CONFIG_DVB_TC90522=m - -# -# Digital terrestrial only tuners/PLL -# -CONFIG_DVB_PLL=m -CONFIG_DVB_TUNER_DIB0070=m -CONFIG_DVB_TUNER_DIB0090=m - -# -# SEC control devices for DVB-S -# -CONFIG_DVB_DRX39XYJ=m -CONFIG_DVB_LNBH25=m -CONFIG_DVB_LNBP21=m -CONFIG_DVB_LNBP22=m -CONFIG_DVB_ISL6405=m -CONFIG_DVB_ISL6421=m -CONFIG_DVB_ISL6423=m -CONFIG_DVB_A8293=m -CONFIG_DVB_LGS8GXX=m -CONFIG_DVB_ATBM8830=m -CONFIG_DVB_TDA665x=m -CONFIG_DVB_IX2505V=m -CONFIG_DVB_M88RS2000=m -CONFIG_DVB_AF9033=m -CONFIG_DVB_HORUS3A=m -CONFIG_DVB_ASCOT2E=m -CONFIG_DVB_HELENE=m - -# -# Common Interface (EN50221) controller drivers -# -CONFIG_DVB_CXD2099=m -CONFIG_DVB_SP2=m - -# -# Tools to develop new frontends -# -CONFIG_DVB_DUMMY_FE=m - -# -# Graphics support -# -CONFIG_AGP=m -CONFIG_AGP_AMD64=m -CONFIG_AGP_INTEL=m -CONFIG_AGP_SIS=m -CONFIG_AGP_VIA=m -CONFIG_INTEL_GTT=m -CONFIG_VGA_ARB=y -CONFIG_VGA_ARB_MAX_GPUS=16 -CONFIG_VGA_SWITCHEROO=y -CONFIG_DRM=m -CONFIG_DRM_MIPI_DSI=y -CONFIG_DRM_DP_AUX_CHARDEV=y -CONFIG_DRM_DEBUG_SELFTEST=m -CONFIG_DRM_KMS_HELPER=m -CONFIG_DRM_KMS_FB_HELPER=y -CONFIG_DRM_FBDEV_EMULATION=y -CONFIG_DRM_FBDEV_OVERALLOC=100 -# CONFIG_DRM_LOAD_EDID_FIRMWARE is not set -# CONFIG_DRM_DP_CEC is not set -CONFIG_DRM_TTM=m -CONFIG_DRM_GEM_CMA_HELPER=y -CONFIG_DRM_KMS_CMA_HELPER=y -CONFIG_DRM_VM=y -CONFIG_DRM_SCHED=m - -# -# I2C encoder or helper chips -# -CONFIG_DRM_I2C_CH7006=m -CONFIG_DRM_I2C_SIL164=m -CONFIG_DRM_I2C_NXP_TDA998X=m -CONFIG_DRM_I2C_NXP_TDA9950=m -CONFIG_DRM_RADEON=m -# CONFIG_DRM_RADEON_USERPTR is not set -CONFIG_DRM_AMDGPU=m -CONFIG_DRM_AMDGPU_SI=y -CONFIG_DRM_AMDGPU_CIK=y -CONFIG_DRM_AMDGPU_USERPTR=y -# CONFIG_DRM_AMDGPU_GART_DEBUGFS is not set - -# -# ACP (Audio CoProcessor) Configuration -# -CONFIG_DRM_AMD_ACP=y - -# -# Display Engine Configuration -# -CONFIG_DRM_AMD_DC=y -CONFIG_DRM_AMD_DC_DCN1_0=y -# CONFIG_DEBUG_KERNEL_DC is not set - -# -# AMD Library routines -# -CONFIG_CHASH=m -# CONFIG_CHASH_STATS is not set -# CONFIG_CHASH_SELFTEST is not set -CONFIG_DRM_NOUVEAU=m -CONFIG_NOUVEAU_DEBUG=5 -CONFIG_NOUVEAU_DEBUG_DEFAULT=3 -# CONFIG_NOUVEAU_DEBUG_MMU is not set -CONFIG_DRM_NOUVEAU_BACKLIGHT=y -CONFIG_DRM_I915=m -# CONFIG_DRM_I915_ALPHA_SUPPORT is not set -CONFIG_DRM_I915_CAPTURE_ERROR=y -CONFIG_DRM_I915_COMPRESS_ERROR=y -CONFIG_DRM_I915_USERPTR=y -CONFIG_DRM_I915_GVT=y -CONFIG_DRM_I915_GVT_KVMGT=m -CONFIG_DRM_VGEM=m -# CONFIG_DRM_VKMS is not set -CONFIG_DRM_VMWGFX=m -CONFIG_DRM_VMWGFX_FBCON=y -CONFIG_DRM_GMA500=m -CONFIG_DRM_GMA600=y -CONFIG_DRM_GMA3600=y -CONFIG_DRM_UDL=m -CONFIG_DRM_AST=m -CONFIG_DRM_MGAG200=m -CONFIG_DRM_CIRRUS_QEMU=m -CONFIG_DRM_QXL=m -CONFIG_DRM_BOCHS=m -CONFIG_DRM_VIRTIO_GPU=m -CONFIG_DRM_PANEL=y - -# -# Display Panels -# -CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN=m -CONFIG_DRM_BRIDGE=y 
-CONFIG_DRM_PANEL_BRIDGE=y - -# -# Display Interface Bridges -# -CONFIG_DRM_ANALOGIX_ANX78XX=m -CONFIG_HSA_AMD=m -CONFIG_DRM_HISI_HIBMC=m -CONFIG_DRM_TINYDRM=m -CONFIG_TINYDRM_MIPI_DBI=m -CONFIG_TINYDRM_ILI9225=m -# CONFIG_TINYDRM_ILI9341 is not set -CONFIG_TINYDRM_MI0283QT=m -CONFIG_TINYDRM_REPAPER=m -CONFIG_TINYDRM_ST7586=m -CONFIG_TINYDRM_ST7735R=m -# CONFIG_DRM_LEGACY is not set -CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y -CONFIG_DRM_LIB_RANDOM=y - -# -# Frame buffer Devices -# -CONFIG_FB=y -CONFIG_FIRMWARE_EDID=y -CONFIG_FB_CMDLINE=y -CONFIG_FB_NOTIFY=y -CONFIG_FB_BOOT_VESA_SUPPORT=y -CONFIG_FB_CFB_FILLRECT=y -CONFIG_FB_CFB_COPYAREA=y -CONFIG_FB_CFB_IMAGEBLIT=y -CONFIG_FB_SYS_FILLRECT=m -CONFIG_FB_SYS_COPYAREA=m -CONFIG_FB_SYS_IMAGEBLIT=m -CONFIG_FB_FOREIGN_ENDIAN=y -CONFIG_FB_BOTH_ENDIAN=y -# CONFIG_FB_BIG_ENDIAN is not set -# CONFIG_FB_LITTLE_ENDIAN is not set -CONFIG_FB_SYS_FOPS=m -CONFIG_FB_DEFERRED_IO=y -CONFIG_FB_BACKLIGHT=y -CONFIG_FB_MODE_HELPERS=y -CONFIG_FB_TILEBLITTING=y - -# -# Frame buffer hardware drivers -# -# CONFIG_FB_CIRRUS is not set -# CONFIG_FB_PM2 is not set -# CONFIG_FB_CYBER2000 is not set -# CONFIG_FB_ARC is not set -# CONFIG_FB_ASILIANT is not set -# CONFIG_FB_IMSTT is not set -# CONFIG_FB_VGA16 is not set -# CONFIG_FB_UVESA is not set -CONFIG_FB_VESA=y -CONFIG_FB_EFI=y -# CONFIG_FB_N411 is not set -# CONFIG_FB_HGA is not set -# CONFIG_FB_OPENCORES is not set -# CONFIG_FB_S1D13XXX is not set -# CONFIG_FB_NVIDIA is not set -# CONFIG_FB_RIVA is not set -# CONFIG_FB_I740 is not set -# CONFIG_FB_LE80578 is not set -# CONFIG_FB_MATROX is not set -# CONFIG_FB_RADEON is not set -# CONFIG_FB_ATY128 is not set -# CONFIG_FB_ATY is not set -# CONFIG_FB_S3 is not set -# CONFIG_FB_SAVAGE is not set -# CONFIG_FB_SIS is not set -# CONFIG_FB_VIA is not set -# CONFIG_FB_NEOMAGIC is not set -# CONFIG_FB_KYRO is not set -# CONFIG_FB_3DFX is not set -# CONFIG_FB_VOODOO1 is not set -# CONFIG_FB_VT8623 is not set -# CONFIG_FB_TRIDENT is not set -# CONFIG_FB_ARK is not set -# CONFIG_FB_PM3 is not set -# CONFIG_FB_CARMINE is not set -# CONFIG_FB_SM501 is not set -# CONFIG_FB_SMSCUFX is not set -# CONFIG_FB_UDL is not set -# CONFIG_FB_IBM_GXT4500 is not set -# CONFIG_FB_VIRTUAL is not set -# CONFIG_FB_METRONOME is not set -# CONFIG_FB_MB862XX is not set -# CONFIG_FB_BROADSHEET is not set -# CONFIG_FB_HYPERV is not set -CONFIG_FB_SIMPLE=y -# CONFIG_FB_SM712 is not set -CONFIG_BACKLIGHT_LCD_SUPPORT=y -CONFIG_LCD_CLASS_DEVICE=m -CONFIG_LCD_L4F00242T03=m -CONFIG_LCD_LMS283GF05=m -CONFIG_LCD_LTV350QV=m -CONFIG_LCD_ILI922X=m -CONFIG_LCD_ILI9320=m -CONFIG_LCD_TDO24M=m -CONFIG_LCD_VGG2432A4=m -CONFIG_LCD_PLATFORM=m -CONFIG_LCD_S6E63M0=m -CONFIG_LCD_LD9040=m -CONFIG_LCD_AMS369FG06=m -CONFIG_LCD_LMS501KF03=m -CONFIG_LCD_HX8357=m -CONFIG_LCD_OTM3225A=m -CONFIG_BACKLIGHT_CLASS_DEVICE=y -CONFIG_BACKLIGHT_GENERIC=m -CONFIG_BACKLIGHT_LM3533=m -CONFIG_BACKLIGHT_PWM=m -CONFIG_BACKLIGHT_DA9052=m -CONFIG_BACKLIGHT_APPLE=m -CONFIG_BACKLIGHT_PM8941_WLED=m -CONFIG_BACKLIGHT_SAHARA=m -CONFIG_BACKLIGHT_WM831X=m -CONFIG_BACKLIGHT_ADP8860=m -CONFIG_BACKLIGHT_ADP8870=m -CONFIG_BACKLIGHT_PCF50633=m -CONFIG_BACKLIGHT_LM3630A=m -CONFIG_BACKLIGHT_LM3639=m -CONFIG_BACKLIGHT_LP855X=m -CONFIG_BACKLIGHT_SKY81452=m -CONFIG_BACKLIGHT_GPIO=m -CONFIG_BACKLIGHT_LV5207LP=m -CONFIG_BACKLIGHT_BD6107=m -CONFIG_BACKLIGHT_ARCXCNN=m -CONFIG_BACKLIGHT_RAVE_SP=m -CONFIG_HDMI=y - -# -# Console display driver support -# -CONFIG_VGA_CONSOLE=y -CONFIG_VGACON_SOFT_SCROLLBACK=y -CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=64
-# CONFIG_VGACON_SOFT_SCROLLBACK_PERSISTENT_ENABLE_BY_DEFAULT is not set -CONFIG_DUMMY_CONSOLE=y -CONFIG_DUMMY_CONSOLE_COLUMNS=80 -CONFIG_DUMMY_CONSOLE_ROWS=25 -CONFIG_FRAMEBUFFER_CONSOLE=y -CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y -CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y -# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set -CONFIG_LOGO=y -CONFIG_LOGO_LINUX_MONO=y -CONFIG_LOGO_LINUX_VGA16=y -CONFIG_LOGO_LINUX_CLUT224=y -CONFIG_SOUND=m -CONFIG_SOUND_OSS_CORE=y -CONFIG_SOUND_OSS_CORE_PRECLAIM=y -CONFIG_SND=m -CONFIG_SND_TIMER=m -CONFIG_SND_PCM=m -CONFIG_SND_PCM_ELD=y -CONFIG_SND_PCM_IEC958=y -CONFIG_SND_DMAENGINE_PCM=m -CONFIG_SND_HWDEP=m -CONFIG_SND_SEQ_DEVICE=m -CONFIG_SND_RAWMIDI=m -CONFIG_SND_COMPRESS_OFFLOAD=m -CONFIG_SND_JACK=y -CONFIG_SND_JACK_INPUT_DEV=y -CONFIG_SND_OSSEMUL=y -CONFIG_SND_MIXER_OSS=m -CONFIG_SND_PCM_OSS=m -CONFIG_SND_PCM_OSS_PLUGINS=y -CONFIG_SND_PCM_TIMER=y -CONFIG_SND_HRTIMER=m -CONFIG_SND_DYNAMIC_MINORS=y -CONFIG_SND_MAX_CARDS=32 -CONFIG_SND_SUPPORT_OLD_API=y -CONFIG_SND_PROC_FS=y -CONFIG_SND_VERBOSE_PROCFS=y -# CONFIG_SND_VERBOSE_PRINTK is not set -# CONFIG_SND_DEBUG is not set -CONFIG_SND_VMASTER=y -CONFIG_SND_DMA_SGBUF=y -CONFIG_SND_SEQUENCER=m -CONFIG_SND_SEQ_DUMMY=m -CONFIG_SND_SEQUENCER_OSS=m -CONFIG_SND_SEQ_HRTIMER_DEFAULT=y -CONFIG_SND_SEQ_MIDI_EVENT=m -CONFIG_SND_SEQ_MIDI=m -CONFIG_SND_SEQ_MIDI_EMUL=m -CONFIG_SND_SEQ_VIRMIDI=m -CONFIG_SND_MPU401_UART=m -CONFIG_SND_OPL3_LIB=m -CONFIG_SND_OPL3_LIB_SEQ=m -CONFIG_SND_VX_LIB=m -CONFIG_SND_AC97_CODEC=m -CONFIG_SND_DRIVERS=y -# CONFIG_SND_PCSP is not set -CONFIG_SND_DUMMY=m -CONFIG_SND_ALOOP=m -CONFIG_SND_VIRMIDI=m -CONFIG_SND_MTPAV=m -CONFIG_SND_MTS64=m -CONFIG_SND_SERIAL_U16550=m -CONFIG_SND_MPU401=m -CONFIG_SND_PORTMAN2X4=m -CONFIG_SND_AC97_POWER_SAVE=y -CONFIG_SND_AC97_POWER_SAVE_DEFAULT=0 -CONFIG_SND_SB_COMMON=m -CONFIG_SND_PCI=y -CONFIG_SND_AD1889=m -CONFIG_SND_ALS300=m -CONFIG_SND_ALS4000=m -CONFIG_SND_ALI5451=m -CONFIG_SND_ASIHPI=m -CONFIG_SND_ATIIXP=m -CONFIG_SND_ATIIXP_MODEM=m -CONFIG_SND_AU8810=m -CONFIG_SND_AU8820=m -CONFIG_SND_AU8830=m -CONFIG_SND_AW2=m -CONFIG_SND_AZT3328=m -CONFIG_SND_BT87X=m -CONFIG_SND_BT87X_OVERCLOCK=y -CONFIG_SND_CA0106=m -CONFIG_SND_CMIPCI=m -CONFIG_SND_OXYGEN_LIB=m -CONFIG_SND_OXYGEN=m -CONFIG_SND_CS4281=m -CONFIG_SND_CS46XX=m -CONFIG_SND_CS46XX_NEW_DSP=y -CONFIG_SND_CTXFI=m -CONFIG_SND_DARLA20=m -CONFIG_SND_GINA20=m -CONFIG_SND_LAYLA20=m -CONFIG_SND_DARLA24=m -CONFIG_SND_GINA24=m -CONFIG_SND_LAYLA24=m -CONFIG_SND_MONA=m -CONFIG_SND_MIA=m -CONFIG_SND_ECHO3G=m -CONFIG_SND_INDIGO=m -CONFIG_SND_INDIGOIO=m -CONFIG_SND_INDIGODJ=m -CONFIG_SND_INDIGOIOX=m -CONFIG_SND_INDIGODJX=m -CONFIG_SND_EMU10K1=m -CONFIG_SND_EMU10K1_SEQ=m -CONFIG_SND_EMU10K1X=m -CONFIG_SND_ENS1370=m -CONFIG_SND_ENS1371=m -CONFIG_SND_ES1938=m -CONFIG_SND_ES1968=m -CONFIG_SND_ES1968_INPUT=y -CONFIG_SND_ES1968_RADIO=y -CONFIG_SND_FM801=m -CONFIG_SND_FM801_TEA575X_BOOL=y -CONFIG_SND_HDSP=m -CONFIG_SND_HDSPM=m -CONFIG_SND_ICE1712=m -CONFIG_SND_ICE1724=m -CONFIG_SND_INTEL8X0=m -CONFIG_SND_INTEL8X0M=m -CONFIG_SND_KORG1212=m -CONFIG_SND_LOLA=m -CONFIG_SND_LX6464ES=m -CONFIG_SND_MAESTRO3=m -CONFIG_SND_MAESTRO3_INPUT=y -CONFIG_SND_MIXART=m -CONFIG_SND_NM256=m -CONFIG_SND_PCXHR=m -CONFIG_SND_RIPTIDE=m -CONFIG_SND_RME32=m -CONFIG_SND_RME96=m -CONFIG_SND_RME9652=m -CONFIG_SND_SONICVIBES=m -CONFIG_SND_TRIDENT=m -CONFIG_SND_VIA82XX=m -CONFIG_SND_VIA82XX_MODEM=m -CONFIG_SND_VIRTUOSO=m -CONFIG_SND_VX222=m -CONFIG_SND_YMFPCI=m - -# -# HD-Audio -# -CONFIG_SND_HDA=m -CONFIG_SND_HDA_INTEL=m -CONFIG_SND_HDA_HWDEP=y
-CONFIG_SND_HDA_RECONFIG=y -CONFIG_SND_HDA_INPUT_BEEP=y -CONFIG_SND_HDA_INPUT_BEEP_MODE=1 -CONFIG_SND_HDA_PATCH_LOADER=y -CONFIG_SND_HDA_CODEC_REALTEK=m -CONFIG_SND_HDA_CODEC_ANALOG=m -CONFIG_SND_HDA_CODEC_SIGMATEL=m -CONFIG_SND_HDA_CODEC_VIA=m -CONFIG_SND_HDA_CODEC_HDMI=m -CONFIG_SND_HDA_CODEC_CIRRUS=m -CONFIG_SND_HDA_CODEC_CONEXANT=m -CONFIG_SND_HDA_CODEC_CA0110=m -CONFIG_SND_HDA_CODEC_CA0132=m -CONFIG_SND_HDA_CODEC_CA0132_DSP=y -CONFIG_SND_HDA_CODEC_CMEDIA=m -CONFIG_SND_HDA_CODEC_SI3054=m -CONFIG_SND_HDA_GENERIC=m -CONFIG_SND_HDA_POWER_SAVE_DEFAULT=0 -CONFIG_SND_HDA_CORE=m -CONFIG_SND_HDA_DSP_LOADER=y -CONFIG_SND_HDA_COMPONENT=y -CONFIG_SND_HDA_I915=y -CONFIG_SND_HDA_EXT_CORE=m -CONFIG_SND_HDA_PREALLOC_SIZE=64 -CONFIG_SND_SPI=y -CONFIG_SND_USB=y -CONFIG_SND_USB_AUDIO=m -CONFIG_SND_USB_UA101=m -CONFIG_SND_USB_USX2Y=m -CONFIG_SND_USB_CAIAQ=m -CONFIG_SND_USB_CAIAQ_INPUT=y -CONFIG_SND_USB_US122L=m -CONFIG_SND_USB_6FIRE=m -CONFIG_SND_USB_HIFACE=m -CONFIG_SND_BCD2000=m -CONFIG_SND_USB_LINE6=m -CONFIG_SND_USB_POD=m -CONFIG_SND_USB_PODHD=m -CONFIG_SND_USB_TONEPORT=m -CONFIG_SND_USB_VARIAX=m -CONFIG_SND_FIREWIRE=y -CONFIG_SND_FIREWIRE_LIB=m -CONFIG_SND_DICE=m -CONFIG_SND_OXFW=m -CONFIG_SND_ISIGHT=m -CONFIG_SND_FIREWORKS=m -CONFIG_SND_BEBOB=m -CONFIG_SND_FIREWIRE_DIGI00X=m -CONFIG_SND_FIREWIRE_TASCAM=m -CONFIG_SND_FIREWIRE_MOTU=m -CONFIG_SND_FIREFACE=m -CONFIG_SND_PCMCIA=y -CONFIG_SND_VXPOCKET=m -CONFIG_SND_PDAUDIOCF=m -CONFIG_SND_SOC=m -CONFIG_SND_SOC_GENERIC_DMAENGINE_PCM=y -CONFIG_SND_SOC_COMPRESS=y -CONFIG_SND_SOC_TOPOLOGY=y -CONFIG_SND_SOC_ACPI=m -CONFIG_SND_SOC_AMD_ACP=m -CONFIG_SND_SOC_AMD_CZ_DA7219MX98357_MACH=m -CONFIG_SND_SOC_AMD_CZ_RT5645_MACH=m -CONFIG_SND_ATMEL_SOC=m -CONFIG_SND_DESIGNWARE_I2S=m -CONFIG_SND_DESIGNWARE_PCM=y - -# -# SoC Audio for Freescale CPUs -# - -# -# Common SoC Audio options for Freescale CPUs: -# -CONFIG_SND_SOC_FSL_ASRC=m -CONFIG_SND_SOC_FSL_SAI=m -CONFIG_SND_SOC_FSL_SSI=m -CONFIG_SND_SOC_FSL_SPDIF=m -CONFIG_SND_SOC_FSL_ESAI=m -CONFIG_SND_SOC_IMX_AUDMUX=m -CONFIG_SND_I2S_HI6210_I2S=m -CONFIG_SND_SOC_IMG=y -CONFIG_SND_SOC_IMG_I2S_IN=m -CONFIG_SND_SOC_IMG_I2S_OUT=m -CONFIG_SND_SOC_IMG_PARALLEL_OUT=m -CONFIG_SND_SOC_IMG_SPDIF_IN=m -CONFIG_SND_SOC_IMG_SPDIF_OUT=m -CONFIG_SND_SOC_IMG_PISTACHIO_INTERNAL_DAC=m -CONFIG_SND_SOC_INTEL_SST_TOPLEVEL=y -CONFIG_SND_SST_IPC=m -CONFIG_SND_SST_IPC_PCI=m -CONFIG_SND_SST_IPC_ACPI=m -CONFIG_SND_SOC_INTEL_SST_ACPI=m -CONFIG_SND_SOC_INTEL_SST=m -CONFIG_SND_SOC_INTEL_SST_FIRMWARE=m -CONFIG_SND_SOC_INTEL_HASWELL=m -CONFIG_SND_SST_ATOM_HIFI2_PLATFORM=m -CONFIG_SND_SST_ATOM_HIFI2_PLATFORM_PCI=m -CONFIG_SND_SST_ATOM_HIFI2_PLATFORM_ACPI=m -CONFIG_SND_SOC_INTEL_SKYLAKE_SSP_CLK=m -CONFIG_SND_SOC_INTEL_SKYLAKE=m -CONFIG_SND_SOC_ACPI_INTEL_MATCH=m -CONFIG_SND_SOC_INTEL_MACH=y -CONFIG_SND_SOC_INTEL_HASWELL_MACH=m -CONFIG_SND_SOC_INTEL_BDW_RT5677_MACH=m -CONFIG_SND_SOC_INTEL_BROADWELL_MACH=m -CONFIG_SND_SOC_INTEL_BYTCR_RT5640_MACH=m -CONFIG_SND_SOC_INTEL_BYTCR_RT5651_MACH=m -CONFIG_SND_SOC_INTEL_CHT_BSW_RT5672_MACH=m -CONFIG_SND_SOC_INTEL_CHT_BSW_RT5645_MACH=m -CONFIG_SND_SOC_INTEL_CHT_BSW_MAX98090_TI_MACH=m -CONFIG_SND_SOC_INTEL_CHT_BSW_NAU8824_MACH=m -CONFIG_SND_SOC_INTEL_BYT_CHT_DA7213_MACH=m -CONFIG_SND_SOC_INTEL_BYT_CHT_ES8316_MACH=m -CONFIG_SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH=m -CONFIG_SND_SOC_INTEL_SKL_RT286_MACH=m -CONFIG_SND_SOC_INTEL_SKL_NAU88L25_SSM4567_MACH=m -CONFIG_SND_SOC_INTEL_SKL_NAU88L25_MAX98357A_MACH=m -CONFIG_SND_SOC_INTEL_BXT_DA7219_MAX98357A_MACH=m -CONFIG_SND_SOC_INTEL_BXT_RT298_MACH=m 
-CONFIG_SND_SOC_INTEL_KBL_RT5663_MAX98927_MACH=m -CONFIG_SND_SOC_INTEL_KBL_RT5663_RT5514_MAX98927_MACH=m -CONFIG_SND_SOC_INTEL_KBL_DA7219_MAX98357A_MACH=m -# CONFIG_SND_SOC_INTEL_GLK_RT5682_MAX98357A_MACH is not set - -# -# STMicroelectronics STM32 SOC audio support -# -CONFIG_SND_SOC_XTFPGA_I2S=m -CONFIG_ZX_TDM=m -CONFIG_SND_SOC_I2C_AND_SPI=m - -# -# CODEC drivers -# -# CONFIG_SND_SOC_AC97_CODEC is not set -CONFIG_SND_SOC_ADAU_UTILS=m -CONFIG_SND_SOC_ADAU1701=m -CONFIG_SND_SOC_ADAU17X1=m -CONFIG_SND_SOC_ADAU1761=m -CONFIG_SND_SOC_ADAU1761_I2C=m -CONFIG_SND_SOC_ADAU1761_SPI=m -CONFIG_SND_SOC_ADAU7002=m -CONFIG_SND_SOC_AK4104=m -CONFIG_SND_SOC_AK4458=m -CONFIG_SND_SOC_AK4554=m -CONFIG_SND_SOC_AK4613=m -CONFIG_SND_SOC_AK4642=m -CONFIG_SND_SOC_AK5386=m -CONFIG_SND_SOC_AK5558=m -CONFIG_SND_SOC_ALC5623=m -CONFIG_SND_SOC_BD28623=m -# CONFIG_SND_SOC_BT_SCO is not set -CONFIG_SND_SOC_CS35L32=m -CONFIG_SND_SOC_CS35L33=m -CONFIG_SND_SOC_CS35L34=m -CONFIG_SND_SOC_CS35L35=m -CONFIG_SND_SOC_CS42L42=m -CONFIG_SND_SOC_CS42L51=m -CONFIG_SND_SOC_CS42L51_I2C=m -CONFIG_SND_SOC_CS42L52=m -CONFIG_SND_SOC_CS42L56=m -CONFIG_SND_SOC_CS42L73=m -CONFIG_SND_SOC_CS4265=m -CONFIG_SND_SOC_CS4270=m -CONFIG_SND_SOC_CS4271=m -CONFIG_SND_SOC_CS4271_I2C=m -CONFIG_SND_SOC_CS4271_SPI=m -CONFIG_SND_SOC_CS42XX8=m -CONFIG_SND_SOC_CS42XX8_I2C=m -CONFIG_SND_SOC_CS43130=m -CONFIG_SND_SOC_CS4349=m -CONFIG_SND_SOC_CS53L30=m -CONFIG_SND_SOC_DA7213=m -CONFIG_SND_SOC_DA7219=m -CONFIG_SND_SOC_DMIC=m -CONFIG_SND_SOC_HDMI_CODEC=m -CONFIG_SND_SOC_ES7134=m -# CONFIG_SND_SOC_ES7241 is not set -CONFIG_SND_SOC_ES8316=m -CONFIG_SND_SOC_ES8328=m -CONFIG_SND_SOC_ES8328_I2C=m -CONFIG_SND_SOC_ES8328_SPI=m -CONFIG_SND_SOC_GTM601=m -CONFIG_SND_SOC_HDAC_HDMI=m -CONFIG_SND_SOC_INNO_RK3036=m -CONFIG_SND_SOC_MAX98090=m -CONFIG_SND_SOC_MAX98357A=m -CONFIG_SND_SOC_MAX98504=m -CONFIG_SND_SOC_MAX9867=m -CONFIG_SND_SOC_MAX98927=m -CONFIG_SND_SOC_MAX98373=m -CONFIG_SND_SOC_MAX9860=m -CONFIG_SND_SOC_MSM8916_WCD_ANALOG=m -CONFIG_SND_SOC_MSM8916_WCD_DIGITAL=m -CONFIG_SND_SOC_PCM1681=m -CONFIG_SND_SOC_PCM1789=m -CONFIG_SND_SOC_PCM1789_I2C=m -CONFIG_SND_SOC_PCM179X=m -CONFIG_SND_SOC_PCM179X_I2C=m -CONFIG_SND_SOC_PCM179X_SPI=m -CONFIG_SND_SOC_PCM186X=m -CONFIG_SND_SOC_PCM186X_I2C=m -CONFIG_SND_SOC_PCM186X_SPI=m -CONFIG_SND_SOC_PCM3168A=m -CONFIG_SND_SOC_PCM3168A_I2C=m -CONFIG_SND_SOC_PCM3168A_SPI=m -CONFIG_SND_SOC_PCM512x=m -CONFIG_SND_SOC_PCM512x_I2C=m -CONFIG_SND_SOC_PCM512x_SPI=m -CONFIG_SND_SOC_RL6231=m -CONFIG_SND_SOC_RL6347A=m -CONFIG_SND_SOC_RT286=m -CONFIG_SND_SOC_RT298=m -CONFIG_SND_SOC_RT5514=m -CONFIG_SND_SOC_RT5514_SPI=m -CONFIG_SND_SOC_RT5616=m -CONFIG_SND_SOC_RT5631=m -CONFIG_SND_SOC_RT5640=m -CONFIG_SND_SOC_RT5645=m -CONFIG_SND_SOC_RT5651=m -CONFIG_SND_SOC_RT5663=m -CONFIG_SND_SOC_RT5670=m -CONFIG_SND_SOC_RT5677=m -CONFIG_SND_SOC_RT5677_SPI=m -CONFIG_SND_SOC_SGTL5000=m -CONFIG_SND_SOC_SI476X=m -CONFIG_SND_SOC_SIGMADSP=m -CONFIG_SND_SOC_SIGMADSP_I2C=m -CONFIG_SND_SOC_SIGMADSP_REGMAP=m -# CONFIG_SND_SOC_SIMPLE_AMPLIFIER is not set -CONFIG_SND_SOC_SIRF_AUDIO_CODEC=m -CONFIG_SND_SOC_SPDIF=m -CONFIG_SND_SOC_SSM2305=m -CONFIG_SND_SOC_SSM2602=m -CONFIG_SND_SOC_SSM2602_SPI=m -CONFIG_SND_SOC_SSM2602_I2C=m -CONFIG_SND_SOC_SSM4567=m -CONFIG_SND_SOC_STA32X=m -CONFIG_SND_SOC_STA350=m -CONFIG_SND_SOC_STI_SAS=m -CONFIG_SND_SOC_TAS2552=m -CONFIG_SND_SOC_TAS5086=m -CONFIG_SND_SOC_TAS571X=m -CONFIG_SND_SOC_TAS5720=m -CONFIG_SND_SOC_TAS6424=m -CONFIG_SND_SOC_TDA7419=m -CONFIG_SND_SOC_TFA9879=m -CONFIG_SND_SOC_TLV320AIC23=m -CONFIG_SND_SOC_TLV320AIC23_I2C=m 
-CONFIG_SND_SOC_TLV320AIC23_SPI=m -CONFIG_SND_SOC_TLV320AIC31XX=m -CONFIG_SND_SOC_TLV320AIC32X4=m -CONFIG_SND_SOC_TLV320AIC32X4_I2C=m -CONFIG_SND_SOC_TLV320AIC32X4_SPI=m -CONFIG_SND_SOC_TLV320AIC3X=m -CONFIG_SND_SOC_TS3A227E=m -CONFIG_SND_SOC_TSCS42XX=m -CONFIG_SND_SOC_TSCS454=m -CONFIG_SND_SOC_WM8510=m -CONFIG_SND_SOC_WM8523=m -CONFIG_SND_SOC_WM8524=m -CONFIG_SND_SOC_WM8580=m -CONFIG_SND_SOC_WM8711=m -CONFIG_SND_SOC_WM8728=m -CONFIG_SND_SOC_WM8731=m -CONFIG_SND_SOC_WM8737=m -CONFIG_SND_SOC_WM8741=m -CONFIG_SND_SOC_WM8750=m -CONFIG_SND_SOC_WM8753=m -CONFIG_SND_SOC_WM8770=m -CONFIG_SND_SOC_WM8776=m -CONFIG_SND_SOC_WM8782=m -CONFIG_SND_SOC_WM8804=m -CONFIG_SND_SOC_WM8804_I2C=m -CONFIG_SND_SOC_WM8804_SPI=m -CONFIG_SND_SOC_WM8903=m -CONFIG_SND_SOC_WM8960=m -CONFIG_SND_SOC_WM8962=m -CONFIG_SND_SOC_WM8974=m -CONFIG_SND_SOC_WM8978=m -CONFIG_SND_SOC_WM8985=m -CONFIG_SND_SOC_ZX_AUD96P22=m -CONFIG_SND_SOC_MAX9759=m -CONFIG_SND_SOC_MT6351=m -CONFIG_SND_SOC_NAU8540=m -CONFIG_SND_SOC_NAU8810=m -CONFIG_SND_SOC_NAU8824=m -CONFIG_SND_SOC_NAU8825=m -CONFIG_SND_SOC_TPA6130A2=m -CONFIG_SND_SIMPLE_CARD_UTILS=m -CONFIG_SND_SIMPLE_CARD=m -CONFIG_SND_X86=y -CONFIG_HDMI_LPE_AUDIO=m -CONFIG_SND_SYNTH_EMUX=m -CONFIG_AC97_BUS=m - -# -# HID support -# -CONFIG_HID=m -CONFIG_HID_BATTERY_STRENGTH=y -CONFIG_HIDRAW=y -CONFIG_UHID=m -CONFIG_HID_GENERIC=m - -# -# Special HID drivers -# -CONFIG_HID_A4TECH=m -CONFIG_HID_ACCUTOUCH=m -CONFIG_HID_ACRUX=m -CONFIG_HID_ACRUX_FF=y -CONFIG_HID_APPLE=m -CONFIG_HID_APPLEIR=m -CONFIG_HID_ASUS=m -CONFIG_HID_AUREAL=m -CONFIG_HID_BELKIN=m -CONFIG_HID_BETOP_FF=m -CONFIG_HID_CHERRY=m -CONFIG_HID_CHICONY=m -CONFIG_HID_CORSAIR=m -# CONFIG_HID_COUGAR is not set -CONFIG_HID_PRODIKEYS=m -CONFIG_HID_CMEDIA=m -CONFIG_HID_CP2112=m -CONFIG_HID_CYPRESS=m -CONFIG_HID_DRAGONRISE=m -CONFIG_DRAGONRISE_FF=y -CONFIG_HID_EMS_FF=m -CONFIG_HID_ELAN=m -CONFIG_HID_ELECOM=m -CONFIG_HID_ELO=m -CONFIG_HID_EZKEY=m -CONFIG_HID_GEMBIRD=m -CONFIG_HID_GFRM=m -CONFIG_HID_HOLTEK=m -CONFIG_HOLTEK_FF=y -CONFIG_HID_GOOGLE_HAMMER=m -CONFIG_HID_GT683R=m -CONFIG_HID_KEYTOUCH=m -CONFIG_HID_KYE=m -CONFIG_HID_UCLOGIC=m -CONFIG_HID_WALTOP=m -CONFIG_HID_GYRATION=m -CONFIG_HID_ICADE=m -CONFIG_HID_ITE=m -CONFIG_HID_JABRA=m -CONFIG_HID_TWINHAN=m -CONFIG_HID_KENSINGTON=m -CONFIG_HID_LCPOWER=m -CONFIG_HID_LED=m -CONFIG_HID_LENOVO=m -CONFIG_HID_LOGITECH=m -CONFIG_HID_LOGITECH_DJ=m -CONFIG_HID_LOGITECH_HIDPP=m -CONFIG_LOGITECH_FF=y -CONFIG_LOGIRUMBLEPAD2_FF=y -CONFIG_LOGIG940_FF=y -CONFIG_LOGIWHEELS_FF=y -CONFIG_HID_MAGICMOUSE=m -CONFIG_HID_MAYFLASH=m -CONFIG_HID_REDRAGON=m -CONFIG_HID_MICROSOFT=m -CONFIG_HID_MONTEREY=m -CONFIG_HID_MULTITOUCH=m -CONFIG_HID_NTI=m -CONFIG_HID_NTRIG=m -CONFIG_HID_ORTEK=m -CONFIG_HID_PANTHERLORD=m -CONFIG_PANTHERLORD_FF=y -CONFIG_HID_PENMOUNT=m -CONFIG_HID_PETALYNX=m -CONFIG_HID_PICOLCD=m -CONFIG_HID_PICOLCD_FB=y -CONFIG_HID_PICOLCD_BACKLIGHT=y -CONFIG_HID_PICOLCD_LCD=y -CONFIG_HID_PICOLCD_LEDS=y -CONFIG_HID_PICOLCD_CIR=y -CONFIG_HID_PLANTRONICS=m -CONFIG_HID_PRIMAX=m -CONFIG_HID_RETRODE=m -CONFIG_HID_ROCCAT=m -CONFIG_HID_SAITEK=m -CONFIG_HID_SAMSUNG=m -CONFIG_HID_SONY=m -CONFIG_SONY_FF=y -CONFIG_HID_SPEEDLINK=m -CONFIG_HID_STEAM=m -CONFIG_HID_STEELSERIES=m -CONFIG_HID_SUNPLUS=m -CONFIG_HID_RMI=m -CONFIG_HID_GREENASIA=m -CONFIG_GREENASIA_FF=y -CONFIG_HID_HYPERV_MOUSE=m -CONFIG_HID_SMARTJOYPLUS=m -CONFIG_SMARTJOYPLUS_FF=y -CONFIG_HID_TIVO=m -CONFIG_HID_TOPSEED=m -CONFIG_HID_THINGM=m -CONFIG_HID_THRUSTMASTER=m -CONFIG_THRUSTMASTER_FF=y -CONFIG_HID_UDRAW_PS3=m -CONFIG_HID_WACOM=m -CONFIG_HID_WIIMOTE=m 
-CONFIG_HID_XINMO=m -CONFIG_HID_ZEROPLUS=m -CONFIG_ZEROPLUS_FF=y -CONFIG_HID_ZYDACRON=m -CONFIG_HID_SENSOR_HUB=m -CONFIG_HID_SENSOR_CUSTOM_SENSOR=m -CONFIG_HID_ALPS=m - -# -# USB HID support -# -CONFIG_USB_HID=m -CONFIG_HID_PID=y -CONFIG_USB_HIDDEV=y - -# -# I2C HID support -# -CONFIG_I2C_HID=m - -# -# Intel ISH HID support -# -CONFIG_INTEL_ISH_HID=m -CONFIG_USB_OHCI_LITTLE_ENDIAN=y -CONFIG_USB_SUPPORT=y -CONFIG_USB_COMMON=y -CONFIG_USB_ARCH_HAS_HCD=y -CONFIG_USB=m -CONFIG_USB_PCI=y -CONFIG_USB_ANNOUNCE_NEW_DEVICES=y - -# -# Miscellaneous USB options -# -CONFIG_USB_DEFAULT_PERSIST=y -CONFIG_USB_DYNAMIC_MINORS=y -CONFIG_USB_OTG=y -# CONFIG_USB_OTG_WHITELIST is not set -# CONFIG_USB_OTG_BLACKLIST_HUB is not set -CONFIG_USB_OTG_FSM=m -CONFIG_USB_LEDS_TRIGGER_USBPORT=m -CONFIG_USB_MON=m -CONFIG_USB_WUSB=m -CONFIG_USB_WUSB_CBAF=m -# CONFIG_USB_WUSB_CBAF_DEBUG is not set - -# -# USB Host Controller Drivers -# -CONFIG_USB_C67X00_HCD=m -CONFIG_USB_XHCI_HCD=m -CONFIG_USB_XHCI_DBGCAP=y -CONFIG_USB_XHCI_PCI=m -CONFIG_USB_XHCI_PLATFORM=m -CONFIG_USB_EHCI_HCD=m -CONFIG_USB_EHCI_ROOT_HUB_TT=y -CONFIG_USB_EHCI_TT_NEWSCHED=y -CONFIG_USB_EHCI_PCI=m -CONFIG_USB_EHCI_HCD_PLATFORM=m -CONFIG_USB_OXU210HP_HCD=m -CONFIG_USB_ISP116X_HCD=m -CONFIG_USB_FOTG210_HCD=m -CONFIG_USB_MAX3421_HCD=m -CONFIG_USB_OHCI_HCD=m -CONFIG_USB_OHCI_HCD_PCI=m -CONFIG_USB_OHCI_HCD_SSB=y -CONFIG_USB_OHCI_HCD_PLATFORM=m -CONFIG_USB_UHCI_HCD=m -CONFIG_USB_U132_HCD=m -CONFIG_USB_SL811_HCD=m -# CONFIG_USB_SL811_HCD_ISO is not set -CONFIG_USB_SL811_CS=m -CONFIG_USB_R8A66597_HCD=m -CONFIG_USB_WHCI_HCD=m -CONFIG_USB_HWA_HCD=m -CONFIG_USB_HCD_BCMA=m -CONFIG_USB_HCD_SSB=m -# CONFIG_USB_HCD_TEST_MODE is not set - -# -# USB Device Class drivers -# -CONFIG_USB_ACM=m -CONFIG_USB_PRINTER=m -CONFIG_USB_WDM=m -CONFIG_USB_TMC=m - -# -# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may -# - -# -# also be needed; see USB_STORAGE Help for more info -# -CONFIG_USB_STORAGE=m -# CONFIG_USB_STORAGE_DEBUG is not set -CONFIG_USB_STORAGE_REALTEK=m -CONFIG_REALTEK_AUTOPM=y -CONFIG_USB_STORAGE_DATAFAB=m -CONFIG_USB_STORAGE_FREECOM=m -CONFIG_USB_STORAGE_ISD200=m -CONFIG_USB_STORAGE_USBAT=m -CONFIG_USB_STORAGE_SDDR09=m -CONFIG_USB_STORAGE_SDDR55=m -CONFIG_USB_STORAGE_JUMPSHOT=m -CONFIG_USB_STORAGE_ALAUDA=m -CONFIG_USB_STORAGE_ONETOUCH=m -CONFIG_USB_STORAGE_KARMA=m -CONFIG_USB_STORAGE_CYPRESS_ATACB=m -CONFIG_USB_STORAGE_ENE_UB6250=m -CONFIG_USB_UAS=m - -# -# USB Imaging devices -# -CONFIG_USB_MDC800=m -CONFIG_USB_MICROTEK=m -CONFIG_USBIP_CORE=m -CONFIG_USBIP_VHCI_HCD=m -CONFIG_USBIP_VHCI_HC_PORTS=8 -CONFIG_USBIP_VHCI_NR_HCS=1 -CONFIG_USBIP_HOST=m -CONFIG_USBIP_VUDC=m -# CONFIG_USBIP_DEBUG is not set -CONFIG_USB_MUSB_HDRC=m -# CONFIG_USB_MUSB_HOST is not set -# CONFIG_USB_MUSB_GADGET is not set -CONFIG_USB_MUSB_DUAL_ROLE=y - -# -# Platform Glue Layer -# - -# -# MUSB DMA mode -# -CONFIG_MUSB_PIO_ONLY=y -CONFIG_USB_DWC3=m -# CONFIG_USB_DWC3_ULPI is not set -# CONFIG_USB_DWC3_HOST is not set -# CONFIG_USB_DWC3_GADGET is not set -CONFIG_USB_DWC3_DUAL_ROLE=y - -# -# Platform Glue Driver Support -# -CONFIG_USB_DWC3_PCI=m -CONFIG_USB_DWC3_HAPS=m -CONFIG_USB_DWC2=m -# CONFIG_USB_DWC2_HOST is not set - -# -# Gadget/Dual-role mode requires USB Gadget support to be enabled -# -# CONFIG_USB_DWC2_PERIPHERAL is not set -CONFIG_USB_DWC2_DUAL_ROLE=y -CONFIG_USB_DWC2_PCI=m -# CONFIG_USB_DWC2_DEBUG is not set -# CONFIG_USB_DWC2_TRACK_MISSED_SOFS is not set -CONFIG_USB_CHIPIDEA=m -CONFIG_USB_CHIPIDEA_PCI=m -CONFIG_USB_CHIPIDEA_UDC=y -CONFIG_USB_CHIPIDEA_HOST=y -CONFIG_USB_ISP1760=m 
-CONFIG_USB_ISP1760_HCD=y -CONFIG_USB_ISP1761_UDC=y -# CONFIG_USB_ISP1760_HOST_ROLE is not set -# CONFIG_USB_ISP1760_GADGET_ROLE is not set -CONFIG_USB_ISP1760_DUAL_ROLE=y - -# -# USB port drivers -# -CONFIG_USB_USS720=m -CONFIG_USB_SERIAL=m -CONFIG_USB_SERIAL_GENERIC=y -CONFIG_USB_SERIAL_SIMPLE=m -CONFIG_USB_SERIAL_AIRCABLE=m -CONFIG_USB_SERIAL_ARK3116=m -CONFIG_USB_SERIAL_BELKIN=m -CONFIG_USB_SERIAL_CH341=m -CONFIG_USB_SERIAL_WHITEHEAT=m -CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m -CONFIG_USB_SERIAL_CP210X=m -CONFIG_USB_SERIAL_CYPRESS_M8=m -CONFIG_USB_SERIAL_EMPEG=m -CONFIG_USB_SERIAL_FTDI_SIO=m -CONFIG_USB_SERIAL_VISOR=m -CONFIG_USB_SERIAL_IPAQ=m -CONFIG_USB_SERIAL_IR=m -CONFIG_USB_SERIAL_EDGEPORT=m -CONFIG_USB_SERIAL_EDGEPORT_TI=m -CONFIG_USB_SERIAL_F81232=m -CONFIG_USB_SERIAL_F8153X=m -CONFIG_USB_SERIAL_GARMIN=m -CONFIG_USB_SERIAL_IPW=m -CONFIG_USB_SERIAL_IUU=m -CONFIG_USB_SERIAL_KEYSPAN_PDA=m -CONFIG_USB_SERIAL_KEYSPAN=m -CONFIG_USB_SERIAL_KLSI=m -CONFIG_USB_SERIAL_KOBIL_SCT=m -CONFIG_USB_SERIAL_MCT_U232=m -CONFIG_USB_SERIAL_METRO=m -CONFIG_USB_SERIAL_MOS7720=m -CONFIG_USB_SERIAL_MOS7715_PARPORT=y -CONFIG_USB_SERIAL_MOS7840=m -CONFIG_USB_SERIAL_MXUPORT=m -CONFIG_USB_SERIAL_NAVMAN=m -CONFIG_USB_SERIAL_PL2303=m -CONFIG_USB_SERIAL_OTI6858=m -CONFIG_USB_SERIAL_QCAUX=m -CONFIG_USB_SERIAL_QUALCOMM=m -CONFIG_USB_SERIAL_SPCP8X5=m -CONFIG_USB_SERIAL_SAFE=m -CONFIG_USB_SERIAL_SAFE_PADDED=y -CONFIG_USB_SERIAL_SIERRAWIRELESS=m -CONFIG_USB_SERIAL_SYMBOL=m -CONFIG_USB_SERIAL_TI=m -CONFIG_USB_SERIAL_CYBERJACK=m -CONFIG_USB_SERIAL_XIRCOM=m -CONFIG_USB_SERIAL_WWAN=m -CONFIG_USB_SERIAL_OPTION=m -CONFIG_USB_SERIAL_OMNINET=m -CONFIG_USB_SERIAL_OPTICON=m -CONFIG_USB_SERIAL_XSENS_MT=m -CONFIG_USB_SERIAL_WISHBONE=m -CONFIG_USB_SERIAL_SSU100=m -CONFIG_USB_SERIAL_QT2=m -# CONFIG_USB_SERIAL_UPD78F0730 is not set -# CONFIG_USB_SERIAL_DEBUG is not set - -# -# USB Miscellaneous drivers -# -CONFIG_USB_EMI62=m -CONFIG_USB_EMI26=m -CONFIG_USB_ADUTUX=m -CONFIG_USB_SEVSEG=m -CONFIG_USB_RIO500=m -CONFIG_USB_LEGOTOWER=m -CONFIG_USB_LCD=m -CONFIG_USB_CYPRESS_CY7C63=m -CONFIG_USB_CYTHERM=m -CONFIG_USB_IDMOUSE=m -CONFIG_USB_FTDI_ELAN=m -CONFIG_USB_APPLEDISPLAY=m -CONFIG_USB_SISUSBVGA=m -CONFIG_USB_SISUSBVGA_CON=y -CONFIG_USB_LD=m -CONFIG_USB_TRANCEVIBRATOR=m -CONFIG_USB_IOWARRIOR=m -CONFIG_USB_TEST=m -CONFIG_USB_EHSET_TEST_FIXTURE=m -CONFIG_USB_ISIGHTFW=m -CONFIG_USB_YUREX=m -CONFIG_USB_EZUSB_FX2=m -CONFIG_USB_HUB_USB251XB=m -CONFIG_USB_HSIC_USB3503=m -CONFIG_USB_HSIC_USB4604=m -CONFIG_USB_LINK_LAYER_TEST=m -CONFIG_USB_CHAOSKEY=m -CONFIG_USB_ATM=m -CONFIG_USB_SPEEDTOUCH=m -CONFIG_USB_CXACRU=m -CONFIG_USB_UEAGLEATM=m -CONFIG_USB_XUSBATM=m - -# -# USB Physical Layer drivers -# -CONFIG_USB_PHY=y -CONFIG_NOP_USB_XCEIV=m -CONFIG_USB_GPIO_VBUS=m -CONFIG_TAHVO_USB=m -CONFIG_TAHVO_USB_HOST_BY_DEFAULT=y -CONFIG_USB_ISP1301=m -CONFIG_USB_GADGET=m -# CONFIG_USB_GADGET_DEBUG is not set -# CONFIG_USB_GADGET_DEBUG_FILES is not set -# CONFIG_USB_GADGET_DEBUG_FS is not set -CONFIG_USB_GADGET_VBUS_DRAW=2 -CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2 -CONFIG_U_SERIAL_CONSOLE=y - -# -# USB Peripheral Controller -# -CONFIG_USB_FOTG210_UDC=m -CONFIG_USB_GR_UDC=m -CONFIG_USB_R8A66597=m -CONFIG_USB_PXA27X=m -CONFIG_USB_MV_UDC=m -CONFIG_USB_MV_U3D=m -CONFIG_USB_SNP_CORE=m -CONFIG_USB_M66592=m -CONFIG_USB_BDC_UDC=m - -# -# Platform Support -# -CONFIG_USB_BDC_PCI=m -CONFIG_USB_AMD5536UDC=m -CONFIG_USB_NET2272=m -CONFIG_USB_NET2272_DMA=y -CONFIG_USB_NET2280=m -CONFIG_USB_GOKU=m -CONFIG_USB_EG20T=m -# CONFIG_USB_DUMMY_HCD is not set -CONFIG_USB_LIBCOMPOSITE=m 
-CONFIG_USB_F_ACM=m -CONFIG_USB_F_SS_LB=m -CONFIG_USB_U_SERIAL=m -CONFIG_USB_U_ETHER=m -CONFIG_USB_U_AUDIO=m -CONFIG_USB_F_SERIAL=m -CONFIG_USB_F_OBEX=m -CONFIG_USB_F_NCM=m -CONFIG_USB_F_ECM=m -CONFIG_USB_F_PHONET=m -CONFIG_USB_F_EEM=m -CONFIG_USB_F_SUBSET=m -CONFIG_USB_F_RNDIS=m -CONFIG_USB_F_MASS_STORAGE=m -CONFIG_USB_F_FS=m -CONFIG_USB_F_UAC1=m -CONFIG_USB_F_UAC2=m -CONFIG_USB_F_UVC=m -CONFIG_USB_F_MIDI=m -CONFIG_USB_F_HID=m -CONFIG_USB_F_PRINTER=m -CONFIG_USB_F_TCM=m -CONFIG_USB_CONFIGFS=m -CONFIG_USB_CONFIGFS_SERIAL=y -CONFIG_USB_CONFIGFS_ACM=y -CONFIG_USB_CONFIGFS_OBEX=y -CONFIG_USB_CONFIGFS_NCM=y -CONFIG_USB_CONFIGFS_ECM=y -CONFIG_USB_CONFIGFS_ECM_SUBSET=y -CONFIG_USB_CONFIGFS_RNDIS=y -CONFIG_USB_CONFIGFS_EEM=y -CONFIG_USB_CONFIGFS_PHONET=y -CONFIG_USB_CONFIGFS_MASS_STORAGE=y -CONFIG_USB_CONFIGFS_F_LB_SS=y -CONFIG_USB_CONFIGFS_F_FS=y -CONFIG_USB_CONFIGFS_F_UAC1=y -# CONFIG_USB_CONFIGFS_F_UAC1_LEGACY is not set -CONFIG_USB_CONFIGFS_F_UAC2=y -CONFIG_USB_CONFIGFS_F_MIDI=y -CONFIG_USB_CONFIGFS_F_HID=y -CONFIG_USB_CONFIGFS_F_UVC=y -CONFIG_USB_CONFIGFS_F_PRINTER=y -CONFIG_USB_CONFIGFS_F_TCM=y -CONFIG_USB_ZERO=m -CONFIG_USB_ZERO_HNPTEST=y -CONFIG_USB_AUDIO=m -CONFIG_GADGET_UAC1=y -# CONFIG_GADGET_UAC1_LEGACY is not set -CONFIG_USB_ETH=m -CONFIG_USB_ETH_RNDIS=y -CONFIG_USB_ETH_EEM=y -CONFIG_USB_G_NCM=m -CONFIG_USB_GADGETFS=m -CONFIG_USB_FUNCTIONFS=m -CONFIG_USB_FUNCTIONFS_ETH=y -CONFIG_USB_FUNCTIONFS_RNDIS=y -CONFIG_USB_FUNCTIONFS_GENERIC=y -CONFIG_USB_MASS_STORAGE=m -CONFIG_USB_GADGET_TARGET=m -CONFIG_USB_G_SERIAL=m -CONFIG_USB_MIDI_GADGET=m -CONFIG_USB_G_PRINTER=m -CONFIG_USB_CDC_COMPOSITE=m -CONFIG_USB_G_NOKIA=m -CONFIG_USB_G_ACM_MS=m -CONFIG_USB_G_MULTI=m -CONFIG_USB_G_MULTI_RNDIS=y -CONFIG_USB_G_MULTI_CDC=y -CONFIG_USB_G_HID=m -# CONFIG_USB_G_DBGP is not set -CONFIG_USB_G_WEBCAM=m -CONFIG_TYPEC=m -CONFIG_TYPEC_TCPM=m -CONFIG_TYPEC_TCPCI=m -CONFIG_TYPEC_RT1711H=m -CONFIG_TYPEC_FUSB302=m -CONFIG_TYPEC_UCSI=m -CONFIG_UCSI_ACPI=m -CONFIG_TYPEC_TPS6598X=m - -# -# USB Type-C Multiplexer/DeMultiplexer Switch support -# -CONFIG_TYPEC_MUX_PI3USB30532=m - -# -# USB Type-C Alternate Mode drivers -# -# CONFIG_TYPEC_DP_ALTMODE is not set -CONFIG_USB_ROLE_SWITCH=m -CONFIG_USB_ROLES_INTEL_XHCI=m -CONFIG_USB_LED_TRIG=y -CONFIG_USB_ULPI_BUS=m -CONFIG_UWB=m -CONFIG_UWB_HWA=m -CONFIG_UWB_WHCI=m -CONFIG_UWB_I1480U=m -CONFIG_MMC=m -CONFIG_MMC_BLOCK=m -CONFIG_MMC_BLOCK_MINORS=8 -CONFIG_SDIO_UART=m -# CONFIG_MMC_TEST is not set - -# -# MMC/SD/SDIO Host Controller Drivers -# -# CONFIG_MMC_DEBUG is not set -CONFIG_MMC_SDHCI=m -CONFIG_MMC_SDHCI_PCI=m -CONFIG_MMC_RICOH_MMC=y -CONFIG_MMC_SDHCI_ACPI=m -CONFIG_MMC_SDHCI_PLTFM=m -CONFIG_MMC_SDHCI_F_SDH30=m -CONFIG_MMC_WBSD=m -CONFIG_MMC_TIFM_SD=m -CONFIG_MMC_SPI=m -CONFIG_MMC_SDRICOH_CS=m -CONFIG_MMC_CB710=m -CONFIG_MMC_VIA_SDMMC=m -CONFIG_MMC_VUB300=m -CONFIG_MMC_USHC=m -CONFIG_MMC_USDHI6ROL0=m -CONFIG_MMC_REALTEK_PCI=m -CONFIG_MMC_REALTEK_USB=m -CONFIG_MMC_CQHCI=m -CONFIG_MMC_TOSHIBA_PCI=m -CONFIG_MMC_MTK=m -CONFIG_MMC_SDHCI_XENON=m -CONFIG_MEMSTICK=m -# CONFIG_MEMSTICK_DEBUG is not set - -# -# MemoryStick drivers -# -# CONFIG_MEMSTICK_UNSAFE_RESUME is not set -CONFIG_MSPRO_BLOCK=m -CONFIG_MS_BLOCK=m - -# -# MemoryStick Host Controller Drivers -# -CONFIG_MEMSTICK_TIFM_MS=m -CONFIG_MEMSTICK_JMICRON_38X=m -CONFIG_MEMSTICK_R592=m -CONFIG_MEMSTICK_REALTEK_PCI=m -CONFIG_MEMSTICK_REALTEK_USB=m -CONFIG_NEW_LEDS=y -CONFIG_LEDS_CLASS=y -CONFIG_LEDS_CLASS_FLASH=m -# CONFIG_LEDS_BRIGHTNESS_HW_CHANGED is not set - -# -# LED drivers -# -CONFIG_LEDS_APU=m -CONFIG_LEDS_AS3645A=m 
-CONFIG_LEDS_LM3530=m -CONFIG_LEDS_LM3533=m -CONFIG_LEDS_LM3642=m -CONFIG_LEDS_LM3601X=m -CONFIG_LEDS_MT6323=m -CONFIG_LEDS_PCA9532=m -CONFIG_LEDS_PCA9532_GPIO=y -CONFIG_LEDS_GPIO=m -CONFIG_LEDS_LP3944=m -CONFIG_LEDS_LP3952=m -CONFIG_LEDS_LP55XX_COMMON=m -CONFIG_LEDS_LP5521=m -CONFIG_LEDS_LP5523=m -CONFIG_LEDS_LP5562=m -CONFIG_LEDS_LP8501=m -CONFIG_LEDS_CLEVO_MAIL=m -CONFIG_LEDS_PCA955X=m -# CONFIG_LEDS_PCA955X_GPIO is not set -CONFIG_LEDS_PCA963X=m -CONFIG_LEDS_WM831X_STATUS=m -CONFIG_LEDS_DA9052=m -CONFIG_LEDS_DAC124S085=m -CONFIG_LEDS_PWM=m -CONFIG_LEDS_REGULATOR=m -CONFIG_LEDS_BD2802=m -CONFIG_LEDS_INTEL_SS4200=m -CONFIG_LEDS_LT3593=m -CONFIG_LEDS_MC13783=m -CONFIG_LEDS_TCA6507=m -CONFIG_LEDS_TLC591XX=m -CONFIG_LEDS_LM355x=m -CONFIG_LEDS_MENF21BMC=m - -# -# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM) -# -CONFIG_LEDS_BLINKM=m -CONFIG_LEDS_MLXCPLD=m -CONFIG_LEDS_MLXREG=m -CONFIG_LEDS_USER=m -CONFIG_LEDS_NIC78BX=m - -# -# LED Triggers -# -CONFIG_LEDS_TRIGGERS=y -CONFIG_LEDS_TRIGGER_TIMER=m -CONFIG_LEDS_TRIGGER_ONESHOT=m -CONFIG_LEDS_TRIGGER_DISK=y -# CONFIG_LEDS_TRIGGER_MTD is not set -CONFIG_LEDS_TRIGGER_HEARTBEAT=m -CONFIG_LEDS_TRIGGER_BACKLIGHT=m -CONFIG_LEDS_TRIGGER_CPU=y -CONFIG_LEDS_TRIGGER_ACTIVITY=m -CONFIG_LEDS_TRIGGER_GPIO=m -CONFIG_LEDS_TRIGGER_DEFAULT_ON=m - -# -# iptables trigger is under Netfilter config (LED target) -# -CONFIG_LEDS_TRIGGER_TRANSIENT=m -CONFIG_LEDS_TRIGGER_CAMERA=m -CONFIG_LEDS_TRIGGER_PANIC=y -CONFIG_LEDS_TRIGGER_NETDEV=m -CONFIG_ACCESSIBILITY=y -CONFIG_A11Y_BRAILLE_CONSOLE=y -CONFIG_INFINIBAND=m -CONFIG_INFINIBAND_USER_MAD=m -CONFIG_INFINIBAND_USER_ACCESS=m -# CONFIG_INFINIBAND_EXP_LEGACY_VERBS_NEW_UAPI is not set -CONFIG_INFINIBAND_USER_MEM=y -CONFIG_INFINIBAND_ON_DEMAND_PAGING=y -CONFIG_INFINIBAND_ADDR_TRANS=y -CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y -CONFIG_INFINIBAND_MTHCA=m -CONFIG_INFINIBAND_MTHCA_DEBUG=y -CONFIG_INFINIBAND_QIB=m -CONFIG_INFINIBAND_QIB_DCA=y -CONFIG_INFINIBAND_CXGB3=m -CONFIG_INFINIBAND_CXGB4=m -CONFIG_INFINIBAND_I40IW=m -CONFIG_MLX4_INFINIBAND=m -CONFIG_MLX5_INFINIBAND=m -CONFIG_INFINIBAND_NES=m -# CONFIG_INFINIBAND_NES_DEBUG is not set -CONFIG_INFINIBAND_OCRDMA=m -CONFIG_INFINIBAND_VMWARE_PVRDMA=m -CONFIG_INFINIBAND_USNIC=m -CONFIG_INFINIBAND_IPOIB=m -CONFIG_INFINIBAND_IPOIB_CM=y -CONFIG_INFINIBAND_IPOIB_DEBUG=y -# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set -CONFIG_INFINIBAND_SRP=m -CONFIG_INFINIBAND_SRPT=m -CONFIG_INFINIBAND_ISER=m -CONFIG_INFINIBAND_ISERT=m -CONFIG_INFINIBAND_OPA_VNIC=m -CONFIG_INFINIBAND_RDMAVT=m -CONFIG_RDMA_RXE=m -CONFIG_INFINIBAND_HFI1=m -# CONFIG_HFI1_DEBUG_SDMA_ORDER is not set -# CONFIG_SDMA_VERBOSITY is not set -CONFIG_INFINIBAND_QEDR=m -CONFIG_INFINIBAND_BNXT_RE=m -CONFIG_EDAC_ATOMIC_SCRUB=y -CONFIG_EDAC_SUPPORT=y -CONFIG_EDAC=y -CONFIG_EDAC_LEGACY_SYSFS=y -# CONFIG_EDAC_DEBUG is not set -CONFIG_EDAC_DECODE_MCE=m -CONFIG_EDAC_GHES=y -CONFIG_EDAC_AMD64=m -CONFIG_EDAC_AMD64_ERROR_INJECTION=y -CONFIG_EDAC_E752X=m -CONFIG_EDAC_I82975X=m -CONFIG_EDAC_I3000=m -CONFIG_EDAC_I3200=m -CONFIG_EDAC_IE31200=m -CONFIG_EDAC_X38=m -CONFIG_EDAC_I5400=m -CONFIG_EDAC_I7CORE=m -CONFIG_EDAC_I5000=m -CONFIG_EDAC_I5100=m -CONFIG_EDAC_I7300=m -CONFIG_EDAC_SBRIDGE=m -CONFIG_EDAC_SKX=m -CONFIG_EDAC_PND2=m -CONFIG_RTC_LIB=y -CONFIG_RTC_MC146818_LIB=y -CONFIG_RTC_CLASS=y -CONFIG_RTC_HCTOSYS=y -CONFIG_RTC_HCTOSYS_DEVICE="rtc0" -CONFIG_RTC_SYSTOHC=y -CONFIG_RTC_SYSTOHC_DEVICE="rtc0" -# CONFIG_RTC_DEBUG is not set -CONFIG_RTC_NVMEM=y - -# -# RTC interfaces -# -CONFIG_RTC_INTF_SYSFS=y 
-CONFIG_RTC_INTF_PROC=y -CONFIG_RTC_INTF_DEV=y -CONFIG_RTC_INTF_DEV_UIE_EMUL=y -# CONFIG_RTC_DRV_TEST is not set - -# -# I2C RTC drivers -# -CONFIG_RTC_DRV_88PM80X=m -CONFIG_RTC_DRV_ABB5ZES3=m -CONFIG_RTC_DRV_ABX80X=m -CONFIG_RTC_DRV_DS1307=m -# CONFIG_RTC_DRV_DS1307_CENTURY is not set -CONFIG_RTC_DRV_DS1374=m -CONFIG_RTC_DRV_DS1374_WDT=y -CONFIG_RTC_DRV_DS1672=m -CONFIG_RTC_DRV_MAX6900=m -CONFIG_RTC_DRV_MAX8907=m -CONFIG_RTC_DRV_RS5C372=m -CONFIG_RTC_DRV_ISL1208=m -CONFIG_RTC_DRV_ISL12022=m -CONFIG_RTC_DRV_X1205=m -CONFIG_RTC_DRV_PCF8523=m -CONFIG_RTC_DRV_PCF85063=m -CONFIG_RTC_DRV_PCF85363=m -CONFIG_RTC_DRV_PCF8563=m -CONFIG_RTC_DRV_PCF8583=m -CONFIG_RTC_DRV_M41T80=m -CONFIG_RTC_DRV_M41T80_WDT=y -CONFIG_RTC_DRV_BQ32K=m -CONFIG_RTC_DRV_S35390A=m -CONFIG_RTC_DRV_FM3130=m -CONFIG_RTC_DRV_RX8010=m -CONFIG_RTC_DRV_RX8581=m -CONFIG_RTC_DRV_RX8025=m -CONFIG_RTC_DRV_EM3027=m -CONFIG_RTC_DRV_RV8803=m - -# -# SPI RTC drivers -# -CONFIG_RTC_DRV_M41T93=m -CONFIG_RTC_DRV_M41T94=m -CONFIG_RTC_DRV_DS1302=m -CONFIG_RTC_DRV_DS1305=m -CONFIG_RTC_DRV_DS1343=m -CONFIG_RTC_DRV_DS1347=m -CONFIG_RTC_DRV_DS1390=m -CONFIG_RTC_DRV_MAX6916=m -CONFIG_RTC_DRV_R9701=m -CONFIG_RTC_DRV_RX4581=m -CONFIG_RTC_DRV_RX6110=m -CONFIG_RTC_DRV_RS5C348=m -CONFIG_RTC_DRV_MAX6902=m -CONFIG_RTC_DRV_PCF2123=m -CONFIG_RTC_DRV_MCP795=m -CONFIG_RTC_I2C_AND_SPI=m - -# -# SPI and I2C RTC drivers -# -CONFIG_RTC_DRV_DS3232=m -CONFIG_RTC_DRV_DS3232_HWMON=y -CONFIG_RTC_DRV_PCF2127=m -CONFIG_RTC_DRV_RV3029C2=m -CONFIG_RTC_DRV_RV3029_HWMON=y - -# -# Platform RTC drivers -# -CONFIG_RTC_DRV_CMOS=y -CONFIG_RTC_DRV_DS1286=m -CONFIG_RTC_DRV_DS1511=m -CONFIG_RTC_DRV_DS1553=m -CONFIG_RTC_DRV_DS1685_FAMILY=m -CONFIG_RTC_DRV_DS1685=y -# CONFIG_RTC_DRV_DS1689 is not set -# CONFIG_RTC_DRV_DS17285 is not set -# CONFIG_RTC_DRV_DS17485 is not set -# CONFIG_RTC_DRV_DS17885 is not set -CONFIG_RTC_DS1685_PROC_REGS=y -CONFIG_RTC_DRV_DS1742=m -CONFIG_RTC_DRV_DS2404=m -CONFIG_RTC_DRV_DA9052=m -CONFIG_RTC_DRV_DA9063=m -CONFIG_RTC_DRV_STK17TA8=m -CONFIG_RTC_DRV_M48T86=m -CONFIG_RTC_DRV_M48T35=m -CONFIG_RTC_DRV_M48T59=m -CONFIG_RTC_DRV_MSM6242=m -CONFIG_RTC_DRV_BQ4802=m -CONFIG_RTC_DRV_RP5C01=m -CONFIG_RTC_DRV_V3020=m -CONFIG_RTC_DRV_WM831X=m -CONFIG_RTC_DRV_PCF50633=m -# CONFIG_RTC_DRV_CROS_EC is not set - -# -# on-CPU RTC drivers -# -CONFIG_RTC_DRV_FTRTC010=m -CONFIG_RTC_DRV_PCAP=m -CONFIG_RTC_DRV_MC13XXX=m -CONFIG_RTC_DRV_MT6397=m - -# -# HID Sensor RTC drivers -# -CONFIG_RTC_DRV_HID_SENSOR_TIME=m -CONFIG_DMADEVICES=y -# CONFIG_DMADEVICES_DEBUG is not set - -# -# DMA Devices -# -CONFIG_DMA_ENGINE=y -CONFIG_DMA_VIRTUAL_CHANNELS=y -CONFIG_DMA_ACPI=y -CONFIG_ALTERA_MSGDMA=m -CONFIG_INTEL_IDMA64=m -CONFIG_INTEL_IOATDMA=m -CONFIG_INTEL_MIC_X100_DMA=m -CONFIG_QCOM_HIDMA_MGMT=m -CONFIG_QCOM_HIDMA=m -CONFIG_DW_DMAC_CORE=y -CONFIG_DW_DMAC=m -CONFIG_DW_DMAC_PCI=y -CONFIG_HSU_DMA=y - -# -# DMA Clients -# -CONFIG_ASYNC_TX_DMA=y -# CONFIG_DMATEST is not set -CONFIG_DMA_ENGINE_RAID=y - -# -# DMABUF options -# -CONFIG_SYNC_FILE=y -# CONFIG_SW_SYNC is not set -CONFIG_DCA=m -CONFIG_AUXDISPLAY=y -CONFIG_HD44780=m -CONFIG_KS0108=m -CONFIG_KS0108_PORT=0x378 -CONFIG_KS0108_DELAY=2 -CONFIG_CFAG12864B=m -CONFIG_CFAG12864B_RATE=20 -CONFIG_IMG_ASCII_LCD=m -CONFIG_PANEL=m -CONFIG_PANEL_PARPORT=0 -CONFIG_PANEL_PROFILE=5 -# CONFIG_PANEL_CHANGE_MESSAGE is not set -CONFIG_CHARLCD=m -CONFIG_UIO=m -CONFIG_UIO_CIF=m -CONFIG_UIO_PDRV_GENIRQ=m -CONFIG_UIO_DMEM_GENIRQ=m -CONFIG_UIO_AEC=m -CONFIG_UIO_SERCOS3=m -CONFIG_UIO_PCI_GENERIC=m -CONFIG_UIO_NETX=m -CONFIG_UIO_PRUSS=m -CONFIG_UIO_MF624=m 
-CONFIG_UIO_HV_GENERIC=m -CONFIG_VFIO_IOMMU_TYPE1=m -CONFIG_VFIO_VIRQFD=m -CONFIG_VFIO=m -# CONFIG_VFIO_NOIOMMU is not set -CONFIG_VFIO_PCI=m -CONFIG_VFIO_PCI_VGA=y -CONFIG_VFIO_PCI_MMAP=y -CONFIG_VFIO_PCI_INTX=y -CONFIG_VFIO_PCI_IGD=y -CONFIG_VFIO_MDEV=m -CONFIG_VFIO_MDEV_DEVICE=m -CONFIG_IRQ_BYPASS_MANAGER=m -CONFIG_VIRT_DRIVERS=y -# CONFIG_VBOXGUEST is not set -CONFIG_VIRTIO=m -CONFIG_VIRTIO_MENU=y -CONFIG_VIRTIO_PCI=m -CONFIG_VIRTIO_PCI_LEGACY=y -CONFIG_VIRTIO_BALLOON=m -CONFIG_VIRTIO_INPUT=m -CONFIG_VIRTIO_MMIO=m -CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y - -# -# Microsoft Hyper-V guest support -# -CONFIG_HYPERV=m -CONFIG_HYPERV_TSCPAGE=y -CONFIG_HYPERV_UTILS=m -CONFIG_HYPERV_BALLOON=m -CONFIG_STAGING=y -CONFIG_PRISM2_USB=m -CONFIG_COMEDI=m -# CONFIG_COMEDI_DEBUG is not set -CONFIG_COMEDI_DEFAULT_BUF_SIZE_KB=2048 -CONFIG_COMEDI_DEFAULT_BUF_MAXSIZE_KB=20480 -CONFIG_COMEDI_MISC_DRIVERS=y -CONFIG_COMEDI_BOND=m -CONFIG_COMEDI_TEST=m -CONFIG_COMEDI_PARPORT=m -CONFIG_COMEDI_ISA_DRIVERS=y -CONFIG_COMEDI_PCL711=m -CONFIG_COMEDI_PCL724=m -CONFIG_COMEDI_PCL726=m -CONFIG_COMEDI_PCL730=m -CONFIG_COMEDI_PCL812=m -CONFIG_COMEDI_PCL816=m -CONFIG_COMEDI_PCL818=m -CONFIG_COMEDI_PCM3724=m -CONFIG_COMEDI_AMPLC_DIO200_ISA=m -CONFIG_COMEDI_AMPLC_PC236_ISA=m -CONFIG_COMEDI_AMPLC_PC263_ISA=m -CONFIG_COMEDI_RTI800=m -CONFIG_COMEDI_RTI802=m -CONFIG_COMEDI_DAC02=m -CONFIG_COMEDI_DAS16M1=m -CONFIG_COMEDI_DAS08_ISA=m -CONFIG_COMEDI_DAS16=m -CONFIG_COMEDI_DAS800=m -CONFIG_COMEDI_DAS1800=m -CONFIG_COMEDI_DAS6402=m -CONFIG_COMEDI_DT2801=m -CONFIG_COMEDI_DT2811=m -CONFIG_COMEDI_DT2814=m -CONFIG_COMEDI_DT2815=m -CONFIG_COMEDI_DT2817=m -CONFIG_COMEDI_DT282X=m -CONFIG_COMEDI_DMM32AT=m -CONFIG_COMEDI_FL512=m -CONFIG_COMEDI_AIO_AIO12_8=m -CONFIG_COMEDI_AIO_IIRO_16=m -CONFIG_COMEDI_II_PCI20KC=m -CONFIG_COMEDI_C6XDIGIO=m -CONFIG_COMEDI_MPC624=m -CONFIG_COMEDI_ADQ12B=m -CONFIG_COMEDI_NI_AT_A2150=m -CONFIG_COMEDI_NI_AT_AO=m -CONFIG_COMEDI_NI_ATMIO=m -CONFIG_COMEDI_NI_ATMIO16D=m -CONFIG_COMEDI_NI_LABPC_ISA=m -CONFIG_COMEDI_PCMAD=m -CONFIG_COMEDI_PCMDA12=m -CONFIG_COMEDI_PCMMIO=m -CONFIG_COMEDI_PCMUIO=m -CONFIG_COMEDI_MULTIQ3=m -CONFIG_COMEDI_S526=m -CONFIG_COMEDI_PCI_DRIVERS=m -CONFIG_COMEDI_8255_PCI=m -CONFIG_COMEDI_ADDI_WATCHDOG=m -CONFIG_COMEDI_ADDI_APCI_1032=m -CONFIG_COMEDI_ADDI_APCI_1500=m -CONFIG_COMEDI_ADDI_APCI_1516=m -CONFIG_COMEDI_ADDI_APCI_1564=m -CONFIG_COMEDI_ADDI_APCI_16XX=m -CONFIG_COMEDI_ADDI_APCI_2032=m -CONFIG_COMEDI_ADDI_APCI_2200=m -CONFIG_COMEDI_ADDI_APCI_3120=m -CONFIG_COMEDI_ADDI_APCI_3501=m -CONFIG_COMEDI_ADDI_APCI_3XXX=m -CONFIG_COMEDI_ADL_PCI6208=m -CONFIG_COMEDI_ADL_PCI7X3X=m -CONFIG_COMEDI_ADL_PCI8164=m -CONFIG_COMEDI_ADL_PCI9111=m -CONFIG_COMEDI_ADL_PCI9118=m -CONFIG_COMEDI_ADV_PCI1710=m -CONFIG_COMEDI_ADV_PCI1720=m -CONFIG_COMEDI_ADV_PCI1723=m -CONFIG_COMEDI_ADV_PCI1724=m -CONFIG_COMEDI_ADV_PCI1760=m -CONFIG_COMEDI_ADV_PCI_DIO=m -CONFIG_COMEDI_AMPLC_DIO200_PCI=m -CONFIG_COMEDI_AMPLC_PC236_PCI=m -CONFIG_COMEDI_AMPLC_PC263_PCI=m -CONFIG_COMEDI_AMPLC_PCI224=m -CONFIG_COMEDI_AMPLC_PCI230=m -CONFIG_COMEDI_CONTEC_PCI_DIO=m -CONFIG_COMEDI_DAS08_PCI=m -CONFIG_COMEDI_DT3000=m -CONFIG_COMEDI_DYNA_PCI10XX=m -CONFIG_COMEDI_GSC_HPDI=m -CONFIG_COMEDI_MF6X4=m -CONFIG_COMEDI_ICP_MULTI=m -CONFIG_COMEDI_DAQBOARD2000=m -CONFIG_COMEDI_JR3_PCI=m -CONFIG_COMEDI_KE_COUNTER=m -CONFIG_COMEDI_CB_PCIDAS64=m -CONFIG_COMEDI_CB_PCIDAS=m -CONFIG_COMEDI_CB_PCIDDA=m -CONFIG_COMEDI_CB_PCIMDAS=m -CONFIG_COMEDI_CB_PCIMDDA=m -CONFIG_COMEDI_ME4000=m -CONFIG_COMEDI_ME_DAQ=m -CONFIG_COMEDI_NI_6527=m -CONFIG_COMEDI_NI_65XX=m 
-CONFIG_COMEDI_NI_660X=m -CONFIG_COMEDI_NI_670X=m -CONFIG_COMEDI_NI_LABPC_PCI=m -CONFIG_COMEDI_NI_PCIDIO=m -CONFIG_COMEDI_NI_PCIMIO=m -CONFIG_COMEDI_RTD520=m -CONFIG_COMEDI_S626=m -CONFIG_COMEDI_MITE=m -CONFIG_COMEDI_NI_TIOCMD=m -CONFIG_COMEDI_PCMCIA_DRIVERS=m -CONFIG_COMEDI_CB_DAS16_CS=m -CONFIG_COMEDI_DAS08_CS=m -CONFIG_COMEDI_NI_DAQ_700_CS=m -CONFIG_COMEDI_NI_DAQ_DIO24_CS=m -CONFIG_COMEDI_NI_LABPC_CS=m -CONFIG_COMEDI_NI_MIO_CS=m -CONFIG_COMEDI_QUATECH_DAQP_CS=m -CONFIG_COMEDI_USB_DRIVERS=m -CONFIG_COMEDI_DT9812=m -CONFIG_COMEDI_NI_USB6501=m -CONFIG_COMEDI_USBDUX=m -CONFIG_COMEDI_USBDUXFAST=m -CONFIG_COMEDI_USBDUXSIGMA=m -CONFIG_COMEDI_VMK80XX=m -CONFIG_COMEDI_8254=m -CONFIG_COMEDI_8255=m -CONFIG_COMEDI_8255_SA=m -CONFIG_COMEDI_KCOMEDILIB=m -CONFIG_COMEDI_AMPLC_DIO200=m -CONFIG_COMEDI_AMPLC_PC236=m -CONFIG_COMEDI_DAS08=m -CONFIG_COMEDI_ISADMA=m -CONFIG_COMEDI_NI_LABPC=m -CONFIG_COMEDI_NI_LABPC_ISADMA=m -CONFIG_COMEDI_NI_TIO=m -CONFIG_RTL8192U=m -CONFIG_RTLLIB=m -CONFIG_RTLLIB_CRYPTO_CCMP=m -CONFIG_RTLLIB_CRYPTO_TKIP=m -CONFIG_RTLLIB_CRYPTO_WEP=m -CONFIG_RTL8192E=m -CONFIG_RTL8723BS=m -CONFIG_R8712U=m -CONFIG_R8188EU=m -CONFIG_88EU_AP_MODE=y -CONFIG_R8822BE=m -CONFIG_RTLWIFI_DEBUG_ST=y -CONFIG_RTS5208=m -CONFIG_VT6655=m -CONFIG_VT6656=m - -# -# IIO staging drivers -# - -# -# Accelerometers -# -CONFIG_ADIS16203=m -CONFIG_ADIS16240=m - -# -# Analog to digital converters -# -CONFIG_AD7606=m -CONFIG_AD7606_IFACE_PARALLEL=m -CONFIG_AD7606_IFACE_SPI=m -CONFIG_AD7780=m -CONFIG_AD7816=m -CONFIG_AD7192=m -CONFIG_AD7280=m - -# -# Analog digital bi-direction converters -# -CONFIG_ADT7316=m -CONFIG_ADT7316_SPI=m -CONFIG_ADT7316_I2C=m - -# -# Capacitance to digital converters -# -CONFIG_AD7150=m -CONFIG_AD7152=m -CONFIG_AD7746=m - -# -# Direct Digital Synthesis -# -CONFIG_AD9832=m -CONFIG_AD9834=m - -# -# Network Analyzer, Impedance Converters -# -CONFIG_AD5933=m - -# -# Active energy metering IC -# -CONFIG_ADE7854=m -CONFIG_ADE7854_I2C=m -CONFIG_ADE7854_SPI=m - -# -# Resolver to digital converters -# -CONFIG_AD2S90=m -CONFIG_AD2S1210=m -CONFIG_FB_SM750=m -CONFIG_FB_XGI=m - -# -# Speakup console speech -# -CONFIG_SPEAKUP=m -CONFIG_SPEAKUP_SYNTH_ACNTSA=m -CONFIG_SPEAKUP_SYNTH_APOLLO=m -CONFIG_SPEAKUP_SYNTH_AUDPTR=m -CONFIG_SPEAKUP_SYNTH_BNS=m -CONFIG_SPEAKUP_SYNTH_DECTLK=m -CONFIG_SPEAKUP_SYNTH_DECEXT=m -CONFIG_SPEAKUP_SYNTH_LTLK=m -CONFIG_SPEAKUP_SYNTH_SOFT=m -CONFIG_SPEAKUP_SYNTH_SPKOUT=m -CONFIG_SPEAKUP_SYNTH_TXPRT=m -# CONFIG_SPEAKUP_SYNTH_DUMMY is not set -CONFIG_STAGING_MEDIA=y -CONFIG_I2C_BCM2048=m -CONFIG_SOC_CAMERA_IMX074=m -CONFIG_SOC_CAMERA_MT9T031=m -CONFIG_VIDEO_ZORAN=m -CONFIG_VIDEO_ZORAN_DC30=m -CONFIG_VIDEO_ZORAN_ZR36060=m -CONFIG_VIDEO_ZORAN_BUZ=m -CONFIG_VIDEO_ZORAN_DC10=m -CONFIG_VIDEO_ZORAN_LML33=m -CONFIG_VIDEO_ZORAN_LML33R10=m -CONFIG_VIDEO_ZORAN_AVS6EYES=m - -# -# Android -# -CONFIG_LTE_GDM724X=m -CONFIG_FIREWIRE_SERIAL=m -CONFIG_FWTTY_MAX_TOTAL_PORTS=64 -CONFIG_FWTTY_MAX_CARD_PORTS=32 -CONFIG_MTD_SPINAND_MT29F=m -CONFIG_MTD_SPINAND_ONDIEECC=y -CONFIG_DGNC=m -CONFIG_GS_FPGABOOT=m -CONFIG_UNISYSSPAR=y -CONFIG_FB_TFT=m -CONFIG_FB_TFT_AGM1264K_FL=m -CONFIG_FB_TFT_BD663474=m -CONFIG_FB_TFT_HX8340BN=m -CONFIG_FB_TFT_HX8347D=m -CONFIG_FB_TFT_HX8353D=m -# CONFIG_FB_TFT_HX8357D is not set -# CONFIG_FB_TFT_ILI9163 is not set -CONFIG_FB_TFT_ILI9320=m -CONFIG_FB_TFT_ILI9325=m -CONFIG_FB_TFT_ILI9340=m -CONFIG_FB_TFT_ILI9341=m -CONFIG_FB_TFT_ILI9481=m -CONFIG_FB_TFT_ILI9486=m -CONFIG_FB_TFT_PCD8544=m -CONFIG_FB_TFT_RA8875=m -CONFIG_FB_TFT_S6D02A1=m -CONFIG_FB_TFT_S6D1121=m 
-CONFIG_FB_TFT_SH1106=m -CONFIG_FB_TFT_SSD1289=m -CONFIG_FB_TFT_SSD1305=m -CONFIG_FB_TFT_SSD1306=m -CONFIG_FB_TFT_SSD1331=m -CONFIG_FB_TFT_SSD1351=m -CONFIG_FB_TFT_ST7735R=m -CONFIG_FB_TFT_ST7789V=m -CONFIG_FB_TFT_TINYLCD=m -CONFIG_FB_TFT_TLS8204=m -CONFIG_FB_TFT_UC1611=m -CONFIG_FB_TFT_UC1701=m -CONFIG_FB_TFT_UPD161704=m -CONFIG_FB_TFT_WATTEROTT=m -CONFIG_FB_FLEX=m -CONFIG_FB_TFT_FBTFT_DEVICE=m -CONFIG_WILC1000=m -CONFIG_WILC1000_SDIO=m -CONFIG_WILC1000_SPI=m -# CONFIG_WILC1000_HW_OOB_INTR is not set -CONFIG_MOST=m -CONFIG_MOST_CDEV=m -CONFIG_MOST_NET=m -CONFIG_MOST_SOUND=m -CONFIG_MOST_VIDEO=m -CONFIG_MOST_I2C=m -CONFIG_MOST_USB=m -CONFIG_KS7010=m -CONFIG_GREYBUS=m -CONFIG_GREYBUS_ES2=m -CONFIG_GREYBUS_AUDIO=m -CONFIG_GREYBUS_BOOTROM=m -CONFIG_GREYBUS_FIRMWARE=m -CONFIG_GREYBUS_HID=m -CONFIG_GREYBUS_LIGHT=m -CONFIG_GREYBUS_LOG=m -CONFIG_GREYBUS_LOOPBACK=m -CONFIG_GREYBUS_POWER=m -CONFIG_GREYBUS_RAW=m -CONFIG_GREYBUS_VIBRATOR=m -CONFIG_GREYBUS_BRIDGED_PHY=m -CONFIG_GREYBUS_GPIO=m -CONFIG_GREYBUS_I2C=m -CONFIG_GREYBUS_PWM=m -CONFIG_GREYBUS_SDIO=m -CONFIG_GREYBUS_SPI=m -CONFIG_GREYBUS_UART=m -CONFIG_GREYBUS_USB=m -# CONFIG_DRM_VBOXVIDEO is not set -CONFIG_PI433=m -CONFIG_MTK_MMC=m -# CONFIG_MTK_AEE_KDUMP is not set -# CONFIG_MTK_MMC_CD_POLL is not set - -# -# Gasket devices -# -# CONFIG_STAGING_GASKET_FRAMEWORK is not set -# CONFIG_XIL_AXIS_FIFO is not set -# CONFIG_EROFS_FS is not set -CONFIG_X86_PLATFORM_DEVICES=y -CONFIG_ACER_WMI=m -CONFIG_ACER_WIRELESS=m -CONFIG_ACERHDF=m -CONFIG_ALIENWARE_WMI=m -CONFIG_ASUS_LAPTOP=m -CONFIG_DELL_SMBIOS=m -CONFIG_DELL_SMBIOS_WMI=y -CONFIG_DELL_SMBIOS_SMM=y -CONFIG_DELL_LAPTOP=m -CONFIG_DELL_WMI=m -CONFIG_DELL_WMI_DESCRIPTOR=m -CONFIG_DELL_WMI_AIO=m -CONFIG_DELL_WMI_LED=m -CONFIG_DELL_SMO8800=m -CONFIG_DELL_RBTN=m -CONFIG_FUJITSU_LAPTOP=m -CONFIG_FUJITSU_TABLET=m -CONFIG_AMILO_RFKILL=m -CONFIG_GPD_POCKET_FAN=m -CONFIG_HP_ACCEL=m -CONFIG_HP_WIRELESS=m -CONFIG_HP_WMI=m -CONFIG_MSI_LAPTOP=m -CONFIG_PANASONIC_LAPTOP=m -CONFIG_COMPAL_LAPTOP=m -CONFIG_SONY_LAPTOP=m -CONFIG_SONYPI_COMPAT=y -CONFIG_IDEAPAD_LAPTOP=m -CONFIG_SURFACE3_WMI=m -CONFIG_THINKPAD_ACPI=m -CONFIG_THINKPAD_ACPI_ALSA_SUPPORT=y -# CONFIG_THINKPAD_ACPI_DEBUGFACILITIES is not set -# CONFIG_THINKPAD_ACPI_DEBUG is not set -# CONFIG_THINKPAD_ACPI_UNSAFE_LEDS is not set -CONFIG_THINKPAD_ACPI_VIDEO=y -CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y -CONFIG_SENSORS_HDAPS=m -CONFIG_INTEL_MENLOW=m -CONFIG_EEEPC_LAPTOP=m -CONFIG_ASUS_WMI=m -CONFIG_ASUS_NB_WMI=m -CONFIG_EEEPC_WMI=m -CONFIG_ASUS_WIRELESS=m -CONFIG_ACPI_WMI=m -CONFIG_WMI_BMOF=m -CONFIG_INTEL_WMI_THUNDERBOLT=m -CONFIG_MSI_WMI=m -CONFIG_PEAQ_WMI=m -CONFIG_TOPSTAR_LAPTOP=m -CONFIG_ACPI_TOSHIBA=m -CONFIG_TOSHIBA_BT_RFKILL=m -CONFIG_TOSHIBA_HAPS=m -CONFIG_TOSHIBA_WMI=m -CONFIG_ACPI_CMPC=m -CONFIG_INTEL_CHT_INT33FE=m -CONFIG_INTEL_INT0002_VGPIO=m -CONFIG_INTEL_HID_EVENT=m -CONFIG_INTEL_VBTN=m -CONFIG_INTEL_IPS=m -CONFIG_INTEL_PMC_CORE=y -CONFIG_IBM_RTL=m -CONFIG_SAMSUNG_LAPTOP=m -CONFIG_MXM_WMI=m -CONFIG_INTEL_OAKTRAIL=m -CONFIG_SAMSUNG_Q10=m -CONFIG_APPLE_GMUX=m -CONFIG_INTEL_RST=m -CONFIG_INTEL_SMARTCONNECT=m -CONFIG_PVPANIC=m -CONFIG_INTEL_PMC_IPC=m -CONFIG_INTEL_BXTWC_PMIC_TMU=m -CONFIG_SURFACE_PRO3_BUTTON=m -CONFIG_SURFACE_3_BUTTON=m -CONFIG_INTEL_PUNIT_IPC=m -CONFIG_INTEL_TELEMETRY=m -CONFIG_MLX_PLATFORM=m -# CONFIG_INTEL_TURBO_MAX_3 is not set -CONFIG_INTEL_CHTDC_TI_PWRBTN=m -# CONFIG_I2C_MULTI_INSTANTIATE is not set -CONFIG_PMC_ATOM=y -CONFIG_CHROME_PLATFORMS=y -CONFIG_CHROMEOS_LAPTOP=m -CONFIG_CHROMEOS_PSTORE=m -CONFIG_CHROMEOS_TBMC=m -# 
CONFIG_CROS_EC_I2C is not set -# CONFIG_CROS_EC_SPI is not set -CONFIG_CROS_EC_LPC=m -CONFIG_CROS_EC_LPC_MEC=y -CONFIG_CROS_EC_PROTO=y -CONFIG_CROS_KBD_LED_BACKLIGHT=m -CONFIG_MELLANOX_PLATFORM=y -CONFIG_MLXREG_HOTPLUG=m -# CONFIG_MLXREG_IO is not set -CONFIG_CLKDEV_LOOKUP=y -CONFIG_HAVE_CLK_PREPARE=y -CONFIG_COMMON_CLK=y - -# -# Common Clock Framework -# -CONFIG_COMMON_CLK_WM831X=m -# CONFIG_COMMON_CLK_MAX9485 is not set -CONFIG_COMMON_CLK_SI5351=m -CONFIG_COMMON_CLK_SI544=m -CONFIG_COMMON_CLK_CDCE706=m -CONFIG_COMMON_CLK_CS2000_CP=m -CONFIG_COMMON_CLK_PWM=m -CONFIG_HWSPINLOCK=y - -# -# Clock Source drivers -# -CONFIG_CLKEVT_I8253=y -CONFIG_I8253_LOCK=y -CONFIG_CLKBLD_I8253=y -CONFIG_MAILBOX=y -CONFIG_PCC=y -CONFIG_ALTERA_MBOX=m -CONFIG_IOMMU_API=y -CONFIG_IOMMU_SUPPORT=y - -# -# Generic IOMMU Pagetable Support -# -# CONFIG_IOMMU_DEBUGFS is not set -# CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set -CONFIG_IOMMU_IOVA=y -CONFIG_AMD_IOMMU=y -CONFIG_AMD_IOMMU_V2=m -CONFIG_DMAR_TABLE=y -CONFIG_INTEL_IOMMU=y -CONFIG_INTEL_IOMMU_SVM=y -# CONFIG_INTEL_IOMMU_DEFAULT_ON is not set -CONFIG_INTEL_IOMMU_FLOPPY_WA=y -CONFIG_IRQ_REMAP=y - -# -# Remoteproc drivers -# -CONFIG_REMOTEPROC=m - -# -# Rpmsg drivers -# -CONFIG_RPMSG=m -CONFIG_RPMSG_CHAR=m -CONFIG_RPMSG_QCOM_GLINK_NATIVE=m -CONFIG_RPMSG_QCOM_GLINK_RPM=m -CONFIG_RPMSG_VIRTIO=m -CONFIG_SOUNDWIRE=y - -# -# SoundWire Devices -# -CONFIG_SOUNDWIRE_BUS=m -CONFIG_SOUNDWIRE_CADENCE=m -CONFIG_SOUNDWIRE_INTEL=m - -# -# SOC (System On Chip) specific Drivers -# - -# -# Amlogic SoC drivers -# - -# -# Broadcom SoC drivers -# - -# -# NXP/Freescale QorIQ SoC drivers -# - -# -# i.MX SoC drivers -# - -# -# Qualcomm SoC drivers -# -CONFIG_SOC_TI=y - -# -# Xilinx SoC drivers -# -CONFIG_XILINX_VCU=m -CONFIG_PM_DEVFREQ=y - -# -# DEVFREQ Governors -# -CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y -CONFIG_DEVFREQ_GOV_PERFORMANCE=y -CONFIG_DEVFREQ_GOV_POWERSAVE=y -CONFIG_DEVFREQ_GOV_USERSPACE=y -CONFIG_DEVFREQ_GOV_PASSIVE=m - -# -# DEVFREQ Drivers -# -CONFIG_PM_DEVFREQ_EVENT=y -CONFIG_EXTCON=y - -# -# Extcon Device Drivers -# -CONFIG_EXTCON_ADC_JACK=m -CONFIG_EXTCON_ARIZONA=m -CONFIG_EXTCON_AXP288=m -CONFIG_EXTCON_GPIO=m -# CONFIG_EXTCON_INTEL_INT3496 is not set -CONFIG_EXTCON_MAX14577=m -CONFIG_EXTCON_MAX3355=m -CONFIG_EXTCON_MAX77693=m -CONFIG_EXTCON_RT8973A=m -CONFIG_EXTCON_SM5502=m -# CONFIG_EXTCON_USB_GPIO is not set -CONFIG_EXTCON_USBC_CROS_EC=m -CONFIG_MEMORY=y -CONFIG_IIO=m -CONFIG_IIO_BUFFER=y -CONFIG_IIO_BUFFER_CB=m -CONFIG_IIO_BUFFER_HW_CONSUMER=m -CONFIG_IIO_KFIFO_BUF=m -CONFIG_IIO_TRIGGERED_BUFFER=m -CONFIG_IIO_CONFIGFS=m -CONFIG_IIO_TRIGGER=y -CONFIG_IIO_CONSUMERS_PER_TRIGGER=2 -CONFIG_IIO_SW_DEVICE=m -CONFIG_IIO_SW_TRIGGER=m -CONFIG_IIO_TRIGGERED_EVENT=m - -# -# Accelerometers -# -CONFIG_ADIS16201=m -CONFIG_ADIS16209=m -CONFIG_BMA180=m -CONFIG_BMA220=m -CONFIG_BMC150_ACCEL=m -CONFIG_BMC150_ACCEL_I2C=m -CONFIG_BMC150_ACCEL_SPI=m -CONFIG_DA280=m -CONFIG_DA311=m -CONFIG_DMARD09=m -CONFIG_DMARD10=m -CONFIG_HID_SENSOR_ACCEL_3D=m -CONFIG_IIO_CROS_EC_ACCEL_LEGACY=m -CONFIG_IIO_ST_ACCEL_3AXIS=m -CONFIG_IIO_ST_ACCEL_I2C_3AXIS=m -CONFIG_IIO_ST_ACCEL_SPI_3AXIS=m -CONFIG_KXSD9=m -CONFIG_KXSD9_SPI=m -CONFIG_KXSD9_I2C=m -CONFIG_KXCJK1013=m -CONFIG_MC3230=m -CONFIG_MMA7455=m -CONFIG_MMA7455_I2C=m -CONFIG_MMA7455_SPI=m -CONFIG_MMA7660=m -CONFIG_MMA8452=m -CONFIG_MMA9551_CORE=m -CONFIG_MMA9551=m -CONFIG_MMA9553=m -CONFIG_MXC4005=m -CONFIG_MXC6255=m -CONFIG_SCA3000=m -CONFIG_STK8312=m -CONFIG_STK8BA50=m - -# -# Analog to digital converters -# -CONFIG_AD_SIGMA_DELTA=m -CONFIG_AD7266=m 
-CONFIG_AD7291=m -CONFIG_AD7298=m -CONFIG_AD7476=m -CONFIG_AD7766=m -CONFIG_AD7791=m -CONFIG_AD7793=m -CONFIG_AD7887=m -CONFIG_AD7923=m -CONFIG_AD799X=m -CONFIG_AXP20X_ADC=m -CONFIG_AXP288_ADC=m -CONFIG_CC10001_ADC=m -CONFIG_DA9150_GPADC=m -CONFIG_DLN2_ADC=m -CONFIG_HI8435=m -CONFIG_HX711=m -CONFIG_INA2XX_ADC=m -CONFIG_LTC2471=m -CONFIG_LTC2485=m -CONFIG_LTC2497=m -CONFIG_MAX1027=m -CONFIG_MAX11100=m -CONFIG_MAX1118=m -CONFIG_MAX1363=m -CONFIG_MAX9611=m -CONFIG_MCP320X=m -CONFIG_MCP3422=m -CONFIG_MEN_Z188_ADC=m -CONFIG_NAU7802=m -CONFIG_QCOM_VADC_COMMON=m -CONFIG_QCOM_SPMI_IADC=m -CONFIG_QCOM_SPMI_VADC=m -CONFIG_TI_ADC081C=m -CONFIG_TI_ADC0832=m -CONFIG_TI_ADC084S021=m -CONFIG_TI_ADC12138=m -CONFIG_TI_ADC108S102=m -CONFIG_TI_ADC128S052=m -CONFIG_TI_ADC161S626=m -CONFIG_TI_ADS1015=m -CONFIG_TI_ADS7950=m -CONFIG_TI_AM335X_ADC=m -CONFIG_TI_TLC4541=m -CONFIG_VIPERBOARD_ADC=m - -# -# Analog Front Ends -# - -# -# Amplifiers -# -CONFIG_AD8366=m - -# -# Chemical Sensors -# -CONFIG_ATLAS_PH_SENSOR=m -# CONFIG_BME680 is not set -CONFIG_CCS811=m -CONFIG_IAQCORE=m -CONFIG_VZ89X=m -CONFIG_IIO_CROS_EC_SENSORS_CORE=m -CONFIG_IIO_CROS_EC_SENSORS=m - -# -# Hid Sensor IIO Common -# -CONFIG_HID_SENSOR_IIO_COMMON=m -CONFIG_HID_SENSOR_IIO_TRIGGER=m -CONFIG_IIO_MS_SENSORS_I2C=m - -# -# SSP Sensor Common -# -CONFIG_IIO_SSP_SENSORS_COMMONS=m -CONFIG_IIO_SSP_SENSORHUB=m -CONFIG_IIO_ST_SENSORS_I2C=m -CONFIG_IIO_ST_SENSORS_SPI=m -CONFIG_IIO_ST_SENSORS_CORE=m - -# -# Counters -# - -# -# Digital to analog converters -# -CONFIG_AD5064=m -CONFIG_AD5360=m -CONFIG_AD5380=m -CONFIG_AD5421=m -CONFIG_AD5446=m -CONFIG_AD5449=m -CONFIG_AD5592R_BASE=m -CONFIG_AD5592R=m -CONFIG_AD5593R=m -CONFIG_AD5504=m -CONFIG_AD5624R_SPI=m -CONFIG_LTC2632=m -CONFIG_AD5686=m -CONFIG_AD5686_SPI=m -CONFIG_AD5696_I2C=m -CONFIG_AD5755=m -# CONFIG_AD5758 is not set -CONFIG_AD5761=m -CONFIG_AD5764=m -CONFIG_AD5791=m -CONFIG_AD7303=m -CONFIG_AD8801=m -CONFIG_DS4424=m -CONFIG_M62332=m -CONFIG_MAX517=m -CONFIG_MCP4725=m -CONFIG_MCP4922=m -CONFIG_TI_DAC082S085=m -CONFIG_TI_DAC5571=m - -# -# IIO dummy driver -# -# CONFIG_IIO_SIMPLE_DUMMY is not set - -# -# Frequency Synthesizers DDS/PLL -# - -# -# Clock Generator/Distribution -# -CONFIG_AD9523=m - -# -# Phase-Locked Loop (PLL) frequency synthesizers -# -CONFIG_ADF4350=m - -# -# Digital gyroscope sensors -# -CONFIG_ADIS16080=m -CONFIG_ADIS16130=m -CONFIG_ADIS16136=m -CONFIG_ADIS16260=m -CONFIG_ADXRS450=m -CONFIG_BMG160=m -CONFIG_BMG160_I2C=m -CONFIG_BMG160_SPI=m -CONFIG_HID_SENSOR_GYRO_3D=m -CONFIG_MPU3050=m -CONFIG_MPU3050_I2C=m -CONFIG_IIO_ST_GYRO_3AXIS=m -CONFIG_IIO_ST_GYRO_I2C_3AXIS=m -CONFIG_IIO_ST_GYRO_SPI_3AXIS=m -CONFIG_ITG3200=m - -# -# Health Sensors -# - -# -# Heart Rate Monitors -# -CONFIG_AFE4403=m -CONFIG_AFE4404=m -CONFIG_MAX30100=m -CONFIG_MAX30102=m - -# -# Humidity sensors -# -CONFIG_AM2315=m -CONFIG_DHT11=m -CONFIG_HDC100X=m -CONFIG_HID_SENSOR_HUMIDITY=m -CONFIG_HTS221=m -CONFIG_HTS221_I2C=m -CONFIG_HTS221_SPI=m -CONFIG_HTU21=m -CONFIG_SI7005=m -CONFIG_SI7020=m - -# -# Inertial measurement units -# -CONFIG_ADIS16400=m -CONFIG_ADIS16480=m -CONFIG_BMI160=m -CONFIG_BMI160_I2C=m -CONFIG_BMI160_SPI=m -CONFIG_KMX61=m -CONFIG_INV_MPU6050_IIO=m -CONFIG_INV_MPU6050_I2C=m -CONFIG_INV_MPU6050_SPI=m -CONFIG_IIO_ST_LSM6DSX=m -CONFIG_IIO_ST_LSM6DSX_I2C=m -CONFIG_IIO_ST_LSM6DSX_SPI=m -CONFIG_IIO_ADIS_LIB=m -CONFIG_IIO_ADIS_LIB_BUFFER=y - -# -# Light sensors -# -# CONFIG_ACPI_ALS is not set -CONFIG_ADJD_S311=m -CONFIG_AL3320A=m -CONFIG_APDS9300=m -CONFIG_APDS9960=m -CONFIG_BH1750=m -CONFIG_BH1780=m 
-CONFIG_CM32181=m -CONFIG_CM3232=m -CONFIG_CM3323=m -CONFIG_CM36651=m -CONFIG_IIO_CROS_EC_LIGHT_PROX=m -CONFIG_GP2AP020A00F=m -CONFIG_SENSORS_ISL29018=m -CONFIG_SENSORS_ISL29028=m -CONFIG_ISL29125=m -CONFIG_HID_SENSOR_ALS=m -CONFIG_HID_SENSOR_PROX=m -CONFIG_JSA1212=m -CONFIG_RPR0521=m -CONFIG_SENSORS_LM3533=m -CONFIG_LTR501=m -CONFIG_LV0104CS=m -CONFIG_MAX44000=m -CONFIG_OPT3001=m -CONFIG_PA12203001=m -# CONFIG_SI1133 is not set -CONFIG_SI1145=m -CONFIG_STK3310=m -CONFIG_ST_UVIS25=m -CONFIG_ST_UVIS25_I2C=m -CONFIG_ST_UVIS25_SPI=m -CONFIG_TCS3414=m -CONFIG_TCS3472=m -CONFIG_SENSORS_TSL2563=m -CONFIG_TSL2583=m -CONFIG_TSL2772=m -CONFIG_TSL4531=m -CONFIG_US5182D=m -CONFIG_VCNL4000=m -CONFIG_VEML6070=m -CONFIG_VL6180=m -CONFIG_ZOPT2201=m - -# -# Magnetometer sensors -# -CONFIG_AK8975=m -CONFIG_AK09911=m -CONFIG_BMC150_MAGN=m -CONFIG_BMC150_MAGN_I2C=m -CONFIG_BMC150_MAGN_SPI=m -CONFIG_MAG3110=m -CONFIG_HID_SENSOR_MAGNETOMETER_3D=m -CONFIG_MMC35240=m -CONFIG_IIO_ST_MAGN_3AXIS=m -CONFIG_IIO_ST_MAGN_I2C_3AXIS=m -CONFIG_IIO_ST_MAGN_SPI_3AXIS=m -CONFIG_SENSORS_HMC5843=m -CONFIG_SENSORS_HMC5843_I2C=m -CONFIG_SENSORS_HMC5843_SPI=m - -# -# Multiplexers -# - -# -# Inclinometer sensors -# -CONFIG_HID_SENSOR_INCLINOMETER_3D=m -CONFIG_HID_SENSOR_DEVICE_ROTATION=m - -# -# Triggers - standalone -# -CONFIG_IIO_HRTIMER_TRIGGER=m -CONFIG_IIO_INTERRUPT_TRIGGER=m -CONFIG_IIO_TIGHTLOOP_TRIGGER=m -CONFIG_IIO_SYSFS_TRIGGER=m - -# -# Digital potentiometers -# -CONFIG_AD5272=m -CONFIG_DS1803=m -CONFIG_MAX5481=m -CONFIG_MAX5487=m -CONFIG_MCP4018=m -CONFIG_MCP4131=m -CONFIG_MCP4531=m -CONFIG_TPL0102=m - -# -# Digital potentiostats -# -CONFIG_LMP91000=m - -# -# Pressure sensors -# -CONFIG_ABP060MG=m -CONFIG_BMP280=m -CONFIG_BMP280_I2C=m -CONFIG_BMP280_SPI=m -CONFIG_IIO_CROS_EC_BARO=m -CONFIG_HID_SENSOR_PRESS=m -CONFIG_HP03=m -CONFIG_MPL115=m -CONFIG_MPL115_I2C=m -CONFIG_MPL115_SPI=m -CONFIG_MPL3115=m -CONFIG_MS5611=m -CONFIG_MS5611_I2C=m -CONFIG_MS5611_SPI=m -CONFIG_MS5637=m -CONFIG_IIO_ST_PRESS=m -CONFIG_IIO_ST_PRESS_I2C=m -CONFIG_IIO_ST_PRESS_SPI=m -CONFIG_T5403=m -CONFIG_HP206C=m -CONFIG_ZPA2326=m -CONFIG_ZPA2326_I2C=m -CONFIG_ZPA2326_SPI=m - -# -# Lightning sensors -# -CONFIG_AS3935=m - -# -# Proximity and distance sensors -# -# CONFIG_ISL29501 is not set -CONFIG_LIDAR_LITE_V2=m -CONFIG_RFD77402=m -CONFIG_SRF04=m -CONFIG_SX9500=m -CONFIG_SRF08=m - -# -# Resolver to digital converters -# -CONFIG_AD2S1200=m - -# -# Temperature sensors -# -CONFIG_MAXIM_THERMOCOUPLE=m -CONFIG_HID_SENSOR_TEMP=m -CONFIG_MLX90614=m -CONFIG_MLX90632=m -CONFIG_TMP006=m -CONFIG_TMP007=m -CONFIG_TSYS01=m -CONFIG_TSYS02D=m -CONFIG_NTB=m -CONFIG_NTB_AMD=m -CONFIG_NTB_IDT=m -CONFIG_NTB_INTEL=m -CONFIG_NTB_SWITCHTEC=m -CONFIG_NTB_PINGPONG=m -CONFIG_NTB_TOOL=m -CONFIG_NTB_PERF=m -CONFIG_NTB_TRANSPORT=m -CONFIG_VME_BUS=y - -# -# VME Bridge Drivers -# -CONFIG_VME_CA91CX42=m -CONFIG_VME_TSI148=m -CONFIG_VME_FAKE=m - -# -# VME Board Drivers -# -CONFIG_VMIVME_7805=m - -# -# VME Device Drivers -# -CONFIG_VME_USER=m -CONFIG_PWM=y -CONFIG_PWM_SYSFS=y -CONFIG_PWM_CROS_EC=m -CONFIG_PWM_LP3943=m -CONFIG_PWM_LPSS=m -CONFIG_PWM_LPSS_PCI=m -CONFIG_PWM_LPSS_PLATFORM=m -CONFIG_PWM_PCA9685=m - -# -# IRQ chip support -# -CONFIG_ARM_GIC_MAX_NR=1 -CONFIG_IPACK_BUS=m -CONFIG_BOARD_TPCI200=m -CONFIG_SERIAL_IPOCTAL=m -CONFIG_RESET_CONTROLLER=y -CONFIG_RESET_TI_SYSCON=m -CONFIG_FMC=m -CONFIG_FMC_FAKEDEV=m -CONFIG_FMC_TRIVIAL=m -CONFIG_FMC_WRITE_EEPROM=m -CONFIG_FMC_CHARDEV=m - -# -# PHY Subsystem -# -CONFIG_GENERIC_PHY=y -CONFIG_BCM_KONA_USB2_PHY=m 
-CONFIG_PHY_PXA_28NM_HSIC=m -CONFIG_PHY_PXA_28NM_USB2=m -CONFIG_PHY_CPCAP_USB=m -CONFIG_PHY_QCOM_USB_HS=m -CONFIG_PHY_QCOM_USB_HSIC=m -CONFIG_PHY_SAMSUNG_USB2=m -CONFIG_PHY_TUSB1210=m -CONFIG_POWERCAP=y -CONFIG_INTEL_RAPL=m -# CONFIG_IDLE_INJECT is not set -CONFIG_MCB=m -CONFIG_MCB_PCI=m -CONFIG_MCB_LPC=m - -# -# Performance monitor support -# -CONFIG_RAS=y -CONFIG_RAS_CEC=y -CONFIG_THUNDERBOLT=m - -# -# Android -# -# CONFIG_ANDROID is not set -CONFIG_LIBNVDIMM=m -CONFIG_BLK_DEV_PMEM=m -CONFIG_ND_BLK=m -CONFIG_ND_CLAIM=y -CONFIG_ND_BTT=m -CONFIG_BTT=y -CONFIG_DAX_DRIVER=y -CONFIG_DAX=y -CONFIG_DEV_DAX=m -CONFIG_NVMEM=y -CONFIG_RAVE_SP_EEPROM=m - -# -# HW tracing support -# -CONFIG_STM=m -# CONFIG_STM_DUMMY is not set -CONFIG_STM_SOURCE_CONSOLE=m -CONFIG_STM_SOURCE_HEARTBEAT=m -# CONFIG_STM_SOURCE_FTRACE is not set -CONFIG_INTEL_TH=m -CONFIG_INTEL_TH_PCI=m -CONFIG_INTEL_TH_ACPI=m -CONFIG_INTEL_TH_GTH=m -CONFIG_INTEL_TH_STH=m -CONFIG_INTEL_TH_MSU=m -CONFIG_INTEL_TH_PTI=m -# CONFIG_INTEL_TH_DEBUG is not set -CONFIG_FPGA=m -CONFIG_ALTERA_PR_IP_CORE=m -CONFIG_FPGA_MGR_ALTERA_PS_SPI=m -CONFIG_FPGA_MGR_ALTERA_CVP=m -CONFIG_FPGA_MGR_XILINX_SPI=m -CONFIG_FPGA_MGR_MACHXO2_SPI=m -CONFIG_FPGA_BRIDGE=m -CONFIG_XILINX_PR_DECOUPLER=m -CONFIG_FPGA_REGION=m -# CONFIG_FPGA_DFL is not set -CONFIG_PM_OPP=y -# CONFIG_UNISYS_VISORBUS is not set -CONFIG_SIOX=m -CONFIG_SIOX_BUS_GPIO=m -CONFIG_SLIMBUS=m -CONFIG_SLIM_QCOM_CTRL=m - -# -# File systems -# -CONFIG_DCACHE_WORD_ACCESS=y -CONFIG_FS_IOMAP=y -CONFIG_EXT2_FS=m -CONFIG_EXT2_FS_XATTR=y -CONFIG_EXT2_FS_POSIX_ACL=y -CONFIG_EXT2_FS_SECURITY=y -CONFIG_EXT3_FS=m -CONFIG_EXT3_FS_POSIX_ACL=y -CONFIG_EXT3_FS_SECURITY=y -CONFIG_EXT4_FS=m -CONFIG_EXT4_FS_POSIX_ACL=y -CONFIG_EXT4_FS_SECURITY=y -CONFIG_EXT4_ENCRYPTION=y -CONFIG_EXT4_FS_ENCRYPTION=y -# CONFIG_EXT4_DEBUG is not set -CONFIG_JBD2=m -# CONFIG_JBD2_DEBUG is not set -CONFIG_FS_MBCACHE=m -CONFIG_REISERFS_FS=m -# CONFIG_REISERFS_CHECK is not set -CONFIG_REISERFS_PROC_INFO=y -CONFIG_REISERFS_FS_XATTR=y -CONFIG_REISERFS_FS_POSIX_ACL=y -CONFIG_REISERFS_FS_SECURITY=y -CONFIG_JFS_FS=m -CONFIG_JFS_POSIX_ACL=y -CONFIG_JFS_SECURITY=y -# CONFIG_JFS_DEBUG is not set -CONFIG_JFS_STATISTICS=y -CONFIG_XFS_FS=m -CONFIG_XFS_QUOTA=y -CONFIG_XFS_POSIX_ACL=y -CONFIG_XFS_RT=y -CONFIG_XFS_ONLINE_SCRUB=y -# CONFIG_XFS_ONLINE_REPAIR is not set -# CONFIG_XFS_WARN is not set -# CONFIG_XFS_DEBUG is not set -CONFIG_GFS2_FS=m -CONFIG_GFS2_FS_LOCKING_DLM=y -CONFIG_OCFS2_FS=m -CONFIG_OCFS2_FS_O2CB=m -CONFIG_OCFS2_FS_USERSPACE_CLUSTER=m -CONFIG_OCFS2_FS_STATS=y -CONFIG_OCFS2_DEBUG_MASKLOG=y -CONFIG_OCFS2_DEBUG_FS=y -CONFIG_BTRFS_FS=m -CONFIG_BTRFS_FS_POSIX_ACL=y -# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set -# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set -# CONFIG_BTRFS_DEBUG is not set -# CONFIG_BTRFS_ASSERT is not set -# CONFIG_BTRFS_FS_REF_VERIFY is not set -CONFIG_NILFS2_FS=m -CONFIG_F2FS_FS=m -CONFIG_F2FS_STAT_FS=y -CONFIG_F2FS_FS_XATTR=y -CONFIG_F2FS_FS_POSIX_ACL=y -CONFIG_F2FS_FS_SECURITY=y -CONFIG_F2FS_CHECK_FS=y -CONFIG_F2FS_FS_ENCRYPTION=y -# CONFIG_F2FS_IO_TRACE is not set -# CONFIG_F2FS_FAULT_INJECTION is not set -CONFIG_FS_DAX=y -CONFIG_FS_POSIX_ACL=y -CONFIG_EXPORTFS=y -CONFIG_EXPORTFS_BLOCK_OPS=y -CONFIG_FILE_LOCKING=y -CONFIG_MANDATORY_FILE_LOCKING=y -CONFIG_FS_ENCRYPTION=m -CONFIG_FSNOTIFY=y -CONFIG_DNOTIFY=y -CONFIG_INOTIFY_USER=y -CONFIG_FANOTIFY=y -# CONFIG_FANOTIFY_ACCESS_PERMISSIONS is not set -CONFIG_QUOTA=y -CONFIG_QUOTA_NETLINK_INTERFACE=y -# CONFIG_PRINT_QUOTA_WARNING is not set -# CONFIG_QUOTA_DEBUG is not set 
-CONFIG_QUOTA_TREE=m -CONFIG_QFMT_V1=m -CONFIG_QFMT_V2=m -CONFIG_QUOTACTL=y -CONFIG_QUOTACTL_COMPAT=y -CONFIG_AUTOFS4_FS=m -CONFIG_AUTOFS_FS=m -CONFIG_FUSE_FS=m -CONFIG_CUSE=m -CONFIG_OVERLAY_FS=m -# CONFIG_OVERLAY_FS_REDIRECT_DIR is not set -CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y -CONFIG_OVERLAY_FS_INDEX=y -CONFIG_OVERLAY_FS_NFS_EXPORT=y -# CONFIG_OVERLAY_FS_XINO_AUTO is not set -# CONFIG_OVERLAY_FS_METACOPY is not set - -# -# Caches -# -CONFIG_FSCACHE=m -CONFIG_FSCACHE_STATS=y -# CONFIG_FSCACHE_HISTOGRAM is not set -# CONFIG_FSCACHE_DEBUG is not set -# CONFIG_FSCACHE_OBJECT_LIST is not set -CONFIG_CACHEFILES=m -# CONFIG_CACHEFILES_DEBUG is not set -# CONFIG_CACHEFILES_HISTOGRAM is not set - -# -# CD-ROM/DVD Filesystems -# -CONFIG_ISO9660_FS=m -CONFIG_JOLIET=y -CONFIG_ZISOFS=y -CONFIG_UDF_FS=m - -# -# DOS/FAT/NT Filesystems -# -CONFIG_FAT_FS=m -CONFIG_MSDOS_FS=m -CONFIG_VFAT_FS=m -CONFIG_FAT_DEFAULT_CODEPAGE=437 -CONFIG_FAT_DEFAULT_IOCHARSET="utf8" -# CONFIG_FAT_DEFAULT_UTF8 is not set -CONFIG_NTFS_FS=m -# CONFIG_NTFS_DEBUG is not set -CONFIG_NTFS_RW=y - -# -# Pseudo filesystems -# -CONFIG_PROC_FS=y -CONFIG_PROC_KCORE=y -CONFIG_PROC_SYSCTL=y -CONFIG_PROC_PAGE_MONITOR=y -# CONFIG_PROC_CHILDREN is not set -CONFIG_KERNFS=y -CONFIG_SYSFS=y -CONFIG_TMPFS=y -CONFIG_TMPFS_POSIX_ACL=y -CONFIG_TMPFS_XATTR=y -CONFIG_HUGETLBFS=y -CONFIG_HUGETLB_PAGE=y -CONFIG_MEMFD_CREATE=y -CONFIG_ARCH_HAS_GIGANTIC_PAGE=y -CONFIG_CONFIGFS_FS=y -CONFIG_EFIVAR_FS=m -CONFIG_MISC_FILESYSTEMS=y -CONFIG_ORANGEFS_FS=m -CONFIG_ADFS_FS=m -# CONFIG_ADFS_FS_RW is not set -CONFIG_AFFS_FS=m -CONFIG_ECRYPT_FS=m -CONFIG_ECRYPT_FS_MESSAGING=y -CONFIG_HFS_FS=m -CONFIG_HFSPLUS_FS=m -CONFIG_BEFS_FS=m -# CONFIG_BEFS_DEBUG is not set -CONFIG_BFS_FS=m -CONFIG_EFS_FS=m -CONFIG_JFFS2_FS=m -CONFIG_JFFS2_FS_DEBUG=0 -CONFIG_JFFS2_FS_WRITEBUFFER=y -CONFIG_JFFS2_FS_WBUF_VERIFY=y -CONFIG_JFFS2_SUMMARY=y -CONFIG_JFFS2_FS_XATTR=y -CONFIG_JFFS2_FS_POSIX_ACL=y -CONFIG_JFFS2_FS_SECURITY=y -CONFIG_JFFS2_COMPRESSION_OPTIONS=y -CONFIG_JFFS2_ZLIB=y -CONFIG_JFFS2_LZO=y -CONFIG_JFFS2_RTIME=y -CONFIG_JFFS2_RUBIN=y -# CONFIG_JFFS2_CMODE_NONE is not set -CONFIG_JFFS2_CMODE_PRIORITY=y -# CONFIG_JFFS2_CMODE_SIZE is not set -# CONFIG_JFFS2_CMODE_FAVOURLZO is not set -CONFIG_UBIFS_FS=m -CONFIG_UBIFS_FS_ADVANCED_COMPR=y -CONFIG_UBIFS_FS_LZO=y -CONFIG_UBIFS_FS_ZLIB=y -CONFIG_UBIFS_ATIME_SUPPORT=y -CONFIG_UBIFS_FS_XATTR=y -CONFIG_UBIFS_FS_ENCRYPTION=y -CONFIG_UBIFS_FS_SECURITY=y -CONFIG_CRAMFS=m -CONFIG_CRAMFS_BLOCKDEV=y -CONFIG_CRAMFS_MTD=y -CONFIG_SQUASHFS=m -# CONFIG_SQUASHFS_FILE_CACHE is not set -CONFIG_SQUASHFS_FILE_DIRECT=y -# CONFIG_SQUASHFS_DECOMP_SINGLE is not set -# CONFIG_SQUASHFS_DECOMP_MULTI is not set -CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU=y -CONFIG_SQUASHFS_XATTR=y -CONFIG_SQUASHFS_ZLIB=y -CONFIG_SQUASHFS_LZ4=y -CONFIG_SQUASHFS_LZO=y -CONFIG_SQUASHFS_XZ=y -CONFIG_SQUASHFS_ZSTD=y -CONFIG_SQUASHFS_4K_DEVBLK_SIZE=y -CONFIG_SQUASHFS_EMBEDDED=y -CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 -CONFIG_VXFS_FS=m -CONFIG_MINIX_FS=m -CONFIG_OMFS_FS=m -CONFIG_HPFS_FS=m -CONFIG_QNX4FS_FS=m -CONFIG_QNX6FS_FS=m -# CONFIG_QNX6FS_DEBUG is not set -CONFIG_ROMFS_FS=m -# CONFIG_ROMFS_BACKED_BY_BLOCK is not set -# CONFIG_ROMFS_BACKED_BY_MTD is not set -CONFIG_ROMFS_BACKED_BY_BOTH=y -CONFIG_ROMFS_ON_BLOCK=y -CONFIG_ROMFS_ON_MTD=y -CONFIG_PSTORE=y -CONFIG_PSTORE_DEFLATE_COMPRESS=y -CONFIG_PSTORE_LZO_COMPRESS=y -CONFIG_PSTORE_LZ4_COMPRESS=y -CONFIG_PSTORE_LZ4HC_COMPRESS=y -CONFIG_PSTORE_842_COMPRESS=y -CONFIG_PSTORE_ZSTD_COMPRESS=y -CONFIG_PSTORE_COMPRESS=y -# 
CONFIG_PSTORE_DEFLATE_COMPRESS_DEFAULT is not set -# CONFIG_PSTORE_LZO_COMPRESS_DEFAULT is not set -CONFIG_PSTORE_LZ4_COMPRESS_DEFAULT=y -# CONFIG_PSTORE_LZ4HC_COMPRESS_DEFAULT is not set -# CONFIG_PSTORE_842_COMPRESS_DEFAULT is not set -# CONFIG_PSTORE_ZSTD_COMPRESS_DEFAULT is not set -CONFIG_PSTORE_COMPRESS_DEFAULT="lz4" -# CONFIG_PSTORE_CONSOLE is not set -CONFIG_PSTORE_PMSG=y -# CONFIG_PSTORE_FTRACE is not set -CONFIG_PSTORE_RAM=m -CONFIG_SYSV_FS=m -CONFIG_UFS_FS=m -# CONFIG_UFS_FS_WRITE is not set -# CONFIG_UFS_DEBUG is not set -CONFIG_EXOFS_FS=m -# CONFIG_EXOFS_DEBUG is not set -CONFIG_ORE=m -CONFIG_NETWORK_FILESYSTEMS=y -CONFIG_NFS_FS=m -CONFIG_NFS_V2=m -CONFIG_NFS_V3=m -CONFIG_NFS_V3_ACL=y -CONFIG_NFS_V4=m -CONFIG_NFS_SWAP=y -CONFIG_NFS_V4_1=y -CONFIG_NFS_V4_2=y -CONFIG_PNFS_FILE_LAYOUT=m -CONFIG_PNFS_BLOCK=m -CONFIG_PNFS_FLEXFILE_LAYOUT=m -CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" -CONFIG_NFS_V4_1_MIGRATION=y -CONFIG_NFS_V4_SECURITY_LABEL=y -CONFIG_NFS_FSCACHE=y -# CONFIG_NFS_USE_LEGACY_DNS is not set -CONFIG_NFS_USE_KERNEL_DNS=y -CONFIG_NFSD=m -CONFIG_NFSD_V2_ACL=y -CONFIG_NFSD_V3=y -CONFIG_NFSD_V3_ACL=y -CONFIG_NFSD_V4=y -CONFIG_NFSD_PNFS=y -CONFIG_NFSD_BLOCKLAYOUT=y -CONFIG_NFSD_SCSILAYOUT=y -CONFIG_NFSD_FLEXFILELAYOUT=y -# CONFIG_NFSD_V4_SECURITY_LABEL is not set -# CONFIG_NFSD_FAULT_INJECTION is not set -CONFIG_GRACE_PERIOD=m -CONFIG_LOCKD=m -CONFIG_LOCKD_V4=y -CONFIG_NFS_ACL_SUPPORT=m -CONFIG_NFS_COMMON=y -CONFIG_SUNRPC=m -CONFIG_SUNRPC_GSS=m -CONFIG_SUNRPC_BACKCHANNEL=y -CONFIG_SUNRPC_SWAP=y -CONFIG_RPCSEC_GSS_KRB5=m -# CONFIG_SUNRPC_DEBUG is not set -CONFIG_SUNRPC_XPRT_RDMA=m -CONFIG_CEPH_FS=m -CONFIG_CEPH_FSCACHE=y -CONFIG_CEPH_FS_POSIX_ACL=y -CONFIG_CIFS=m -CONFIG_CIFS_STATS2=y -CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y -CONFIG_CIFS_WEAK_PW_HASH=y -CONFIG_CIFS_UPCALL=y -CONFIG_CIFS_XATTR=y -CONFIG_CIFS_POSIX=y -CONFIG_CIFS_ACL=y -# CONFIG_CIFS_DEBUG is not set -CONFIG_CIFS_DFS_UPCALL=y -# CONFIG_CIFS_SMB_DIRECT is not set -CONFIG_CIFS_FSCACHE=y -CONFIG_CODA_FS=m -CONFIG_AFS_FS=m -# CONFIG_AFS_DEBUG is not set -CONFIG_AFS_FSCACHE=y -CONFIG_9P_FS=m -CONFIG_9P_FSCACHE=y -CONFIG_9P_FS_POSIX_ACL=y -CONFIG_9P_FS_SECURITY=y -CONFIG_NLS=y -CONFIG_NLS_DEFAULT="utf8" -CONFIG_NLS_CODEPAGE_437=m -CONFIG_NLS_CODEPAGE_737=m -CONFIG_NLS_CODEPAGE_775=m -CONFIG_NLS_CODEPAGE_850=m -CONFIG_NLS_CODEPAGE_852=m -CONFIG_NLS_CODEPAGE_855=m -CONFIG_NLS_CODEPAGE_857=m -CONFIG_NLS_CODEPAGE_860=m -CONFIG_NLS_CODEPAGE_861=m -CONFIG_NLS_CODEPAGE_862=m -CONFIG_NLS_CODEPAGE_863=m -CONFIG_NLS_CODEPAGE_864=m -CONFIG_NLS_CODEPAGE_865=m -CONFIG_NLS_CODEPAGE_866=m -CONFIG_NLS_CODEPAGE_869=m -CONFIG_NLS_CODEPAGE_936=m -CONFIG_NLS_CODEPAGE_950=m -CONFIG_NLS_CODEPAGE_932=m -CONFIG_NLS_CODEPAGE_949=m -CONFIG_NLS_CODEPAGE_874=m -CONFIG_NLS_ISO8859_8=m -CONFIG_NLS_CODEPAGE_1250=m -CONFIG_NLS_CODEPAGE_1251=m -CONFIG_NLS_ASCII=m -CONFIG_NLS_ISO8859_1=m -CONFIG_NLS_ISO8859_2=m -CONFIG_NLS_ISO8859_3=m -CONFIG_NLS_ISO8859_4=m -CONFIG_NLS_ISO8859_5=m -CONFIG_NLS_ISO8859_6=m -CONFIG_NLS_ISO8859_7=m -CONFIG_NLS_ISO8859_9=m -CONFIG_NLS_ISO8859_13=m -CONFIG_NLS_ISO8859_14=m -CONFIG_NLS_ISO8859_15=m -CONFIG_NLS_KOI8_R=m -CONFIG_NLS_KOI8_U=m -CONFIG_NLS_MAC_ROMAN=m -CONFIG_NLS_MAC_CELTIC=m -CONFIG_NLS_MAC_CENTEURO=m -CONFIG_NLS_MAC_CROATIAN=m -CONFIG_NLS_MAC_CYRILLIC=m -CONFIG_NLS_MAC_GAELIC=m -CONFIG_NLS_MAC_GREEK=m -CONFIG_NLS_MAC_ICELAND=m -CONFIG_NLS_MAC_INUIT=m -CONFIG_NLS_MAC_ROMANIAN=m -CONFIG_NLS_MAC_TURKISH=m -CONFIG_NLS_UTF8=m -CONFIG_DLM=m -# CONFIG_DLM_DEBUG is not set - -# -# Security options -# 
-CONFIG_KEYS=y -CONFIG_KEYS_COMPAT=y -CONFIG_PERSISTENT_KEYRINGS=y -# CONFIG_BIG_KEYS is not set -CONFIG_TRUSTED_KEYS=m -CONFIG_ENCRYPTED_KEYS=m -# CONFIG_KEY_DH_OPERATIONS is not set -CONFIG_SECURITY_DMESG_RESTRICT=y -CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y -CONFIG_SECURITY_TIOCSTI_RESTRICT=y -CONFIG_SECURITY=y -CONFIG_SECURITYFS=y -CONFIG_SECURITY_NETWORK=y -CONFIG_PAGE_TABLE_ISOLATION=y -# CONFIG_SECURITY_INFINIBAND is not set -# CONFIG_SECURITY_NETWORK_XFRM is not set -CONFIG_SECURITY_PATH=y -CONFIG_INTEL_TXT=y -CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y -CONFIG_HARDENED_USERCOPY=y -CONFIG_HARDENED_USERCOPY_FALLBACK=y -CONFIG_FORTIFY_SOURCE=y -CONFIG_PAGE_SANITIZE=y -CONFIG_PAGE_SANITIZE_VERIFY=y -# CONFIG_STATIC_USERMODEHELPER is not set -# CONFIG_SECURITY_SELINUX is not set -# CONFIG_SECURITY_SMACK is not set -# CONFIG_SECURITY_TOMOYO is not set -CONFIG_SECURITY_APPARMOR=y -CONFIG_SECURITY_APPARMOR_BOOTPARAM_VALUE=1 -CONFIG_SECURITY_APPARMOR_HASH=y -CONFIG_SECURITY_APPARMOR_HASH_DEFAULT=y -# CONFIG_SECURITY_APPARMOR_DEBUG is not set -# CONFIG_SECURITY_LOADPIN is not set -CONFIG_SECURITY_YAMA=y -# CONFIG_INTEGRITY is not set -CONFIG_DEFAULT_SECURITY_APPARMOR=y -# CONFIG_DEFAULT_SECURITY_DAC is not set -CONFIG_DEFAULT_SECURITY="apparmor" -CONFIG_XOR_BLOCKS=m -CONFIG_ASYNC_CORE=m -CONFIG_ASYNC_MEMCPY=m -CONFIG_ASYNC_XOR=m -CONFIG_ASYNC_PQ=m -CONFIG_ASYNC_RAID6_RECOV=m -CONFIG_CRYPTO=y - -# -# Crypto core or helper -# -CONFIG_CRYPTO_ALGAPI=y -CONFIG_CRYPTO_ALGAPI2=y -CONFIG_CRYPTO_AEAD=m -CONFIG_CRYPTO_AEAD2=y -CONFIG_CRYPTO_BLKCIPHER=y -CONFIG_CRYPTO_BLKCIPHER2=y -CONFIG_CRYPTO_HASH=y -CONFIG_CRYPTO_HASH2=y -CONFIG_CRYPTO_RNG=m -CONFIG_CRYPTO_RNG2=y -CONFIG_CRYPTO_RNG_DEFAULT=m -CONFIG_CRYPTO_AKCIPHER2=y -CONFIG_CRYPTO_AKCIPHER=y -CONFIG_CRYPTO_KPP2=y -CONFIG_CRYPTO_KPP=m -CONFIG_CRYPTO_ACOMP2=y -CONFIG_CRYPTO_RSA=y -CONFIG_CRYPTO_DH=m -CONFIG_CRYPTO_ECDH=m -CONFIG_CRYPTO_MANAGER=y -CONFIG_CRYPTO_MANAGER2=y -CONFIG_CRYPTO_USER=m -CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y -CONFIG_CRYPTO_GF128MUL=m -CONFIG_CRYPTO_NULL=m -CONFIG_CRYPTO_NULL2=y -CONFIG_CRYPTO_PCRYPT=m -CONFIG_CRYPTO_WORKQUEUE=y -CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_MCRYPTD=m -CONFIG_CRYPTO_AUTHENC=m -CONFIG_CRYPTO_TEST=m -CONFIG_CRYPTO_SIMD=m -CONFIG_CRYPTO_GLUE_HELPER_X86=m -CONFIG_CRYPTO_ENGINE=m - -# -# Authenticated Encryption with Associated Data -# -CONFIG_CRYPTO_CCM=m -CONFIG_CRYPTO_GCM=m -CONFIG_CRYPTO_CHACHA20POLY1305=m -CONFIG_CRYPTO_AEGIS128=m -CONFIG_CRYPTO_AEGIS128L=m -CONFIG_CRYPTO_AEGIS256=m -CONFIG_CRYPTO_AEGIS128_AESNI_SSE2=m -CONFIG_CRYPTO_AEGIS128L_AESNI_SSE2=m -CONFIG_CRYPTO_AEGIS256_AESNI_SSE2=m -CONFIG_CRYPTO_MORUS640=m -CONFIG_CRYPTO_MORUS640_GLUE=m -CONFIG_CRYPTO_MORUS640_SSE2=m -CONFIG_CRYPTO_MORUS1280=m -CONFIG_CRYPTO_MORUS1280_GLUE=m -CONFIG_CRYPTO_MORUS1280_SSE2=m -CONFIG_CRYPTO_MORUS1280_AVX2=m -CONFIG_CRYPTO_SEQIV=m -CONFIG_CRYPTO_ECHAINIV=m - -# -# Block modes -# -CONFIG_CRYPTO_CBC=m -CONFIG_CRYPTO_CFB=m -CONFIG_CRYPTO_CTR=m -CONFIG_CRYPTO_CTS=m -CONFIG_CRYPTO_ECB=y -CONFIG_CRYPTO_LRW=m -CONFIG_CRYPTO_PCBC=m -CONFIG_CRYPTO_XTS=m -CONFIG_CRYPTO_KEYWRAP=m - -# -# Hash modes -# -CONFIG_CRYPTO_CMAC=m -CONFIG_CRYPTO_HMAC=m -CONFIG_CRYPTO_XCBC=m -CONFIG_CRYPTO_VMAC=m - -# -# Digest -# -CONFIG_CRYPTO_CRC32C=m -CONFIG_CRYPTO_CRC32C_INTEL=m -CONFIG_CRYPTO_CRC32=m -CONFIG_CRYPTO_CRC32_PCLMUL=m -CONFIG_CRYPTO_CRCT10DIF=y -CONFIG_CRYPTO_CRCT10DIF_PCLMUL=m -CONFIG_CRYPTO_GHASH=m -CONFIG_CRYPTO_POLY1305=m -CONFIG_CRYPTO_POLY1305_X86_64=m -CONFIG_CRYPTO_MD4=m -CONFIG_CRYPTO_MD5=y -CONFIG_CRYPTO_MICHAEL_MIC=m 
-CONFIG_CRYPTO_RMD128=m -CONFIG_CRYPTO_RMD160=m -CONFIG_CRYPTO_RMD256=m -CONFIG_CRYPTO_RMD320=m -CONFIG_CRYPTO_SHA1=y -CONFIG_CRYPTO_SHA1_SSSE3=m -CONFIG_CRYPTO_SHA256_SSSE3=m -CONFIG_CRYPTO_SHA512_SSSE3=m -CONFIG_CRYPTO_SHA1_MB=m -CONFIG_CRYPTO_SHA256_MB=m -CONFIG_CRYPTO_SHA512_MB=m -CONFIG_CRYPTO_SHA256=m -CONFIG_CRYPTO_SHA512=y -CONFIG_CRYPTO_SHA3=m -CONFIG_CRYPTO_SM3=m -CONFIG_CRYPTO_TGR192=m -CONFIG_CRYPTO_WP512=m -CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=m - -# -# Ciphers -# -CONFIG_CRYPTO_AES=y -CONFIG_CRYPTO_AES_TI=m -CONFIG_CRYPTO_AES_X86_64=m -CONFIG_CRYPTO_AES_NI_INTEL=m -CONFIG_CRYPTO_ANUBIS=m -CONFIG_CRYPTO_ARC4=m -CONFIG_CRYPTO_BLOWFISH=m -CONFIG_CRYPTO_BLOWFISH_COMMON=m -CONFIG_CRYPTO_BLOWFISH_X86_64=m -CONFIG_CRYPTO_CAMELLIA=m -CONFIG_CRYPTO_CAMELLIA_X86_64=m -CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64=m -CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64=m -CONFIG_CRYPTO_CAST_COMMON=m -CONFIG_CRYPTO_CAST5=m -CONFIG_CRYPTO_CAST5_AVX_X86_64=m -CONFIG_CRYPTO_CAST6=m -CONFIG_CRYPTO_CAST6_AVX_X86_64=m -CONFIG_CRYPTO_DES=m -CONFIG_CRYPTO_DES3_EDE_X86_64=m -CONFIG_CRYPTO_FCRYPT=m -CONFIG_CRYPTO_KHAZAD=m -CONFIG_CRYPTO_SALSA20=m -CONFIG_CRYPTO_CHACHA20=m -CONFIG_CRYPTO_CHACHA20_X86_64=m -CONFIG_CRYPTO_SEED=m -CONFIG_CRYPTO_SERPENT=m -CONFIG_CRYPTO_SERPENT_SSE2_X86_64=m -CONFIG_CRYPTO_SERPENT_AVX_X86_64=m -CONFIG_CRYPTO_SERPENT_AVX2_X86_64=m -CONFIG_CRYPTO_SM4=m -CONFIG_CRYPTO_TEA=m -CONFIG_CRYPTO_TWOFISH=m -CONFIG_CRYPTO_TWOFISH_COMMON=m -CONFIG_CRYPTO_TWOFISH_X86_64=m -CONFIG_CRYPTO_TWOFISH_X86_64_3WAY=m -CONFIG_CRYPTO_TWOFISH_AVX_X86_64=m - -# -# Compression -# -CONFIG_CRYPTO_DEFLATE=y -CONFIG_CRYPTO_LZO=y -CONFIG_CRYPTO_842=y -CONFIG_CRYPTO_LZ4=y -CONFIG_CRYPTO_LZ4HC=y -CONFIG_CRYPTO_ZSTD=y - -# -# Random Number Generation -# -CONFIG_CRYPTO_ANSI_CPRNG=m -CONFIG_CRYPTO_DRBG_MENU=m -CONFIG_CRYPTO_DRBG_HMAC=y -CONFIG_CRYPTO_DRBG_HASH=y -CONFIG_CRYPTO_DRBG_CTR=y -CONFIG_CRYPTO_DRBG=m -CONFIG_CRYPTO_JITTERENTROPY=m -CONFIG_CRYPTO_USER_API=m -CONFIG_CRYPTO_USER_API_HASH=m -CONFIG_CRYPTO_USER_API_SKCIPHER=m -CONFIG_CRYPTO_USER_API_RNG=m -CONFIG_CRYPTO_USER_API_AEAD=m -CONFIG_CRYPTO_HASH_INFO=y -CONFIG_CRYPTO_HW=y -CONFIG_CRYPTO_DEV_PADLOCK=m -CONFIG_CRYPTO_DEV_PADLOCK_AES=m -CONFIG_CRYPTO_DEV_PADLOCK_SHA=m -CONFIG_CRYPTO_DEV_CCP=y -CONFIG_CRYPTO_DEV_CCP_DD=m -CONFIG_CRYPTO_DEV_SP_CCP=y -CONFIG_CRYPTO_DEV_CCP_CRYPTO=m -CONFIG_CRYPTO_DEV_SP_PSP=y -CONFIG_CRYPTO_DEV_QAT=m -CONFIG_CRYPTO_DEV_QAT_DH895xCC=m -CONFIG_CRYPTO_DEV_QAT_C3XXX=m -CONFIG_CRYPTO_DEV_QAT_C62X=m -CONFIG_CRYPTO_DEV_QAT_DH895xCCVF=m -CONFIG_CRYPTO_DEV_QAT_C3XXXVF=m -CONFIG_CRYPTO_DEV_QAT_C62XVF=m -CONFIG_CRYPTO_DEV_NITROX=m -CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m -CONFIG_CRYPTO_DEV_CHELSIO=m -CONFIG_CHELSIO_IPSEC_INLINE=y -CONFIG_CRYPTO_DEV_CHELSIO_TLS=m -CONFIG_CRYPTO_DEV_VIRTIO=m -CONFIG_ASYMMETRIC_KEY_TYPE=y -CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y -CONFIG_X509_CERTIFICATE_PARSER=y -CONFIG_PKCS7_MESSAGE_PARSER=y -CONFIG_PKCS7_TEST_KEY=m -CONFIG_SIGNED_PE_FILE_VERIFICATION=y - -# -# Certificates for signature checking -# -CONFIG_MODULE_SIG_KEY="certs/signing_key.pem" -CONFIG_SYSTEM_TRUSTED_KEYRING=y -CONFIG_SYSTEM_TRUSTED_KEYS="" -# CONFIG_SYSTEM_EXTRA_CERTIFICATE is not set -CONFIG_SECONDARY_TRUSTED_KEYRING=y -CONFIG_SYSTEM_BLACKLIST_KEYRING=y -CONFIG_SYSTEM_BLACKLIST_HASH_LIST="" -CONFIG_BINARY_PRINTF=y - -# -# Library routines -# -CONFIG_RAID6_PQ=m -CONFIG_BITREVERSE=y -CONFIG_RATIONAL=y -CONFIG_GENERIC_STRNCPY_FROM_USER=y -CONFIG_GENERIC_STRNLEN_USER=y -CONFIG_GENERIC_NET_UTILS=y -CONFIG_GENERIC_FIND_FIRST_BIT=y 
-CONFIG_GENERIC_PCI_IOMAP=y -CONFIG_GENERIC_IOMAP=y -CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y -CONFIG_ARCH_HAS_FAST_MULTIPLIER=y -CONFIG_CRC_CCITT=m -CONFIG_CRC16=m -CONFIG_CRC_T10DIF=y -CONFIG_CRC_ITU_T=m -CONFIG_CRC32=y -# CONFIG_CRC32_SELFTEST is not set -CONFIG_CRC32_SLICEBY8=y -# CONFIG_CRC32_SLICEBY4 is not set -# CONFIG_CRC32_SARWATE is not set -# CONFIG_CRC32_BIT is not set -CONFIG_CRC64=m -CONFIG_CRC4=m -CONFIG_CRC7=m -CONFIG_LIBCRC32C=m -CONFIG_CRC8=m -CONFIG_XXHASH=y -# CONFIG_RANDOM32_SELFTEST is not set -CONFIG_842_COMPRESS=y -CONFIG_842_DECOMPRESS=y -CONFIG_ZLIB_INFLATE=y -CONFIG_ZLIB_DEFLATE=y -CONFIG_LZO_COMPRESS=y -CONFIG_LZO_DECOMPRESS=y -CONFIG_LZ4_COMPRESS=y -CONFIG_LZ4HC_COMPRESS=y -CONFIG_LZ4_DECOMPRESS=y -CONFIG_ZSTD_COMPRESS=y -CONFIG_ZSTD_DECOMPRESS=y -CONFIG_XZ_DEC=y -CONFIG_XZ_DEC_X86=y -CONFIG_XZ_DEC_POWERPC=y -CONFIG_XZ_DEC_IA64=y -CONFIG_XZ_DEC_ARM=y -CONFIG_XZ_DEC_ARMTHUMB=y -CONFIG_XZ_DEC_SPARC=y -CONFIG_XZ_DEC_BCJ=y -# CONFIG_XZ_DEC_TEST is not set -CONFIG_DECOMPRESS_GZIP=y -CONFIG_DECOMPRESS_BZIP2=y -CONFIG_DECOMPRESS_LZMA=y -CONFIG_DECOMPRESS_XZ=y -CONFIG_DECOMPRESS_LZO=y -CONFIG_DECOMPRESS_LZ4=y -CONFIG_GENERIC_ALLOCATOR=y -CONFIG_REED_SOLOMON=m -CONFIG_REED_SOLOMON_ENC8=y -CONFIG_REED_SOLOMON_DEC8=y -CONFIG_REED_SOLOMON_DEC16=y -CONFIG_BCH=m -CONFIG_BCH_CONST_PARAMS=y -CONFIG_TEXTSEARCH=y -CONFIG_TEXTSEARCH_KMP=m -CONFIG_TEXTSEARCH_BM=m -CONFIG_TEXTSEARCH_FSM=m -CONFIG_BTREE=y -CONFIG_INTERVAL_TREE=y -CONFIG_RADIX_TREE_MULTIORDER=y -CONFIG_ASSOCIATIVE_ARRAY=y -CONFIG_HAS_IOMEM=y -CONFIG_HAS_IOPORT_MAP=y -CONFIG_HAS_DMA=y -CONFIG_NEED_SG_DMA_LENGTH=y -CONFIG_NEED_DMA_MAP_STATE=y -CONFIG_ARCH_DMA_ADDR_T_64BIT=y -CONFIG_DMA_DIRECT_OPS=y -CONFIG_DMA_VIRT_OPS=y -CONFIG_SWIOTLB=y -CONFIG_SGL_ALLOC=y -CONFIG_IOMMU_HELPER=y -CONFIG_CHECK_SIGNATURE=y -CONFIG_CPUMASK_OFFSTACK=y -CONFIG_CPU_RMAP=y -CONFIG_DQL=y -CONFIG_GLOB=y -# CONFIG_GLOB_SELFTEST is not set -CONFIG_NLATTR=y -CONFIG_LRU_CACHE=m -CONFIG_CLZ_TAB=y -CONFIG_CORDIC=m -CONFIG_DDR=y -CONFIG_IRQ_POLL=y -CONFIG_MPILIB=y -CONFIG_OID_REGISTRY=y -CONFIG_UCS2_STRING=y -CONFIG_FONT_SUPPORT=y -CONFIG_FONTS=y -CONFIG_FONT_8x8=y -CONFIG_FONT_8x16=y -# CONFIG_FONT_6x11 is not set -# CONFIG_FONT_7x14 is not set -# CONFIG_FONT_PEARL_8x8 is not set -# CONFIG_FONT_ACORN_8x8 is not set -# CONFIG_FONT_MINI_4x6 is not set -# CONFIG_FONT_6x10 is not set -# CONFIG_FONT_10x18 is not set -# CONFIG_FONT_SUN8x16 is not set -# CONFIG_FONT_SUN12x22 is not set -CONFIG_SG_POOL=y -CONFIG_ARCH_HAS_SG_CHAIN=y -CONFIG_ARCH_HAS_PMEM_API=y -CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y -CONFIG_ARCH_HAS_UACCESS_MCSAFE=y -CONFIG_SBITMAP=y -CONFIG_PARMAN=m -CONFIG_PRIME_NUMBERS=m -# CONFIG_STRING_SELFTEST is not set - -# -# Kernel hacking -# - -# -# printk and dmesg options -# -# CONFIG_PRINTK_TIME is not set -CONFIG_CONSOLE_LOGLEVEL_DEFAULT=1 -CONFIG_CONSOLE_LOGLEVEL_QUIET=4 -CONFIG_MESSAGE_LOGLEVEL_DEFAULT=1 -# CONFIG_BOOT_PRINTK_DELAY is not set -# CONFIG_DYNAMIC_DEBUG is not set - -# -# Compile-time checks and compiler options -# -# CONFIG_DEBUG_INFO is not set -# CONFIG_ENABLE_MUST_CHECK is not set -CONFIG_FRAME_WARN=0 -CONFIG_STRIP_ASM_SYMS=y -# CONFIG_READABLE_ASM is not set -# CONFIG_UNUSED_SYMBOLS is not set -# CONFIG_PAGE_OWNER is not set -CONFIG_DEBUG_FS=y -# CONFIG_HEADERS_CHECK is not set -# CONFIG_DEBUG_SECTION_MISMATCH is not set -CONFIG_SECTION_MISMATCH_WARN_ONLY=y -CONFIG_STACK_VALIDATION=y -# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set -CONFIG_MAGIC_SYSRQ=y -CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 -CONFIG_MAGIC_SYSRQ_SERIAL=y 
-CONFIG_DEBUG_KERNEL=y - -# -# Memory Debugging -# -# CONFIG_PAGE_EXTENSION is not set -# CONFIG_DEBUG_PAGEALLOC is not set -# CONFIG_PAGE_POISONING is not set -# CONFIG_DEBUG_PAGE_REF is not set -# CONFIG_DEBUG_RODATA_TEST is not set -# CONFIG_DEBUG_OBJECTS is not set -CONFIG_SLUB_DEBUG_ON=y -# CONFIG_SLUB_STATS is not set -CONFIG_HAVE_DEBUG_KMEMLEAK=y -# CONFIG_DEBUG_KMEMLEAK is not set -# CONFIG_DEBUG_STACK_USAGE is not set -# CONFIG_DEBUG_VM is not set -CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y -# CONFIG_DEBUG_VIRTUAL is not set -CONFIG_DEBUG_MEMORY_INIT=y -# CONFIG_DEBUG_PER_CPU_MAPS is not set -CONFIG_HAVE_DEBUG_STACKOVERFLOW=y -# CONFIG_DEBUG_STACKOVERFLOW is not set -CONFIG_HAVE_ARCH_KASAN=y -# CONFIG_KASAN is not set -CONFIG_ARCH_HAS_KCOV=y -CONFIG_CC_HAS_SANCOV_TRACE_PC=y -# CONFIG_KCOV is not set -# CONFIG_DEBUG_SHIRQ is not set - -# -# Debug Lockups and Hangs -# -# CONFIG_SOFTLOCKUP_DETECTOR is not set -CONFIG_HARDLOCKUP_CHECK_TIMESTAMP=y -# CONFIG_HARDLOCKUP_DETECTOR is not set -# CONFIG_DETECT_HUNG_TASK is not set -# CONFIG_WQ_WATCHDOG is not set -# CONFIG_PANIC_ON_OOPS is not set -CONFIG_PANIC_ON_OOPS_VALUE=0 -CONFIG_PANIC_TIMEOUT=0 -CONFIG_SCHED_DEBUG=y -CONFIG_SCHED_INFO=y -CONFIG_SCHEDSTATS=y -CONFIG_SCHED_STACK_END_CHECK=y -# CONFIG_DEBUG_TIMEKEEPING is not set -# CONFIG_DEBUG_PREEMPT is not set - -# -# Lock Debugging (spinlocks, mutexes, etc...) -# -CONFIG_LOCK_DEBUGGING_SUPPORT=y -# CONFIG_PROVE_LOCKING is not set -# CONFIG_LOCK_STAT is not set -# CONFIG_DEBUG_RT_MUTEXES is not set -# CONFIG_DEBUG_SPINLOCK is not set -# CONFIG_DEBUG_MUTEXES is not set -# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set -# CONFIG_DEBUG_RWSEMS is not set -# CONFIG_DEBUG_LOCK_ALLOC is not set -# CONFIG_DEBUG_ATOMIC_SLEEP is not set -# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set -# CONFIG_LOCK_TORTURE_TEST is not set -# CONFIG_WW_MUTEX_SELFTEST is not set -CONFIG_STACKTRACE=y -# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set -# CONFIG_DEBUG_KOBJECT is not set -CONFIG_DEBUG_BUGVERBOSE=y -CONFIG_DEBUG_LIST=y -CONFIG_DEBUG_PI_LIST=y -CONFIG_DEBUG_SG=y -CONFIG_DEBUG_NOTIFIERS=y -CONFIG_DEBUG_CREDENTIALS=y - -# -# RCU Debugging -# -CONFIG_TORTURE_TEST=m -CONFIG_RCU_PERF_TEST=m -# CONFIG_RCU_TORTURE_TEST is not set -CONFIG_RCU_CPU_STALL_TIMEOUT=60 -# CONFIG_RCU_TRACE is not set -# CONFIG_RCU_EQS_DEBUG is not set -# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set -# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set -# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set -# CONFIG_NOTIFIER_ERROR_INJECTION is not set -CONFIG_FUNCTION_ERROR_INJECTION=y -# CONFIG_FAULT_INJECTION is not set -CONFIG_LATENCYTOP=y -CONFIG_USER_STACKTRACE_SUPPORT=y -CONFIG_NOP_TRACER=y -CONFIG_HAVE_FUNCTION_TRACER=y -CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y -CONFIG_HAVE_DYNAMIC_FTRACE=y -CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y -CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y -CONFIG_HAVE_SYSCALL_TRACEPOINTS=y -CONFIG_HAVE_FENTRY=y -CONFIG_HAVE_C_RECORDMCOUNT=y -CONFIG_TRACE_CLOCK=y -CONFIG_RING_BUFFER=y -CONFIG_EVENT_TRACING=y -CONFIG_CONTEXT_SWITCH_TRACER=y -CONFIG_TRACING=y -CONFIG_GENERIC_TRACER=y -CONFIG_TRACING_SUPPORT=y -CONFIG_FTRACE=y -CONFIG_FUNCTION_TRACER=y -# CONFIG_FUNCTION_GRAPH_TRACER is not set -# CONFIG_PREEMPTIRQ_EVENTS is not set -# CONFIG_IRQSOFF_TRACER is not set -# CONFIG_PREEMPT_TRACER is not set -# CONFIG_SCHED_TRACER is not set -# CONFIG_HWLAT_TRACER is not set -CONFIG_FTRACE_SYSCALLS=y -# CONFIG_TRACER_SNAPSHOT is not set -CONFIG_BRANCH_PROFILE_NONE=y -# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set -CONFIG_STACK_TRACER=y -CONFIG_BLK_DEV_IO_TRACE=y 
-CONFIG_KPROBE_EVENTS=y -# CONFIG_KPROBE_EVENTS_ON_NOTRACE is not set -# CONFIG_UPROBE_EVENTS is not set -CONFIG_BPF_EVENTS=y -CONFIG_PROBE_EVENTS=y -CONFIG_DYNAMIC_FTRACE=y -CONFIG_DYNAMIC_FTRACE_WITH_REGS=y -CONFIG_FUNCTION_PROFILER=y -# CONFIG_BPF_KPROBE_OVERRIDE is not set -CONFIG_FTRACE_MCOUNT_RECORD=y -# CONFIG_FTRACE_STARTUP_TEST is not set -# CONFIG_MMIOTRACE is not set -# CONFIG_HIST_TRIGGERS is not set -# CONFIG_TRACEPOINT_BENCHMARK is not set -# CONFIG_RING_BUFFER_BENCHMARK is not set -# CONFIG_RING_BUFFER_STARTUP_TEST is not set -# CONFIG_PREEMPTIRQ_DELAY_TEST is not set -# CONFIG_TRACE_EVAL_MAP_FILE is not set -CONFIG_TRACING_EVENTS_GPIO=y -# CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set -# CONFIG_DMA_API_DEBUG is not set -CONFIG_RUNTIME_TESTING_MENU=y -CONFIG_LKDTM=m -# CONFIG_TEST_LIST_SORT is not set -# CONFIG_TEST_SORT is not set -# CONFIG_KPROBES_SANITY_TEST is not set -# CONFIG_BACKTRACE_SELF_TEST is not set -# CONFIG_RBTREE_TEST is not set -# CONFIG_INTERVAL_TREE_TEST is not set -# CONFIG_PERCPU_TEST is not set -# CONFIG_ATOMIC64_SELFTEST is not set -# CONFIG_ASYNC_RAID6_TEST is not set -# CONFIG_TEST_HEXDUMP is not set -# CONFIG_TEST_STRING_HELPERS is not set -# CONFIG_TEST_KSTRTOX is not set -# CONFIG_TEST_PRINTF is not set -# CONFIG_TEST_BITMAP is not set -# CONFIG_TEST_BITFIELD is not set -# CONFIG_TEST_UUID is not set -# CONFIG_TEST_OVERFLOW is not set -# CONFIG_TEST_RHASHTABLE is not set -# CONFIG_TEST_HASH is not set -# CONFIG_TEST_IDA is not set -# CONFIG_TEST_PARMAN is not set -# CONFIG_TEST_LKM is not set -# CONFIG_TEST_USER_COPY is not set -# CONFIG_TEST_BPF is not set -# CONFIG_FIND_BIT_BENCHMARK is not set -# CONFIG_TEST_FIRMWARE is not set -# CONFIG_TEST_SYSCTL is not set -# CONFIG_TEST_UDELAY is not set -# CONFIG_TEST_STATIC_KEYS is not set -# CONFIG_TEST_KMOD is not set -CONFIG_MEMTEST=y -# CONFIG_BUG_ON_DATA_CORRUPTION is not set -# CONFIG_SAMPLES is not set -CONFIG_HAVE_ARCH_KGDB=y -# CONFIG_KGDB is not set -CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y -# CONFIG_UBSAN is not set -CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y -CONFIG_STRICT_DEVMEM=y -CONFIG_IO_STRICT_DEVMEM=y -CONFIG_TRACE_IRQFLAGS_SUPPORT=y -CONFIG_EARLY_PRINTK_USB=y -CONFIG_X86_VERBOSE_BOOTUP=y -CONFIG_EARLY_PRINTK=y -CONFIG_EARLY_PRINTK_DBGP=y -CONFIG_EARLY_PRINTK_EFI=y -CONFIG_EARLY_PRINTK_USB_XDBC=y -CONFIG_X86_PTDUMP_CORE=y -# CONFIG_X86_PTDUMP is not set -# CONFIG_EFI_PGT_DUMP is not set -CONFIG_DEBUG_WX=y -CONFIG_DOUBLEFAULT=y -# CONFIG_DEBUG_TLBFLUSH is not set -# CONFIG_IOMMU_DEBUG is not set -CONFIG_HAVE_MMIOTRACE_SUPPORT=y -# CONFIG_X86_DECODER_SELFTEST is not set -CONFIG_IO_DELAY_TYPE_0X80=0 -CONFIG_IO_DELAY_TYPE_0XED=1 -CONFIG_IO_DELAY_TYPE_UDELAY=2 -CONFIG_IO_DELAY_TYPE_NONE=3 -CONFIG_IO_DELAY_0X80=y -# CONFIG_IO_DELAY_0XED is not set -# CONFIG_IO_DELAY_UDELAY is not set -# CONFIG_IO_DELAY_NONE is not set -CONFIG_DEFAULT_IO_DELAY_TYPE=0 -# CONFIG_DEBUG_BOOT_PARAMS is not set -# CONFIG_CPA_DEBUG is not set -# CONFIG_OPTIMIZE_INLINING is not set -# CONFIG_DEBUG_ENTRY is not set -# CONFIG_DEBUG_NMI_SELFTEST is not set -CONFIG_X86_DEBUG_FPU=y -# CONFIG_PUNIT_ATOM_DEBUG is not set -CONFIG_UNWINDER_ORC=y -# CONFIG_UNWINDER_FRAME_POINTER is not set -- cgit v1.2.3