Diffstat (limited to 'block/blk-zoned.c')
-rw-r--r--  block/blk-zoned.c  928
1 file changed, 699 insertions, 229 deletions
diff --git a/block/blk-zoned.c b/block/blk-zoned.c index 5e2a5788dc3b..dcc295721c2c 100644 --- a/block/blk-zoned.c +++ b/block/blk-zoned.c @@ -33,12 +33,18 @@ static const char *const zone_cond_name[] = { ZONE_COND_NAME(READONLY), ZONE_COND_NAME(FULL), ZONE_COND_NAME(OFFLINE), + ZONE_COND_NAME(ACTIVE), }; #undef ZONE_COND_NAME /* * Per-zone write plug. * @node: hlist_node structure for managing the plug using a hash table. + * @bio_list: The list of BIOs that are currently plugged. + * @bio_work: Work struct to handle issuing of plugged BIOs + * @rcu_head: RCU head to free zone write plugs with an RCU grace period. + * @disk: The gendisk the plug belongs to. + * @lock: Spinlock to atomically manipulate the plug. * @ref: Zone write plug reference counter. A zone write plug reference is * always at least 1 when the plug is hashed in the disk plug hash table. * The reference is incremented whenever a new BIO needing plugging is @@ -48,29 +54,44 @@ static const char *const zone_cond_name[] = { * reference is dropped whenever the zone of the zone write plug is reset, * finished and when the zone becomes full (last write BIO to the zone * completes). - * @lock: Spinlock to atomically manipulate the plug. * @flags: Flags indicating the plug state. * @zone_no: The number of the zone the plug is managing. * @wp_offset: The zone write pointer location relative to the start of the zone * as a number of 512B sectors. - * @bio_list: The list of BIOs that are currently plugged. - * @bio_work: Work struct to handle issuing of plugged BIOs - * @rcu_head: RCU head to free zone write plugs with an RCU grace period. - * @disk: The gendisk the plug belongs to. + * @cond: Condition of the zone */ struct blk_zone_wplug { struct hlist_node node; - refcount_t ref; - spinlock_t lock; - unsigned int flags; - unsigned int zone_no; - unsigned int wp_offset; struct bio_list bio_list; struct work_struct bio_work; struct rcu_head rcu_head; struct gendisk *disk; + spinlock_t lock; + refcount_t ref; + unsigned int flags; + unsigned int zone_no; + unsigned int wp_offset; + enum blk_zone_cond cond; }; +static inline bool disk_need_zone_resources(struct gendisk *disk) +{ + /* + * All request-based zoned devices need zone resources so that the + * block layer can automatically handle write BIO plugging. BIO-based + * device drivers (e.g. DM devices) are normally responsible for + * handling zone write ordering and do not need zone resources, unless + * the driver requires zone append emulation. 
+ */ + return queue_is_mq(disk->queue) || + queue_emulates_zone_append(disk->queue); +} + +static inline unsigned int disk_zone_wplugs_hash_size(struct gendisk *disk) +{ + return 1U << disk->zone_wplugs_hash_bits; +} + /* * Zone write plug flags bits: * - BLK_ZONE_WPLUG_PLUGGED: Indicates that the zone write plug is plugged, @@ -109,28 +130,108 @@ const char *blk_zone_cond_str(enum blk_zone_cond zone_cond) } EXPORT_SYMBOL_GPL(blk_zone_cond_str); -struct disk_report_zones_cb_args { - struct gendisk *disk; - report_zones_cb user_cb; - void *user_data; -}; +static void blk_zone_set_cond(u8 *zones_cond, unsigned int zno, + enum blk_zone_cond cond) +{ + if (!zones_cond) + return; -static void disk_zone_wplug_sync_wp_offset(struct gendisk *disk, - struct blk_zone *zone); + switch (cond) { + case BLK_ZONE_COND_IMP_OPEN: + case BLK_ZONE_COND_EXP_OPEN: + case BLK_ZONE_COND_CLOSED: + zones_cond[zno] = BLK_ZONE_COND_ACTIVE; + return; + case BLK_ZONE_COND_NOT_WP: + case BLK_ZONE_COND_EMPTY: + case BLK_ZONE_COND_FULL: + case BLK_ZONE_COND_OFFLINE: + case BLK_ZONE_COND_READONLY: + default: + zones_cond[zno] = cond; + return; + } +} -static int disk_report_zones_cb(struct blk_zone *zone, unsigned int idx, - void *data) +static void disk_zone_set_cond(struct gendisk *disk, sector_t sector, + enum blk_zone_cond cond) { - struct disk_report_zones_cb_args *args = data; - struct gendisk *disk = args->disk; + u8 *zones_cond; - if (disk->zone_wplugs_hash) - disk_zone_wplug_sync_wp_offset(disk, zone); + rcu_read_lock(); + zones_cond = rcu_dereference(disk->zones_cond); + if (zones_cond) { + unsigned int zno = disk_zone_no(disk, sector); + + /* + * The condition of a conventional, readonly and offline zones + * never changes, so do nothing if the target zone is in one of + * these conditions. + */ + switch (zones_cond[zno]) { + case BLK_ZONE_COND_NOT_WP: + case BLK_ZONE_COND_READONLY: + case BLK_ZONE_COND_OFFLINE: + break; + default: + blk_zone_set_cond(zones_cond, zno, cond); + break; + } + } + rcu_read_unlock(); +} + +/** + * bdev_zone_is_seq - check if a sector belongs to a sequential write zone + * @bdev: block device to check + * @sector: sector number + * + * Check if @sector on @bdev is contained in a sequential write required zone. + */ +bool bdev_zone_is_seq(struct block_device *bdev, sector_t sector) +{ + struct gendisk *disk = bdev->bd_disk; + unsigned int zno = disk_zone_no(disk, sector); + bool is_seq = false; + u8 *zones_cond; + + if (!bdev_is_zoned(bdev)) + return false; + + rcu_read_lock(); + zones_cond = rcu_dereference(disk->zones_cond); + if (zones_cond && zno < disk->nr_zones) + is_seq = zones_cond[zno] != BLK_ZONE_COND_NOT_WP; + rcu_read_unlock(); - if (!args->user_cb) + return is_seq; +} +EXPORT_SYMBOL_GPL(bdev_zone_is_seq); + +/* + * Zone report arguments for block device drivers report_zones operation. + * @cb: report_zones_cb callback for each reported zone. + * @data: Private data passed to report_zones_cb. 
+ */ +struct blk_report_zones_args { + report_zones_cb cb; + void *data; + bool report_active; +}; + +static int blkdev_do_report_zones(struct block_device *bdev, sector_t sector, + unsigned int nr_zones, + struct blk_report_zones_args *args) +{ + struct gendisk *disk = bdev->bd_disk; + + if (!bdev_is_zoned(bdev) || WARN_ON_ONCE(!disk->fops->report_zones)) + return -EOPNOTSUPP; + + if (!nr_zones || sector >= get_capacity(disk)) return 0; - return args->user_cb(zone, idx, args->user_data); + return disk->fops->report_zones(disk, sector, nr_zones, args); } /** @@ -155,22 +256,12 @@ static int disk_report_zones_cb(struct blk_zone *zone, unsigned int idx, int blkdev_report_zones(struct block_device *bdev, sector_t sector, unsigned int nr_zones, report_zones_cb cb, void *data) { - struct gendisk *disk = bdev->bd_disk; - sector_t capacity = get_capacity(disk); - struct disk_report_zones_cb_args args = { - .disk = disk, - .user_cb = cb, - .user_data = data, + struct blk_report_zones_args args = { + .cb = cb, + .data = data, }; - if (!bdev_is_zoned(bdev) || WARN_ON_ONCE(!disk->fops->report_zones)) - return -EOPNOTSUPP; - - if (!nr_zones || sector >= capacity) - return 0; - - return disk->fops->report_zones(disk, sector, nr_zones, - disk_report_zones_cb, &args); + return blkdev_do_report_zones(bdev, sector, nr_zones, &args); } EXPORT_SYMBOL_GPL(blkdev_report_zones); @@ -266,7 +357,12 @@ static int blkdev_copy_zone_to_user(struct blk_zone *zone, unsigned int idx, } /* - * BLKREPORTZONE ioctl processing. + * Mask of valid input flags for BLKREPORTZONEV2 ioctl. + */ +#define BLK_ZONE_REPV2_INPUT_FLAGS BLK_ZONE_REP_CACHED + +/* + * BLKREPORTZONE and BLKREPORTZONEV2 ioctl processing. * Called from blkdev_ioctl. */ int blkdev_report_zones_ioctl(struct block_device *bdev, unsigned int cmd, @@ -290,8 +386,22 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, unsigned int cmd, return -EINVAL; args.zones = argp + sizeof(struct blk_zone_report); - ret = blkdev_report_zones(bdev, rep.sector, rep.nr_zones, - blkdev_copy_zone_to_user, &args); + + switch (cmd) { + case BLKREPORTZONE: + ret = blkdev_report_zones(bdev, rep.sector, rep.nr_zones, + blkdev_copy_zone_to_user, &args); + break; + case BLKREPORTZONEV2: + if (rep.flags & ~BLK_ZONE_REPV2_INPUT_FLAGS) + return -EINVAL; + ret = blkdev_report_zones_cached(bdev, rep.sector, rep.nr_zones, + blkdev_copy_zone_to_user, &args); + break; + default: + return -EINVAL; + } + if (ret < 0) return ret; @@ -401,6 +511,7 @@ static bool disk_insert_zone_wplug(struct gendisk *disk, { struct blk_zone_wplug *zwplg; unsigned long flags; + u8 *zones_cond; unsigned int idx = hash_32(zwplug->zone_no, disk->zone_wplugs_hash_bits); @@ -416,6 +527,20 @@ static bool disk_insert_zone_wplug(struct gendisk *disk, return false; } } + + /* + * Set the zone condition: if we do not yet have a zones_cond array + * attached to the disk, then this is a zone write plug insert from the + * first call to blk_revalidate_disk_zones(), in which case the zone is + * necessarilly in the active condition. 
+ */ + zones_cond = rcu_dereference_check(disk->zones_cond, + lockdep_is_held(&disk->zone_wplugs_lock)); + if (zones_cond) + zwplug->cond = zones_cond[zwplug->zone_no]; + else + zwplug->cond = BLK_ZONE_COND_ACTIVE; + hlist_add_head_rcu(&zwplug->node, &disk->zone_wplugs_hash[idx]); atomic_inc(&disk->nr_zone_wplugs); spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags); @@ -515,10 +640,15 @@ static void disk_remove_zone_wplug(struct gendisk *disk, /* * Mark the zone write plug as unhashed and drop the extra reference we - * took when the plug was inserted in the hash table. + * took when the plug was inserted in the hash table. Also update the + * disk zone condition array with the current condition of the zone + * write plug. */ zwplug->flags |= BLK_ZONE_WPLUG_UNHASHED; spin_lock_irqsave(&disk->zone_wplugs_lock, flags); + blk_zone_set_cond(rcu_dereference_check(disk->zones_cond, + lockdep_is_held(&disk->zone_wplugs_lock)), + zwplug->zone_no, zwplug->cond); hlist_del_init_rcu(&zwplug->node); atomic_dec(&disk->nr_zone_wplugs); spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags); @@ -600,7 +730,7 @@ static inline void blk_zone_wplug_bio_io_error(struct blk_zone_wplug *zwplug, bio_clear_flag(bio, BIO_ZONE_WRITE_PLUGGING); bio_io_error(bio); disk_put_zone_wplug(zwplug); - /* Drop the reference taken by disk_zone_wplug_add_bio(() */ + /* Drop the reference taken by disk_zone_wplug_add_bio(). */ blk_queue_exit(q); } @@ -621,6 +751,22 @@ static void disk_zone_wplug_abort(struct blk_zone_wplug *zwplug) } /* + * Update a zone write plug condition based on the write pointer offset. + */ +static void disk_zone_wplug_update_cond(struct gendisk *disk, + struct blk_zone_wplug *zwplug) +{ + lockdep_assert_held(&zwplug->lock); + + if (disk_zone_wplug_is_full(disk, zwplug)) + zwplug->cond = BLK_ZONE_COND_FULL; + else if (!zwplug->wp_offset) + zwplug->cond = BLK_ZONE_COND_EMPTY; + else + zwplug->cond = BLK_ZONE_COND_ACTIVE; +} + +/* * Set a zone write plug write pointer offset to the specified value. * This aborts all plugged BIOs, which is fine as this function is called for * a zone reset operation, a zone finish operation or if the zone needs a wp @@ -635,6 +781,8 @@ static void disk_zone_wplug_set_wp_offset(struct gendisk *disk, /* Update the zone write pointer and abort all plugged BIOs. */ zwplug->flags &= ~BLK_ZONE_WPLUG_NEED_WP_UPDATE; zwplug->wp_offset = wp_offset; + disk_zone_wplug_update_cond(disk, zwplug); + disk_zone_wplug_abort(zwplug); /* @@ -652,122 +800,399 @@ static unsigned int blk_zone_wp_offset(struct blk_zone *zone) case BLK_ZONE_COND_IMP_OPEN: case BLK_ZONE_COND_EXP_OPEN: case BLK_ZONE_COND_CLOSED: + case BLK_ZONE_COND_ACTIVE: return zone->wp - zone->start; - case BLK_ZONE_COND_FULL: - return zone->len; case BLK_ZONE_COND_EMPTY: return 0; + case BLK_ZONE_COND_FULL: case BLK_ZONE_COND_NOT_WP: case BLK_ZONE_COND_OFFLINE: case BLK_ZONE_COND_READONLY: default: /* - * Conventional, offline and read-only zones do not have a valid - * write pointer. + * Conventional, full, offline and read-only zones do not have + * a valid write pointer. 
*/ return UINT_MAX; } } -static void disk_zone_wplug_sync_wp_offset(struct gendisk *disk, - struct blk_zone *zone) +static unsigned int disk_zone_wplug_sync_wp_offset(struct gendisk *disk, + struct blk_zone *zone) { struct blk_zone_wplug *zwplug; - unsigned long flags; + unsigned int wp_offset = blk_zone_wp_offset(zone); zwplug = disk_get_zone_wplug(disk, zone->start); - if (!zwplug) - return; + if (zwplug) { + unsigned long flags; - spin_lock_irqsave(&zwplug->lock, flags); - if (zwplug->flags & BLK_ZONE_WPLUG_NEED_WP_UPDATE) - disk_zone_wplug_set_wp_offset(disk, zwplug, - blk_zone_wp_offset(zone)); - spin_unlock_irqrestore(&zwplug->lock, flags); + spin_lock_irqsave(&zwplug->lock, flags); + if (zwplug->flags & BLK_ZONE_WPLUG_NEED_WP_UPDATE) + disk_zone_wplug_set_wp_offset(disk, zwplug, wp_offset); + spin_unlock_irqrestore(&zwplug->lock, flags); + disk_put_zone_wplug(zwplug); + } - disk_put_zone_wplug(zwplug); + return wp_offset; } -static int disk_zone_sync_wp_offset(struct gendisk *disk, sector_t sector) +/** + * disk_report_zone - Report one zone + * @disk: Target disk + * @zone: The zone to report + * @idx: The index of the zone in the overall zone report + * @args: report zones callback and data + * + * Description: + * Helper function for block device drivers to report one zone of a zone + * report initiated with blkdev_report_zones(). The zone being reported is + * specified by @zone and used to update, if necessary, the zone write plug + * information for the zone. If @args specifies a user callback function, + * this callback is executed. + */ +int disk_report_zone(struct gendisk *disk, struct blk_zone *zone, + unsigned int idx, struct blk_report_zones_args *args) { - struct disk_report_zones_cb_args args = { - .disk = disk, + if (args && args->report_active) { + /* + * If we come here, then this is a report zones as a fallback + * for a cached report. So collapse the implicit open, explicit + * open and closed conditions into the active zone condition. + */ + switch (zone->cond) { + case BLK_ZONE_COND_IMP_OPEN: + case BLK_ZONE_COND_EXP_OPEN: + case BLK_ZONE_COND_CLOSED: + zone->cond = BLK_ZONE_COND_ACTIVE; + break; + default: + break; + } + } + + if (disk->zone_wplugs_hash) + disk_zone_wplug_sync_wp_offset(disk, zone); + + if (args && args->cb) + return args->cb(zone, idx, args->data); + + return 0; +} +EXPORT_SYMBOL_GPL(disk_report_zone); + +static int blkdev_report_zone_cb(struct blk_zone *zone, unsigned int idx, + void *data) +{ + memcpy(data, zone, sizeof(struct blk_zone)); + return 0; +} + +static int blkdev_report_zone_fallback(struct block_device *bdev, + sector_t sector, struct blk_zone *zone) +{ + struct blk_report_zones_args args = { + .cb = blkdev_report_zone_cb, + .data = zone, + .report_active = true, }; + int error; + + error = blkdev_do_report_zones(bdev, sector, 1, &args); + if (error < 0) + return error; + if (error == 0) + return -EIO; + return 0; +} - return disk->fops->report_zones(disk, sector, 1, - disk_report_zones_cb, &args); +/* + * For devices that natively support zone append operations, we do not use zone + * write plugging for zone append writes, which makes the zone condition + * tracking invalid once zone append was used. In that case fall back to a + * regular report zones to get correct information. 
+ */ +static inline bool blkdev_has_cached_report_zones(struct block_device *bdev) +{ + return disk_need_zone_resources(bdev->bd_disk) && + (bdev_emulates_zone_append(bdev) || + !test_bit(GD_ZONE_APPEND_USED, &bdev->bd_disk->state)); } -static bool blk_zone_wplug_handle_reset_or_finish(struct bio *bio, - unsigned int wp_offset) +/** + * blkdev_get_zone_info - Get a single zone information from cached data + * @bdev: Target block device + * @sector: Sector contained by the target zone + * @zone: zone structure to return the zone information + * + * Description: + * Get the zone information for the zone containing @sector using the zone + * write plug of the target zone, if one exist, or the disk zone condition + * array otherwise. The zone condition may be reported as being + * the BLK_ZONE_COND_ACTIVE condition for a zone that is in the implicit + * open, explicit open or closed condition. + * + * Returns 0 on success and a negative error code on failure. + */ +int blkdev_get_zone_info(struct block_device *bdev, sector_t sector, + struct blk_zone *zone) { - struct gendisk *disk = bio->bi_bdev->bd_disk; - sector_t sector = bio->bi_iter.bi_sector; + struct gendisk *disk = bdev->bd_disk; + sector_t zone_sectors = bdev_zone_sectors(bdev); struct blk_zone_wplug *zwplug; unsigned long flags; + u8 *zones_cond; - /* Conventional zones cannot be reset nor finished. */ - if (!bdev_zone_is_seq(bio->bi_bdev, sector)) { - bio_io_error(bio); - return true; + if (!bdev_is_zoned(bdev)) + return -EOPNOTSUPP; + + if (sector >= get_capacity(disk)) + return -EINVAL; + + memset(zone, 0, sizeof(*zone)); + sector = bdev_zone_start(bdev, sector); + + if (!blkdev_has_cached_report_zones(bdev)) + return blkdev_report_zone_fallback(bdev, sector, zone); + + rcu_read_lock(); + zones_cond = rcu_dereference(disk->zones_cond); + if (!disk->zone_wplugs_hash || !zones_cond) { + rcu_read_unlock(); + return blkdev_report_zone_fallback(bdev, sector, zone); } + zone->cond = zones_cond[disk_zone_no(disk, sector)]; + rcu_read_unlock(); + + zone->start = sector; + zone->len = zone_sectors; /* - * No-wait reset or finish BIOs do not make much sense as the callers - * issue these as blocking operations in most cases. To avoid issues - * the BIO execution potentially failing with BLK_STS_AGAIN, warn about - * REQ_NOWAIT being set and ignore that flag. + * If this is a conventional zone, we do not have a zone write plug and + * can report the zone immediately. */ - if (WARN_ON_ONCE(bio->bi_opf & REQ_NOWAIT)) - bio->bi_opf &= ~REQ_NOWAIT; + if (zone->cond == BLK_ZONE_COND_NOT_WP) { + zone->type = BLK_ZONE_TYPE_CONVENTIONAL; + zone->capacity = zone_sectors; + zone->wp = ULLONG_MAX; + return 0; + } + + /* + * This is a sequential write required zone. If the zone is read-only or + * offline, only set the zone write pointer to an invalid value and + * report the zone. + */ + zone->type = BLK_ZONE_TYPE_SEQWRITE_REQ; + if (disk_zone_is_last(disk, zone)) + zone->capacity = disk->last_zone_capacity; + else + zone->capacity = disk->zone_capacity; + + if (zone->cond == BLK_ZONE_COND_READONLY || + zone->cond == BLK_ZONE_COND_OFFLINE) { + zone->wp = ULLONG_MAX; + return 0; + } /* - * If we have a zone write plug, set its write pointer offset to 0 - * (reset case) or to the zone size (finish case). This will abort all - * BIOs plugged for the target zone. 
It is fine as resetting or - * finishing zones while writes are still in-flight will result in the + * If the zone does not have a zone write plug, it is either full or + * empty, as we otherwise would have a zone write plug for it. In this + * case, set the write pointer accordingly and report the zone. + * Otherwise, if we have a zone write plug, use it. + */ + zwplug = disk_get_zone_wplug(disk, sector); + if (!zwplug) { + if (zone->cond == BLK_ZONE_COND_FULL) + zone->wp = ULLONG_MAX; + else + zone->wp = sector; + return 0; + } + + spin_lock_irqsave(&zwplug->lock, flags); + if (zwplug->flags & BLK_ZONE_WPLUG_NEED_WP_UPDATE) { + spin_unlock_irqrestore(&zwplug->lock, flags); + disk_put_zone_wplug(zwplug); + return blkdev_report_zone_fallback(bdev, sector, zone); + } + zone->cond = zwplug->cond; + zone->wp = sector + zwplug->wp_offset; + spin_unlock_irqrestore(&zwplug->lock, flags); + + disk_put_zone_wplug(zwplug); + + return 0; +} +EXPORT_SYMBOL_GPL(blkdev_get_zone_info); + +/** + * blkdev_report_zones_cached - Get cached zones information + * @bdev: Target block device + * @sector: Sector from which to report zones + * @nr_zones: Maximum number of zones to report + * @cb: Callback function called for each reported zone + * @data: Private data for the callback function + * + * Description: + * Similar to blkdev_report_zones() but instead of calling into the low level + * device driver to get the zone report from the device, use + * blkdev_get_zone_info() to generate the report from the disk zone write + * plugs and zones condition array. Since calling this function without a + * callback does not make sense, @cb must be specified. + */ +int blkdev_report_zones_cached(struct block_device *bdev, sector_t sector, + unsigned int nr_zones, report_zones_cb cb, void *data) +{ + struct gendisk *disk = bdev->bd_disk; + sector_t capacity = get_capacity(disk); + sector_t zone_sectors = bdev_zone_sectors(bdev); + unsigned int idx = 0; + struct blk_zone zone; + int ret; + + if (!cb || !bdev_is_zoned(bdev) || + WARN_ON_ONCE(!disk->fops->report_zones)) + return -EOPNOTSUPP; + + if (!nr_zones || sector >= capacity) + return 0; + + if (!blkdev_has_cached_report_zones(bdev)) { + struct blk_report_zones_args args = { + .cb = cb, + .data = data, + .report_active = true, + }; + + return blkdev_do_report_zones(bdev, sector, nr_zones, &args); + } + + for (sector = bdev_zone_start(bdev, sector); + sector < capacity && idx < nr_zones; + sector += zone_sectors, idx++) { + ret = blkdev_get_zone_info(bdev, sector, &zone); + if (ret) + return ret; + + ret = cb(&zone, idx, data); + if (ret) + return ret; + } + + return idx; +} +EXPORT_SYMBOL_GPL(blkdev_report_zones_cached); + +static void blk_zone_reset_bio_endio(struct bio *bio) +{ + struct gendisk *disk = bio->bi_bdev->bd_disk; + sector_t sector = bio->bi_iter.bi_sector; + struct blk_zone_wplug *zwplug; + + /* + * If we have a zone write plug, set its write pointer offset to 0. + * This will abort all BIOs plugged for the target zone. It is fine as + * resetting zones while writes are still in-flight will result in the * writes failing anyway. 
*/ zwplug = disk_get_zone_wplug(disk, sector); if (zwplug) { + unsigned long flags; + spin_lock_irqsave(&zwplug->lock, flags); - disk_zone_wplug_set_wp_offset(disk, zwplug, wp_offset); + disk_zone_wplug_set_wp_offset(disk, zwplug, 0); spin_unlock_irqrestore(&zwplug->lock, flags); disk_put_zone_wplug(zwplug); + } else { + disk_zone_set_cond(disk, sector, BLK_ZONE_COND_EMPTY); } - - return false; } -static bool blk_zone_wplug_handle_reset_all(struct bio *bio) +static void blk_zone_reset_all_bio_endio(struct bio *bio) { struct gendisk *disk = bio->bi_bdev->bd_disk; + sector_t capacity = get_capacity(disk); struct blk_zone_wplug *zwplug; unsigned long flags; sector_t sector; + unsigned int i; + + if (atomic_read(&disk->nr_zone_wplugs)) { + /* Update the condition of all zone write plugs. */ + rcu_read_lock(); + for (i = 0; i < disk_zone_wplugs_hash_size(disk); i++) { + hlist_for_each_entry_rcu(zwplug, + &disk->zone_wplugs_hash[i], + node) { + spin_lock_irqsave(&zwplug->lock, flags); + disk_zone_wplug_set_wp_offset(disk, zwplug, 0); + spin_unlock_irqrestore(&zwplug->lock, flags); + } + } + rcu_read_unlock(); + } + + /* Update the cached zone conditions. */ + for (sector = 0; sector < capacity; + sector += bdev_zone_sectors(bio->bi_bdev)) + disk_zone_set_cond(disk, sector, BLK_ZONE_COND_EMPTY); + clear_bit(GD_ZONE_APPEND_USED, &disk->state); +} + +static void blk_zone_finish_bio_endio(struct bio *bio) +{ + struct block_device *bdev = bio->bi_bdev; + struct gendisk *disk = bdev->bd_disk; + sector_t sector = bio->bi_iter.bi_sector; + struct blk_zone_wplug *zwplug; /* - * Set the write pointer offset of all zone write plugs to 0. This will - * abort all plugged BIOs. It is fine as resetting zones while writes - * are still in-flight will result in the writes failing anyway. + * If we have a zone write plug, set its write pointer offset to the + * zone size. This will abort all BIOs plugged for the target zone. It + * is fine as resetting zones while writes are still in-flight will + * result in the writes failing anyway. */ - for (sector = 0; sector < get_capacity(disk); - sector += disk->queue->limits.chunk_sectors) { - zwplug = disk_get_zone_wplug(disk, sector); - if (zwplug) { - spin_lock_irqsave(&zwplug->lock, flags); - disk_zone_wplug_set_wp_offset(disk, zwplug, 0); - spin_unlock_irqrestore(&zwplug->lock, flags); - disk_put_zone_wplug(zwplug); - } + zwplug = disk_get_zone_wplug(disk, sector); + if (zwplug) { + unsigned long flags; + + spin_lock_irqsave(&zwplug->lock, flags); + disk_zone_wplug_set_wp_offset(disk, zwplug, + bdev_zone_sectors(bdev)); + spin_unlock_irqrestore(&zwplug->lock, flags); + disk_put_zone_wplug(zwplug); + } else { + disk_zone_set_cond(disk, sector, BLK_ZONE_COND_FULL); } +} - return false; +void blk_zone_mgmt_bio_endio(struct bio *bio) +{ + /* If the BIO failed, we have nothing to do. */ + if (bio->bi_status != BLK_STS_OK) + return; + + switch (bio_op(bio)) { + case REQ_OP_ZONE_RESET: + blk_zone_reset_bio_endio(bio); + return; + case REQ_OP_ZONE_RESET_ALL: + blk_zone_reset_all_bio_endio(bio); + return; + case REQ_OP_ZONE_FINISH: + blk_zone_finish_bio_endio(bio); + return; + default: + return; + } } static void disk_zone_wplug_schedule_bio_work(struct gendisk *disk, struct blk_zone_wplug *zwplug) { + lockdep_assert_held(&zwplug->lock); + /* * Take a reference on the zone write plug and schedule the submission * of the next plugged BIO. 
blk_zone_wplug_bio_work() will release the @@ -782,8 +1207,6 @@ static inline void disk_zone_wplug_add_bio(struct gendisk *disk, struct blk_zone_wplug *zwplug, struct bio *bio, unsigned int nr_segs) { - bool schedule_bio_work = false; - /* * Grab an extra reference on the BIO request queue usage counter. * This reference will be reused to submit a request for the BIO for @@ -800,16 +1223,6 @@ static inline void disk_zone_wplug_add_bio(struct gendisk *disk, bio_clear_polled(bio); /* - * REQ_NOWAIT BIOs are always handled using the zone write plug BIO - * work, which can block. So clear the REQ_NOWAIT flag and schedule the - * work if this is the first BIO we are plugging. - */ - if (bio->bi_opf & REQ_NOWAIT) { - schedule_bio_work = !(zwplug->flags & BLK_ZONE_WPLUG_PLUGGED); - bio->bi_opf &= ~REQ_NOWAIT; - } - - /* * Reuse the poll cookie field to store the number of segments when * split to the hardware limits. */ @@ -824,11 +1237,6 @@ static inline void disk_zone_wplug_add_bio(struct gendisk *disk, bio_list_add(&zwplug->bio_list, bio); trace_disk_zone_wplug_add_bio(zwplug->disk->queue, zwplug->zone_no, bio->bi_iter.bi_sector, bio_sectors(bio)); - - zwplug->flags |= BLK_ZONE_WPLUG_PLUGGED; - - if (schedule_bio_work) - disk_zone_wplug_schedule_bio_work(disk, zwplug); } /* @@ -836,6 +1244,7 @@ static inline void disk_zone_wplug_add_bio(struct gendisk *disk, */ void blk_zone_write_plug_bio_merged(struct bio *bio) { + struct gendisk *disk = bio->bi_bdev->bd_disk; struct blk_zone_wplug *zwplug; unsigned long flags; @@ -857,13 +1266,13 @@ void blk_zone_write_plug_bio_merged(struct bio *bio) * have at least one request and one BIO referencing the zone write * plug. So this should not fail. */ - zwplug = disk_get_zone_wplug(bio->bi_bdev->bd_disk, - bio->bi_iter.bi_sector); + zwplug = disk_get_zone_wplug(disk, bio->bi_iter.bi_sector); if (WARN_ON_ONCE(!zwplug)) return; spin_lock_irqsave(&zwplug->lock, flags); zwplug->wp_offset += bio_sectors(bio); + disk_zone_wplug_update_cond(disk, zwplug); spin_unlock_irqrestore(&zwplug->lock, flags); } @@ -922,6 +1331,7 @@ void blk_zone_write_plug_init_request(struct request *req) /* Drop the reference taken by disk_zone_wplug_add_bio(). */ blk_queue_exit(q); zwplug->wp_offset += bio_sectors(bio); + disk_zone_wplug_update_cond(disk, zwplug); req_back_sector += bio_sectors(bio); } @@ -985,6 +1395,7 @@ static bool blk_zone_wplug_prepare_bio(struct blk_zone_wplug *zwplug, /* Advance the zone write pointer offset. */ zwplug->wp_offset += bio_sectors(bio); + disk_zone_wplug_update_cond(disk, zwplug); return true; } @@ -1036,14 +1447,17 @@ static bool blk_zone_wplug_handle_write(struct bio *bio, unsigned int nr_segs) bio_set_flag(bio, BIO_ZONE_WRITE_PLUGGING); /* - * If the zone is already plugged, add the BIO to the plug BIO list. - * Do the same for REQ_NOWAIT BIOs to ensure that we will not see a - * BLK_STS_AGAIN failure if we let the BIO execute. - * Otherwise, plug and let the BIO execute. + * Add REQ_NOWAIT BIOs to the plug list to ensure that we will not see a + * BLK_STS_AGAIN failure if we let the caller submit the BIO. */ - if ((zwplug->flags & BLK_ZONE_WPLUG_PLUGGED) || - (bio->bi_opf & REQ_NOWAIT)) - goto plug; + if (bio->bi_opf & REQ_NOWAIT) { + bio->bi_opf &= ~REQ_NOWAIT; + goto queue_bio; + } + + /* If the zone is already plugged, add the BIO to the BIO plug list. 
*/ + if (zwplug->flags & BLK_ZONE_WPLUG_PLUGGED) + goto queue_bio; if (!blk_zone_wplug_prepare_bio(zwplug, bio)) { spin_unlock_irqrestore(&zwplug->lock, flags); @@ -1051,15 +1465,21 @@ static bool blk_zone_wplug_handle_write(struct bio *bio, unsigned int nr_segs) return true; } + /* Otherwise, plug and let the caller submit the BIO. */ zwplug->flags |= BLK_ZONE_WPLUG_PLUGGED; spin_unlock_irqrestore(&zwplug->lock, flags); return false; -plug: +queue_bio: disk_zone_wplug_add_bio(disk, zwplug, bio, nr_segs); + if (!(zwplug->flags & BLK_ZONE_WPLUG_PLUGGED)) { + zwplug->flags |= BLK_ZONE_WPLUG_PLUGGED; + disk_zone_wplug_schedule_bio_work(disk, zwplug); + } + spin_unlock_irqrestore(&zwplug->lock, flags); return true; @@ -1071,6 +1491,9 @@ static void blk_zone_wplug_handle_native_zone_append(struct bio *bio) struct blk_zone_wplug *zwplug; unsigned long flags; + if (!test_bit(GD_ZONE_APPEND_USED, &disk->state)) + set_bit(GD_ZONE_APPEND_USED, &disk->state); + /* * We have native support for zone append operations, so we are not * going to handle @bio through plugging. However, we may already have a @@ -1106,6 +1529,30 @@ static void blk_zone_wplug_handle_native_zone_append(struct bio *bio) disk_put_zone_wplug(zwplug); } +static bool blk_zone_wplug_handle_zone_mgmt(struct bio *bio) +{ + if (bio_op(bio) != REQ_OP_ZONE_RESET_ALL && + !bdev_zone_is_seq(bio->bi_bdev, bio->bi_iter.bi_sector)) { + /* + * Zone reset and zone finish operations do not apply to + * conventional zones. + */ + bio_io_error(bio); + return true; + } + + /* + * No-wait zone management BIOs do not make much sense as the callers + * issue these as blocking operations in most cases. To avoid issues + * with the BIO execution potentially failing with BLK_STS_AGAIN, warn + * about REQ_NOWAIT being set and ignore that flag. + */ + if (WARN_ON_ONCE(bio->bi_opf & REQ_NOWAIT)) + bio->bi_opf &= ~REQ_NOWAIT; + + return false; +} + /** * blk_zone_plug_bio - Handle a zone write BIO with zone write plugging * @bio: The BIO being submitted @@ -1153,12 +1600,9 @@ bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs) case REQ_OP_WRITE_ZEROES: return blk_zone_wplug_handle_write(bio, nr_segs); case REQ_OP_ZONE_RESET: - return blk_zone_wplug_handle_reset_or_finish(bio, 0); case REQ_OP_ZONE_FINISH: - return blk_zone_wplug_handle_reset_or_finish(bio, - bdev_zone_sectors(bdev)); case REQ_OP_ZONE_RESET_ALL: - return blk_zone_wplug_handle_reset_all(bio); + return blk_zone_wplug_handle_zone_mgmt(bio); default: return false; } @@ -1332,11 +1776,6 @@ put_zwplug: disk_put_zone_wplug(zwplug); } -static inline unsigned int disk_zone_wplugs_hash_size(struct gendisk *disk) -{ - return 1U << disk->zone_wplugs_hash_bits; -} - void disk_init_zone_resources(struct gendisk *disk) { spin_lock_init(&disk->zone_wplugs_lock); @@ -1415,31 +1854,30 @@ static void disk_destroy_zone_wplugs_hash_table(struct gendisk *disk) kfree(disk->zone_wplugs_hash); disk->zone_wplugs_hash = NULL; disk->zone_wplugs_hash_bits = 0; + + /* + * Wait for the zone write plugs to be RCU-freed before destroying the + * mempool. 
+ */ + rcu_barrier(); + mempool_destroy(disk->zone_wplugs_pool); + disk->zone_wplugs_pool = NULL; } -static unsigned int disk_set_conv_zones_bitmap(struct gendisk *disk, - unsigned long *bitmap) +static void disk_set_zones_cond_array(struct gendisk *disk, u8 *zones_cond) { - unsigned int nr_conv_zones = 0; unsigned long flags; spin_lock_irqsave(&disk->zone_wplugs_lock, flags); - if (bitmap) - nr_conv_zones = bitmap_weight(bitmap, disk->nr_zones); - bitmap = rcu_replace_pointer(disk->conv_zones_bitmap, bitmap, - lockdep_is_held(&disk->zone_wplugs_lock)); + zones_cond = rcu_replace_pointer(disk->zones_cond, zones_cond, + lockdep_is_held(&disk->zone_wplugs_lock)); spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags); - kfree_rcu_mightsleep(bitmap); - - return nr_conv_zones; + kfree_rcu_mightsleep(zones_cond); } void disk_free_zone_resources(struct gendisk *disk) { - if (!disk->zone_wplugs_pool) - return; - if (disk->zone_wplugs_wq) { destroy_workqueue(disk->zone_wplugs_wq); disk->zone_wplugs_wq = NULL; @@ -1447,40 +1885,37 @@ void disk_free_zone_resources(struct gendisk *disk) disk_destroy_zone_wplugs_hash_table(disk); - /* - * Wait for the zone write plugs to be RCU-freed before - * destorying the mempool. - */ - rcu_barrier(); - - mempool_destroy(disk->zone_wplugs_pool); - disk->zone_wplugs_pool = NULL; - - disk_set_conv_zones_bitmap(disk, NULL); + disk_set_zones_cond_array(disk, NULL); disk->zone_capacity = 0; disk->last_zone_capacity = 0; disk->nr_zones = 0; } -static inline bool disk_need_zone_resources(struct gendisk *disk) -{ - /* - * All mq zoned devices need zone resources so that the block layer - * can automatically handle write BIO plugging. BIO-based device drivers - * (e.g. DM devices) are normally responsible for handling zone write - * ordering and do not need zone resources, unless the driver requires - * zone append emulation. - */ - return queue_is_mq(disk->queue) || - queue_emulates_zone_append(disk->queue); -} +struct blk_revalidate_zone_args { + struct gendisk *disk; + u8 *zones_cond; + unsigned int nr_zones; + unsigned int nr_conv_zones; + unsigned int zone_capacity; + unsigned int last_zone_capacity; + sector_t sector; +}; static int disk_revalidate_zone_resources(struct gendisk *disk, - unsigned int nr_zones) + struct blk_revalidate_zone_args *args) { struct queue_limits *lim = &disk->queue->limits; unsigned int pool_size; + args->disk = disk; + args->nr_zones = + DIV_ROUND_UP_ULL(get_capacity(disk), lim->chunk_sectors); + + /* Cached zone conditions: 1 byte per zone */ + args->zones_cond = kzalloc(args->nr_zones, GFP_NOIO); + if (!args->zones_cond) + return -ENOMEM; + if (!disk_need_zone_resources(disk)) return 0; @@ -1490,7 +1925,8 @@ static int disk_revalidate_zone_resources(struct gendisk *disk, */ pool_size = max(lim->max_open_zones, lim->max_active_zones); if (!pool_size) - pool_size = min(BLK_ZONE_WPLUG_DEFAULT_POOL_SIZE, nr_zones); + pool_size = + min(BLK_ZONE_WPLUG_DEFAULT_POOL_SIZE, args->nr_zones); if (!disk->zone_wplugs_hash) return disk_alloc_zone_resources(disk, pool_size); @@ -1498,15 +1934,6 @@ static int disk_revalidate_zone_resources(struct gendisk *disk, return 0; } -struct blk_revalidate_zone_args { - struct gendisk *disk; - unsigned long *conv_zones_bitmap; - unsigned int nr_zones; - unsigned int zone_capacity; - unsigned int last_zone_capacity; - sector_t sector; -}; - /* * Update the disk zone resources information and device queue limits. * The disk queue is frozen when this is executed. 
@@ -1515,30 +1942,34 @@ static int disk_update_zone_resources(struct gendisk *disk, struct blk_revalidate_zone_args *args) { struct request_queue *q = disk->queue; - unsigned int nr_seq_zones, nr_conv_zones; - unsigned int pool_size; + unsigned int nr_seq_zones; + unsigned int pool_size, memflags; struct queue_limits lim; + int ret = 0; + + lim = queue_limits_start_update(q); + + memflags = blk_mq_freeze_queue(q); disk->nr_zones = args->nr_zones; - disk->zone_capacity = args->zone_capacity; - disk->last_zone_capacity = args->last_zone_capacity; - nr_conv_zones = - disk_set_conv_zones_bitmap(disk, args->conv_zones_bitmap); - if (nr_conv_zones >= disk->nr_zones) { + if (args->nr_conv_zones >= disk->nr_zones) { pr_warn("%s: Invalid number of conventional zones %u / %u\n", - disk->disk_name, nr_conv_zones, disk->nr_zones); - return -ENODEV; + disk->disk_name, args->nr_conv_zones, disk->nr_zones); + ret = -ENODEV; + goto unfreeze; } - lim = queue_limits_start_update(q); + disk->zone_capacity = args->zone_capacity; + disk->last_zone_capacity = args->last_zone_capacity; + disk_set_zones_cond_array(disk, args->zones_cond); /* - * Some devices can advertize zone resource limits that are larger than + * Some devices can advertise zone resource limits that are larger than * the number of sequential zones of the zoned block device, e.g. a * small ZNS namespace. For such case, assume that the zoned device has * no zone resource limits. */ - nr_seq_zones = disk->nr_zones - nr_conv_zones; + nr_seq_zones = disk->nr_zones - args->nr_conv_zones; if (lim.max_open_zones >= nr_seq_zones) lim.max_open_zones = 0; if (lim.max_active_zones >= nr_seq_zones) @@ -1568,7 +1999,53 @@ static int disk_update_zone_resources(struct gendisk *disk, } commit: - return queue_limits_commit_update_frozen(q, &lim); + ret = queue_limits_commit_update(q, &lim); + +unfreeze: + if (ret) + disk_free_zone_resources(disk); + + blk_mq_unfreeze_queue(q, memflags); + + return ret; +} + +static int blk_revalidate_zone_cond(struct blk_zone *zone, unsigned int idx, + struct blk_revalidate_zone_args *args) +{ + enum blk_zone_cond cond = zone->cond; + + /* Check that the zone condition is consistent with the zone type. 
*/ + switch (cond) { + case BLK_ZONE_COND_NOT_WP: + if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL) + goto invalid_condition; + break; + case BLK_ZONE_COND_IMP_OPEN: + case BLK_ZONE_COND_EXP_OPEN: + case BLK_ZONE_COND_CLOSED: + case BLK_ZONE_COND_EMPTY: + case BLK_ZONE_COND_FULL: + case BLK_ZONE_COND_OFFLINE: + case BLK_ZONE_COND_READONLY: + if (zone->type != BLK_ZONE_TYPE_SEQWRITE_REQ) + goto invalid_condition; + break; + default: + pr_warn("%s: Invalid zone condition 0x%X\n", + args->disk->disk_name, cond); + return -ENODEV; + } + + blk_zone_set_cond(args->zones_cond, idx, cond); + + return 0; + +invalid_condition: + pr_warn("%s: Invalid zone condition 0x%x for type 0x%x\n", + args->disk->disk_name, cond, zone->type); + + return -ENODEV; } static int blk_revalidate_conv_zone(struct blk_zone *zone, unsigned int idx, @@ -1585,17 +2062,7 @@ static int blk_revalidate_conv_zone(struct blk_zone *zone, unsigned int idx, if (disk_zone_is_last(disk, zone)) args->last_zone_capacity = zone->capacity; - if (!disk_need_zone_resources(disk)) - return 0; - - if (!args->conv_zones_bitmap) { - args->conv_zones_bitmap = - bitmap_zalloc(args->nr_zones, GFP_NOIO); - if (!args->conv_zones_bitmap) - return -ENOMEM; - } - - set_bit(idx, args->conv_zones_bitmap); + args->nr_conv_zones++; return 0; } @@ -1632,9 +2099,7 @@ static int blk_revalidate_seq_zone(struct blk_zone *zone, unsigned int idx, if (!queue_emulates_zone_append(disk->queue) || !disk->zone_wplugs_hash) return 0; - disk_zone_wplug_sync_wp_offset(disk, zone); - - wp_offset = blk_zone_wp_offset(zone); + wp_offset = disk_zone_wplug_sync_wp_offset(disk, zone); if (!wp_offset || wp_offset >= zone->capacity) return 0; @@ -1693,6 +2158,11 @@ static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx, return -ENODEV; } + /* Check zone condition */ + ret = blk_revalidate_zone_cond(zone, idx, args); + if (ret) + return ret; + /* Check zone type */ switch (zone->type) { case BLK_ZONE_TYPE_CONVENTIONAL: @@ -1733,7 +2203,11 @@ int blk_revalidate_disk_zones(struct gendisk *disk) sector_t zone_sectors = q->limits.chunk_sectors; sector_t capacity = get_capacity(disk); struct blk_revalidate_zone_args args = { }; - unsigned int noio_flag; + unsigned int memflags, noio_flag; + struct blk_report_zones_args rep_args = { + .cb = blk_revalidate_zone_cb, + .data = &args, + }; int ret = -ENOMEM; if (WARN_ON_ONCE(!blk_queue_is_zoned(q))) @@ -1756,17 +2230,14 @@ int blk_revalidate_disk_zones(struct gendisk *disk) * Ensure that all memory allocations in this context are done as if * GFP_NOIO was specified. */ - args.disk = disk; - args.nr_zones = (capacity + zone_sectors - 1) >> ilog2(zone_sectors); noio_flag = memalloc_noio_save(); - ret = disk_revalidate_zone_resources(disk, args.nr_zones); + ret = disk_revalidate_zone_resources(disk, &args); if (ret) { memalloc_noio_restore(noio_flag); return ret; } - ret = disk->fops->report_zones(disk, 0, UINT_MAX, - blk_revalidate_zone_cb, &args); + ret = disk->fops->report_zones(disk, 0, UINT_MAX, &rep_args); if (!ret) { pr_warn("%s: No zones reported\n", disk->disk_name); ret = -ENODEV; @@ -1783,20 +2254,14 @@ int blk_revalidate_disk_zones(struct gendisk *disk) ret = -ENODEV; } - /* - * Set the new disk zone parameters only once the queue is frozen and - * all I/Os are completed. 
- */ if (ret > 0) - ret = disk_update_zone_resources(disk, &args); - else - pr_warn("%s: failed to revalidate zones\n", disk->disk_name); - if (ret) { - unsigned int memflags = blk_mq_freeze_queue(q); + return disk_update_zone_resources(disk, &args); - disk_free_zone_resources(disk); - blk_mq_unfreeze_queue(q, memflags); - } + pr_warn("%s: failed to revalidate zones\n", disk->disk_name); + + memflags = blk_mq_freeze_queue(q); + disk_free_zone_resources(disk); + blk_mq_unfreeze_queue(q, memflags); return ret; } @@ -1817,6 +2282,7 @@ EXPORT_SYMBOL_GPL(blk_revalidate_disk_zones); int blk_zone_issue_zeroout(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask) { + struct gendisk *disk = bdev->bd_disk; int ret; if (WARN_ON_ONCE(!bdev_is_zoned(bdev))) @@ -1832,7 +2298,7 @@ int blk_zone_issue_zeroout(struct block_device *bdev, sector_t sector, * pointer. Undo this using a report zone to update the zone write * pointer to the correct current value. */ - ret = disk_zone_sync_wp_offset(bdev->bd_disk, sector); + ret = disk->fops->report_zones(disk, sector, 1, NULL); if (ret != 1) return ret < 0 ? ret : -EIO; @@ -1851,18 +2317,22 @@ static void queue_zone_wplug_show(struct blk_zone_wplug *zwplug, unsigned int zwp_wp_offset, zwp_flags; unsigned int zwp_zone_no, zwp_ref; unsigned int zwp_bio_list_size; + enum blk_zone_cond zwp_cond; unsigned long flags; spin_lock_irqsave(&zwplug->lock, flags); zwp_zone_no = zwplug->zone_no; zwp_flags = zwplug->flags; zwp_ref = refcount_read(&zwplug->ref); + zwp_cond = zwplug->cond; zwp_wp_offset = zwplug->wp_offset; zwp_bio_list_size = bio_list_size(&zwplug->bio_list); spin_unlock_irqrestore(&zwplug->lock, flags); - seq_printf(m, "%u 0x%x %u %u %u\n", zwp_zone_no, zwp_flags, zwp_ref, - zwp_wp_offset, zwp_bio_list_size); + seq_printf(m, + "Zone no: %u, flags: 0x%x, ref: %u, cond: %s, wp ofst: %u, pending BIO: %u\n", + zwp_zone_no, zwp_flags, zwp_ref, blk_zone_cond_str(zwp_cond), + zwp_wp_offset, zwp_bio_list_size); } int queue_zone_wplugs_show(void *data, struct seq_file *m) |
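The BLKREPORTZONEV2 case added to blkdev_report_zones_ioctl() accepts the BLK_ZONE_REP_CACHED input flag and serves the report through blkdev_report_zones_cached(). As a rough, non-authoritative illustration of the userspace side, the sketch below fills in a struct blk_zone_report and issues the new ioctl. The function name report_cached_zones() is made up for this example, and BLKREPORTZONEV2 and BLK_ZONE_REP_CACHED are assumed to come from the UAPI header changes elsewhere in this series (they are not part of this diff), so the sketch will not build against an unpatched <linux/blkzoned.h>.

/*
 * Userspace sketch only: BLKREPORTZONEV2 and BLK_ZONE_REP_CACHED are
 * assumed to be provided by the UAPI patches of this series and are
 * not defined by this diff.
 */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/blkzoned.h>

static int report_cached_zones(const char *dev, __u64 sector, __u32 nr_zones)
{
	struct blk_zone_report *rep;
	size_t bufsz = sizeof(*rep) + nr_zones * sizeof(struct blk_zone);
	int fd, ret;
	__u32 i;

	fd = open(dev, O_RDONLY);
	if (fd < 0)
		return -1;

	rep = calloc(1, bufsz);
	if (!rep) {
		close(fd);
		return -1;
	}

	rep->sector = sector;
	rep->nr_zones = nr_zones;
	rep->flags = BLK_ZONE_REP_CACHED;	/* allowed input flag for the v2 ioctl */

	ret = ioctl(fd, BLKREPORTZONEV2, rep);
	if (!ret) {
		/* On success, nr_zones is updated to the number of zones reported. */
		for (i = 0; i < rep->nr_zones; i++)
			printf("zone at %llu: cond 0x%x, wp %llu\n",
			       (unsigned long long)rep->zones[i].start,
			       rep->zones[i].cond,
			       (unsigned long long)rep->zones[i].wp);
	}

	free(rep);
	close(fd);
	return ret;
}

Note that, as implemented by blk_zone_set_cond() and disk_report_zone() above, the cached path collapses the implicit open, explicit open and closed conditions into BLK_ZONE_COND_ACTIVE, so userspace consuming this report must be prepared for that condition value.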

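blkdev_get_zone_info() is exported for in-kernel users that want a single zone's cached condition and write pointer without a device round trip. Below is a minimal hypothetical caller, assuming the declaration lands in <linux/blkdev.h> alongside the rest of the zoned block API; the function name example_next_write_sector() and the -ENOSPC/-EIO error mapping are illustrative assumptions, not part of this patch, and BLK_ZONE_COND_ACTIVE is the condition value added to enum blk_zone_cond by the UAPI side of this series.

/*
 * Hypothetical in-kernel caller (not part of this patch): look up the
 * cached condition and write pointer of the zone containing @sector
 * using the blkdev_get_zone_info() helper exported above.
 */
#include <linux/blkdev.h>
#include <linux/blkzoned.h>

static int example_next_write_sector(struct block_device *bdev,
				     sector_t sector, sector_t *wp_sector)
{
	struct blk_zone zone;
	int ret;

	ret = blkdev_get_zone_info(bdev, sector, &zone);
	if (ret)
		return ret;

	switch (zone.cond) {
	case BLK_ZONE_COND_NOT_WP:
		/* Conventional zone: any sector in the zone is writable. */
		*wp_sector = sector;
		return 0;
	case BLK_ZONE_COND_EMPTY:
	case BLK_ZONE_COND_ACTIVE:
		/* Sequential zone with a valid cached write pointer. */
		*wp_sector = zone.wp;
		return 0;
	case BLK_ZONE_COND_FULL:
		return -ENOSPC;
	default:
		/* Read-only or offline zones have no valid write pointer. */
		return -EIO;
	}
}

The cached lookup avoids issuing a report zones command to the device, at the cost of the collapsed ACTIVE condition; per the fallback in blkdev_get_zone_info(), callers that need the exact implicit open, explicit open or closed state still go through a regular blkdev_report_zones() report.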