Diffstat (limited to 'fs')
-rw-r--r--  fs/btrfs/extent_io.c            |  8
-rw-r--r--  fs/btrfs/file.c                 | 10
-rw-r--r--  fs/btrfs/inode.c                |  5
-rw-r--r--  fs/btrfs/qgroup.c               |  4
-rw-r--r--  fs/btrfs/scrub.c                |  2
-rw-r--r--  fs/btrfs/tree-log.c             |  5
-rw-r--r--  fs/btrfs/zoned.c                | 60
-rw-r--r--  fs/crypto/inline_crypt.c        |  3
-rw-r--r--  fs/erofs/decompressor_zstd.c    | 11
-rw-r--r--  fs/nfsd/nfs4state.c             | 68
-rw-r--r--  fs/nfsd/nfs4xdr.c               |  5
-rw-r--r--  fs/nfsd/nfsd.h                  |  1
-rw-r--r--  fs/nfsd/nfsfh.c                 |  6
-rw-r--r--  fs/nfsd/xdr4.h                  |  3
-rw-r--r--  fs/nilfs2/segment.c             |  7
-rw-r--r--  fs/proc/generic.c               | 12
-rw-r--r--  fs/smb/client/cached_dir.c      | 16
-rw-r--r--  fs/smb/client/smb2inode.c       |  2
-rw-r--r--  fs/smb/client/smb2pdu.c         |  7
-rw-r--r--  fs/smb/server/transport_rdma.c  | 38
-rw-r--r--  fs/smb/server/transport_tcp.c   |  5
-rw-r--r--  fs/xfs/xfs_discard.c            |  4
-rw-r--r--  fs/xfs/xfs_iomap.c              | 82
-rw-r--r--  fs/xfs/xfs_zone_alloc.c         |  6
24 files changed, 270 insertions, 100 deletions
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 755ec6dfd51c..23273d0e6f22 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2228,6 +2228,14 @@ static noinline_for_stack void write_one_eb(struct extent_buffer *eb,
wbc_account_cgroup_owner(wbc, folio, range_len);
folio_unlock(folio);
}
+ /*
+ * If the fs is already in error status, do not submit any writeback
+ * but immediately finish it.
+ */
+ if (unlikely(BTRFS_FS_ERROR(fs_info))) {
+ btrfs_bio_end_io(bbio, errno_to_blk_status(BTRFS_FS_ERROR(fs_info)));
+ return;
+ }
btrfs_submit_bbio(bbio, 0);
}
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 7efd1f8a1912..fa82def46e39 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -2854,12 +2854,22 @@ static int btrfs_fallocate_update_isize(struct inode *inode,
{
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(inode)->root;
+ u64 range_start;
+ u64 range_end;
int ret;
int ret2;
if (mode & FALLOC_FL_KEEP_SIZE || end <= i_size_read(inode))
return 0;
+ range_start = round_down(i_size_read(inode), root->fs_info->sectorsize);
+ range_end = round_up(end, root->fs_info->sectorsize);
+
+ ret = btrfs_inode_set_file_extent_range(BTRFS_I(inode), range_start,
+ range_end - range_start);
+ if (ret)
+ return ret;
+
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans))
return PTR_ERR(trans);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 3b1b3a0553ee..6282911e536f 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -177,8 +177,10 @@ static int data_reloc_print_warning_inode(u64 inum, u64 offset, u64 num_bytes,
return ret;
}
ret = paths_from_inode(inum, ipath);
- if (ret < 0)
+ if (ret < 0) {
+ btrfs_put_root(local_root);
goto err;
+ }
/*
* We deliberately ignore the bit ipath might have been too small to
@@ -6873,7 +6875,6 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
BTRFS_I(inode)->dir_index = 0ULL;
inode_inc_iversion(inode);
inode_set_ctime_current(inode);
- set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags);
ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
&fname.disk_name, 1, index);
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 1175b8192cd7..31ad8580322a 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1539,8 +1539,10 @@ int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src, u64 dst
ASSERT(prealloc);
/* Check the level of src and dst first */
- if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst))
+ if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst)) {
+ kfree(prealloc);
return -EINVAL;
+ }
mutex_lock(&fs_info->qgroup_ioctl_lock);
if (!fs_info->quota_root) {
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 651b11884f82..ba20d9286a34 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -2203,6 +2203,7 @@ static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx,
ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, full_stripe_start,
&length, &bioc, NULL, NULL);
if (ret < 0) {
+ bio_put(bio);
btrfs_put_bioc(bioc);
btrfs_bio_counter_dec(fs_info);
goto out;
@@ -2212,6 +2213,7 @@ static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx,
btrfs_put_bioc(bioc);
if (!rbio) {
ret = -ENOMEM;
+ bio_put(bio);
btrfs_bio_counter_dec(fs_info);
goto out;
}
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 621e0df097e3..30f3c3b849c1 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -7122,7 +7122,7 @@ log_extents:
* a power failure unless the log was synced as part of an fsync
* against any other unrelated inode.
*/
- if (inode_only != LOG_INODE_EXISTS)
+ if (!ctx->logging_new_name && inode_only != LOG_INODE_EXISTS)
inode->last_log_commit = inode->last_sub_trans;
spin_unlock(&inode->lock);
@@ -7910,6 +7910,9 @@ void btrfs_log_new_name(struct btrfs_trans_handle *trans,
bool log_pinned = false;
int ret;
+ /* The inode has a new name (ref/extref), so make sure we log it. */
+ set_bit(BTRFS_INODE_COPY_EVERYTHING, &inode->runtime_flags);
+
btrfs_init_log_ctx(&ctx, inode);
ctx.logging_new_name = true;
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index 0ea0df18a8e4..d1db7fa1fe58 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -1317,6 +1317,7 @@ static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx,
if (!btrfs_dev_is_sequential(device, info->physical)) {
up_read(&dev_replace->rwsem);
info->alloc_offset = WP_CONVENTIONAL;
+ info->capacity = device->zone_info->zone_size;
return 0;
}
@@ -1522,6 +1523,8 @@ static int btrfs_load_block_group_raid0(struct btrfs_block_group *bg,
u64 last_alloc)
{
struct btrfs_fs_info *fs_info = bg->fs_info;
+ u64 stripe_nr = 0, stripe_offset = 0;
+ u32 stripe_index = 0;
if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
@@ -1529,28 +1532,26 @@ static int btrfs_load_block_group_raid0(struct btrfs_block_group *bg,
return -EINVAL;
}
+ if (last_alloc) {
+ u32 factor = map->num_stripes;
+
+ stripe_nr = last_alloc >> BTRFS_STRIPE_LEN_SHIFT;
+ stripe_offset = last_alloc & BTRFS_STRIPE_LEN_MASK;
+ stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
+ }
+
for (int i = 0; i < map->num_stripes; i++) {
if (zone_info[i].alloc_offset == WP_MISSING_DEV)
continue;
if (zone_info[i].alloc_offset == WP_CONVENTIONAL) {
- u64 stripe_nr, full_stripe_nr;
- u64 stripe_offset;
- int stripe_index;
- stripe_nr = div64_u64(last_alloc, map->stripe_size);
- stripe_offset = stripe_nr * map->stripe_size;
- full_stripe_nr = div_u64(stripe_nr, map->num_stripes);
- div_u64_rem(stripe_nr, map->num_stripes, &stripe_index);
-
- zone_info[i].alloc_offset =
- full_stripe_nr * map->stripe_size;
+ zone_info[i].alloc_offset = btrfs_stripe_nr_to_offset(stripe_nr);
if (stripe_index > i)
- zone_info[i].alloc_offset += map->stripe_size;
+ zone_info[i].alloc_offset += BTRFS_STRIPE_LEN;
else if (stripe_index == i)
- zone_info[i].alloc_offset +=
- (last_alloc - stripe_offset);
+ zone_info[i].alloc_offset += stripe_offset;
}
if (test_bit(0, active) != test_bit(i, active)) {
@@ -1574,6 +1575,8 @@ static int btrfs_load_block_group_raid10(struct btrfs_block_group *bg,
u64 last_alloc)
{
struct btrfs_fs_info *fs_info = bg->fs_info;
+ u64 stripe_nr = 0, stripe_offset = 0;
+ u32 stripe_index = 0;
if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
@@ -1581,6 +1584,14 @@ static int btrfs_load_block_group_raid10(struct btrfs_block_group *bg,
return -EINVAL;
}
+ if (last_alloc) {
+ u32 factor = map->num_stripes / map->sub_stripes;
+
+ stripe_nr = last_alloc >> BTRFS_STRIPE_LEN_SHIFT;
+ stripe_offset = last_alloc & BTRFS_STRIPE_LEN_MASK;
+ stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
+ }
+
for (int i = 0; i < map->num_stripes; i++) {
if (zone_info[i].alloc_offset == WP_MISSING_DEV)
continue;
@@ -1594,26 +1605,12 @@ static int btrfs_load_block_group_raid10(struct btrfs_block_group *bg,
}
if (zone_info[i].alloc_offset == WP_CONVENTIONAL) {
- u64 stripe_nr, full_stripe_nr;
- u64 stripe_offset;
- int stripe_index;
-
- stripe_nr = div64_u64(last_alloc, map->stripe_size);
- stripe_offset = stripe_nr * map->stripe_size;
- full_stripe_nr = div_u64(stripe_nr,
- map->num_stripes / map->sub_stripes);
- div_u64_rem(stripe_nr,
- (map->num_stripes / map->sub_stripes),
- &stripe_index);
-
- zone_info[i].alloc_offset =
- full_stripe_nr * map->stripe_size;
+ zone_info[i].alloc_offset = btrfs_stripe_nr_to_offset(stripe_nr);
if (stripe_index > (i / map->sub_stripes))
- zone_info[i].alloc_offset += map->stripe_size;
+ zone_info[i].alloc_offset += BTRFS_STRIPE_LEN;
else if (stripe_index == (i / map->sub_stripes))
- zone_info[i].alloc_offset +=
- (last_alloc - stripe_offset);
+ zone_info[i].alloc_offset += stripe_offset;
}
if ((i % map->sub_stripes) == 0) {
@@ -1683,8 +1680,6 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
set_bit(BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, &cache->runtime_flags);
if (num_conventional > 0) {
- /* Zone capacity is always zone size in emulation */
- cache->zone_capacity = cache->length;
ret = calculate_alloc_pointer(cache, &last_alloc, new);
if (ret) {
btrfs_err(fs_info,
@@ -1693,6 +1688,7 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
goto out;
} else if (map->num_stripes == num_conventional) {
cache->alloc_offset = last_alloc;
+ cache->zone_capacity = cache->length;
set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags);
goto out;
}
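
The raid0 hunk above replaces the open-coded division with shift-based stripe arithmetic. A standalone sketch of that arithmetic for the raid0 case may help check the result; the 64 KiB stripe length, the names, and the two-device layout below are stand-ins for BTRFS_STRIPE_LEN, btrfs_stripe_nr_to_offset() and the zone_info array, not btrfs code:

/*
 * Userspace illustration of the conventional-zone write-pointer math used
 * above for RAID0.  STRIPE_LEN mirrors BTRFS_STRIPE_LEN (64 KiB); all names
 * and values here are illustrative only.
 */
#include <inttypes.h>
#include <stdio.h>

#define STRIPE_LEN_SHIFT 16
#define STRIPE_LEN       (1ULL << STRIPE_LEN_SHIFT)
#define STRIPE_LEN_MASK  (STRIPE_LEN - 1)

int main(void)
{
	uint64_t last_alloc = 160 * 1024;	/* bytes allocated in the block group */
	unsigned int num_stripes = 2;		/* RAID0 across two devices */

	uint64_t stripe_nr = last_alloc >> STRIPE_LEN_SHIFT;	/* full stripes consumed */
	uint64_t stripe_offset = last_alloc & STRIPE_LEN_MASK;	/* bytes into the partial stripe */
	uint32_t stripe_index = stripe_nr % num_stripes;	/* device holding the partial stripe */
	uint64_t full_stripe_nr = stripe_nr / num_stripes;	/* full rotations over all devices */

	for (unsigned int i = 0; i < num_stripes; i++) {
		uint64_t alloc_offset = full_stripe_nr << STRIPE_LEN_SHIFT;

		if (stripe_index > i)
			alloc_offset += STRIPE_LEN;		/* one extra full stripe */
		else if (stripe_index == i)
			alloc_offset += stripe_offset;		/* the partial stripe */
		printf("device %u: alloc_offset = %" PRIu64 " KiB\n", i, alloc_offset / 1024);
	}
	return 0;	/* prints 96 KiB for device 0 and 64 KiB for device 1 */
}

For 160 KiB written across two devices the per-device offsets (96 KiB + 64 KiB) add back up to last_alloc, which is what the patched code relies on; the raid10 hunk applies the same math per sub-stripe group.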
diff --git a/fs/crypto/inline_crypt.c b/fs/crypto/inline_crypt.c
index 5dee7c498bc8..ed6e926226b5 100644
--- a/fs/crypto/inline_crypt.c
+++ b/fs/crypto/inline_crypt.c
@@ -333,8 +333,7 @@ static bool bh_get_inode_and_lblk_num(const struct buffer_head *bh,
inode = mapping->host;
*inode_ret = inode;
- *lblk_num_ret = ((u64)folio->index << (PAGE_SHIFT - inode->i_blkbits)) +
- (bh_offset(bh) >> inode->i_blkbits);
+ *lblk_num_ret = (folio_pos(folio) + bh_offset(bh)) >> inode->i_blkbits;
return true;
}
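
The new expression in bh_get_inode_and_lblk_num() folds the old two-step shift into a single shift of the absolute byte position. A quick standalone check (page and block sizes picked purely for illustration) shows the two forms agree whenever the block size does not exceed the page size:

/* Both expressions compute the logical block number of a buffer_head: the
 * old one shifts the page index and intra-page offset separately, the new
 * one shifts the byte position once.  Illustrative values only. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const unsigned int page_shift = 12;	/* 4 KiB pages */
	const unsigned int blkbits = 10;	/* 1 KiB filesystem blocks */
	uint64_t index = 3;			/* folio->index */
	uint64_t bh_off = 2048;			/* bh_offset(bh) within the folio */

	uint64_t old_lblk = (index << (page_shift - blkbits)) + (bh_off >> blkbits);
	uint64_t new_lblk = ((index << page_shift) + bh_off) >> blkbits;

	assert(old_lblk == new_lblk);
	printf("lblk = %llu\n", (unsigned long long)new_lblk);	/* 14 */
	return 0;
}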
diff --git a/fs/erofs/decompressor_zstd.c b/fs/erofs/decompressor_zstd.c
index b4bfe14229f9..e38d93bb2104 100644
--- a/fs/erofs/decompressor_zstd.c
+++ b/fs/erofs/decompressor_zstd.c
@@ -172,7 +172,6 @@ static int z_erofs_zstd_decompress(struct z_erofs_decompress_req *rq,
dctx.bounce = strm->bounce;
do {
- dctx.avail_out = out_buf.size - out_buf.pos;
dctx.inbuf_sz = in_buf.size;
dctx.inbuf_pos = in_buf.pos;
err = z_erofs_stream_switch_bufs(&dctx, &out_buf.dst,
@@ -188,14 +187,18 @@ static int z_erofs_zstd_decompress(struct z_erofs_decompress_req *rq,
in_buf.pos = dctx.inbuf_pos;
zerr = zstd_decompress_stream(stream, &out_buf, &in_buf);
- if (zstd_is_error(zerr) || (!zerr && rq->outputsize)) {
+ dctx.avail_out = out_buf.size - out_buf.pos;
+ if (zstd_is_error(zerr) ||
+ ((rq->outputsize + dctx.avail_out) && (!zerr || (zerr > 0 &&
+ !(rq->inputsize + in_buf.size - in_buf.pos))))) {
erofs_err(sb, "failed to decompress in[%u] out[%u]: %s",
rq->inputsize, rq->outputsize,
- zerr ? zstd_get_error_name(zerr) : "unexpected end of stream");
+ zstd_is_error(zerr) ? zstd_get_error_name(zerr) :
+ "unexpected end of stream");
err = -EFSCORRUPTED;
break;
}
- } while (rq->outputsize || out_buf.pos < out_buf.size);
+ } while (rq->outputsize + dctx.avail_out);
if (dctx.kout)
kunmap_local(dctx.kout);
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index c1b54322c412..8a6960500217 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -1542,7 +1542,8 @@ static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
release_all_access(stp);
if (stp->st_stateowner)
nfs4_put_stateowner(stp->st_stateowner);
- WARN_ON(!list_empty(&stid->sc_cp_list));
+ if (!list_empty(&stid->sc_cp_list))
+ nfs4_free_cpntf_statelist(stid->sc_client->net, stid);
kmem_cache_free(stateid_slab, stid);
}
@@ -3486,7 +3487,20 @@ nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
struct nfsd4_slot *slot = resp->cstate.slot;
unsigned int base;
- dprintk("--> %s slot %p\n", __func__, slot);
+ /*
+ * RFC 5661 Section 2.10.6.1.2:
+ *
+ * Any time SEQUENCE ... returns an error ... [t]he replier MUST NOT
+ * modify the reply cache entry for the slot whenever an error is
+ * returned from SEQUENCE ...
+ *
+ * Because nfsd4_store_cache_entry is called only by
+ * nfsd4_sequence_done(), nfsd4_store_cache_entry() is called only
+ * when a SEQUENCE operation was part of the COMPOUND.
+ * nfs41_check_op_ordering() ensures SEQUENCE is the first op.
+ */
+ if (resp->opcnt == 1 && resp->cstate.status != nfs_ok)
+ return;
slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
slot->sl_opcnt = resp->opcnt;
@@ -4349,6 +4363,36 @@ static bool replay_matches_cache(struct svc_rqst *rqstp,
return true;
}
+/*
+ * Note that the response is constructed here both for the case
+ * of a new SEQUENCE request and for a replayed SEQUENCE request.
+ * We do not cache SEQUENCE responses as SEQUENCE is idempotent.
+ */
+static void nfsd4_construct_sequence_response(struct nfsd4_session *session,
+ struct nfsd4_sequence *seq)
+{
+ struct nfs4_client *clp = session->se_client;
+
+ seq->maxslots_response = max(session->se_target_maxslots,
+ seq->maxslots);
+ seq->target_maxslots = session->se_target_maxslots;
+
+ switch (clp->cl_cb_state) {
+ case NFSD4_CB_DOWN:
+ seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
+ break;
+ case NFSD4_CB_FAULT:
+ seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
+ break;
+ default:
+ seq->status_flags = 0;
+ }
+ if (!list_empty(&clp->cl_revoked))
+ seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
+ if (atomic_read(&clp->cl_admin_revoked))
+ seq->status_flags |= SEQ4_STATUS_ADMIN_STATE_REVOKED;
+}
+
__be32
nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
@@ -4398,6 +4442,9 @@ nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
dprintk("%s: slotid %d\n", __func__, seq->slotid);
trace_nfsd_slot_seqid_sequence(clp, seq, slot);
+
+ nfsd4_construct_sequence_response(session, seq);
+
status = check_slot_seqid(seq->seqid, slot->sl_seqid, slot->sl_flags);
if (status == nfserr_replay_cache) {
status = nfserr_seq_misordered;
@@ -4495,23 +4542,6 @@ nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
}
out:
- seq->maxslots = max(session->se_target_maxslots, seq->maxslots);
- seq->target_maxslots = session->se_target_maxslots;
-
- switch (clp->cl_cb_state) {
- case NFSD4_CB_DOWN:
- seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
- break;
- case NFSD4_CB_FAULT:
- seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
- break;
- default:
- seq->status_flags = 0;
- }
- if (!list_empty(&clp->cl_revoked))
- seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
- if (atomic_read(&clp->cl_admin_revoked))
- seq->status_flags |= SEQ4_STATUS_ADMIN_STATE_REVOKED;
trace_nfsd_seq4_status(rqstp, seq);
out_no_session:
if (conn)
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 6040a6145dad..67bb9c0b9fcb 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -5073,7 +5073,7 @@ nfsd4_encode_sequence(struct nfsd4_compoundres *resp, __be32 nfserr,
return nfserr;
/* Note slotid's are numbered from zero: */
/* sr_highest_slotid */
- nfserr = nfsd4_encode_slotid4(xdr, seq->maxslots - 1);
+ nfserr = nfsd4_encode_slotid4(xdr, seq->maxslots_response - 1);
if (nfserr != nfs_ok)
return nfserr;
/* sr_target_highest_slotid */
@@ -5925,8 +5925,7 @@ nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
*/
warn_on_nonidempotent_op(op);
xdr_truncate_encode(xdr, op_status_offset + XDR_UNIT);
- }
- if (so) {
+ } else if (so) {
int len = xdr->buf->len - (op_status_offset + XDR_UNIT);
so->so_replay.rp_status = op->status;
diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
index f19320018639..b752433c3c2c 100644
--- a/fs/nfsd/nfsd.h
+++ b/fs/nfsd/nfsd.h
@@ -458,6 +458,7 @@ enum {
#define NFSD4_2_SUPPORTED_ATTRS_WORD2 \
(NFSD4_1_SUPPORTED_ATTRS_WORD2 | \
FATTR4_WORD2_MODE_UMASK | \
+ FATTR4_WORD2_CLONE_BLKSIZE | \
NFSD4_2_SECURITY_ATTRS | \
FATTR4_WORD2_XATTR_SUPPORT | \
FATTR4_WORD2_TIME_DELEG_ACCESS | \
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index 3eb724ec9566..ed85dd43da18 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -269,9 +269,6 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct net *net,
dentry);
}
- fhp->fh_dentry = dentry;
- fhp->fh_export = exp;
-
switch (fhp->fh_maxsize) {
case NFS4_FHSIZE:
if (dentry->d_sb->s_export_op->flags & EXPORT_OP_NOATOMIC_ATTR)
@@ -293,6 +290,9 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct net *net,
goto out;
}
+ fhp->fh_dentry = dentry;
+ fhp->fh_export = exp;
+
return 0;
out:
exp_put(exp);
diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
index ee0570cbdd9e..1ce8e12ae335 100644
--- a/fs/nfsd/xdr4.h
+++ b/fs/nfsd/xdr4.h
@@ -574,8 +574,9 @@ struct nfsd4_sequence {
struct nfs4_sessionid sessionid; /* request/response */
u32 seqid; /* request/response */
u32 slotid; /* request/response */
- u32 maxslots; /* request/response */
+ u32 maxslots; /* request */
u32 cachethis; /* request */
+ u32 maxslots_response; /* response */
u32 target_maxslots; /* response */
u32 status_flags; /* response */
};
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index f15ca6fc400d..deee16bc9d4e 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -2768,7 +2768,12 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
if (sci->sc_task) {
wake_up(&sci->sc_wait_daemon);
- kthread_stop(sci->sc_task);
+ if (kthread_stop(sci->sc_task)) {
+ spin_lock(&sci->sc_state_lock);
+ sci->sc_task = NULL;
+ timer_shutdown_sync(&sci->sc_timer);
+ spin_unlock(&sci->sc_state_lock);
+ }
}
spin_lock(&sci->sc_state_lock);
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 176281112273..501889856461 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -698,6 +698,12 @@ void pde_put(struct proc_dir_entry *pde)
}
}
+static void pde_erase(struct proc_dir_entry *pde, struct proc_dir_entry *parent)
+{
+ rb_erase(&pde->subdir_node, &parent->subdir);
+ RB_CLEAR_NODE(&pde->subdir_node);
+}
+
/*
* Remove a /proc entry and free it if it's not currently in use.
*/
@@ -720,7 +726,7 @@ void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
WARN(1, "removing permanent /proc entry '%s'", de->name);
de = NULL;
} else {
- rb_erase(&de->subdir_node, &parent->subdir);
+ pde_erase(de, parent);
if (S_ISDIR(de->mode))
parent->nlink--;
}
@@ -764,7 +770,7 @@ int remove_proc_subtree(const char *name, struct proc_dir_entry *parent)
root->parent->name, root->name);
return -EINVAL;
}
- rb_erase(&root->subdir_node, &parent->subdir);
+ pde_erase(root, parent);
de = root;
while (1) {
@@ -776,7 +782,7 @@ int remove_proc_subtree(const char *name, struct proc_dir_entry *parent)
next->parent->name, next->name);
return -EINVAL;
}
- rb_erase(&next->subdir_node, &de->subdir);
+ pde_erase(next, de);
de = next;
continue;
}
diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c
index b8ac7b7faf61..018055fd2cdb 100644
--- a/fs/smb/client/cached_dir.c
+++ b/fs/smb/client/cached_dir.c
@@ -388,11 +388,11 @@ out:
* lease. Release one here, and the second below.
*/
cfid->has_lease = false;
- kref_put(&cfid->refcount, smb2_close_cached_fid);
+ close_cached_dir(cfid);
}
spin_unlock(&cfids->cfid_list_lock);
- kref_put(&cfid->refcount, smb2_close_cached_fid);
+ close_cached_dir(cfid);
} else {
*ret_cfid = cfid;
atomic_inc(&tcon->num_remote_opens);
@@ -438,12 +438,14 @@ int open_cached_dir_by_dentry(struct cifs_tcon *tcon,
static void
smb2_close_cached_fid(struct kref *ref)
+__releases(&cfid->cfids->cfid_list_lock)
{
struct cached_fid *cfid = container_of(ref, struct cached_fid,
refcount);
int rc;
- spin_lock(&cfid->cfids->cfid_list_lock);
+ lockdep_assert_held(&cfid->cfids->cfid_list_lock);
+
if (cfid->on_list) {
list_del(&cfid->entry);
cfid->on_list = false;
@@ -478,7 +480,7 @@ void drop_cached_dir_by_name(const unsigned int xid, struct cifs_tcon *tcon,
spin_lock(&cfid->cfids->cfid_list_lock);
if (cfid->has_lease) {
cfid->has_lease = false;
- kref_put(&cfid->refcount, smb2_close_cached_fid);
+ close_cached_dir(cfid);
}
spin_unlock(&cfid->cfids->cfid_list_lock);
close_cached_dir(cfid);
@@ -487,7 +489,7 @@ void drop_cached_dir_by_name(const unsigned int xid, struct cifs_tcon *tcon,
void close_cached_dir(struct cached_fid *cfid)
{
- kref_put(&cfid->refcount, smb2_close_cached_fid);
+ kref_put_lock(&cfid->refcount, smb2_close_cached_fid, &cfid->cfids->cfid_list_lock);
}
/*
@@ -596,7 +598,7 @@ cached_dir_offload_close(struct work_struct *work)
WARN_ON(cfid->on_list);
- kref_put(&cfid->refcount, smb2_close_cached_fid);
+ close_cached_dir(cfid);
cifs_put_tcon(tcon, netfs_trace_tcon_ref_put_cached_close);
}
@@ -762,7 +764,7 @@ static void cfids_laundromat_worker(struct work_struct *work)
* Drop the ref-count from above, either the lease-ref (if there
* was one) or the extra one acquired.
*/
- kref_put(&cfid->refcount, smb2_close_cached_fid);
+ close_cached_dir(cfid);
}
queue_delayed_work(cfid_put_wq, &cfids->laundromat_work,
dir_cache_timeout * HZ);
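
With the hunks above, every drop goes through close_cached_dir(), and kref_put_lock() makes the release callback run with cfid_list_lock already held. The pattern kref_put_lock() implements can be sketched in plain userspace C; the struct, mutex and helper names below are invented for the example and are not cifs code:

/*
 * Illustrative sketch of the "decrement and lock" pattern kref_put_lock()
 * provides: the drop to zero happens with the list lock held, so a
 * concurrent list lookup never sees a zero-refcount object still linked.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct obj {
	atomic_int refcount;
	bool on_list;			/* protected by list_lock */
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns true with list_lock held iff this call dropped the last reference. */
static bool put_locked(struct obj *o)
{
	int old = atomic_load(&o->refcount);

	/* Fast path: clearly not the last reference, decrement without the lock. */
	while (old > 1) {
		if (atomic_compare_exchange_weak(&o->refcount, &old, old - 1))
			return false;
	}
	/* Possibly the last reference: take the lock, then drop under it. */
	pthread_mutex_lock(&list_lock);
	if (atomic_fetch_sub(&o->refcount, 1) != 1) {
		pthread_mutex_unlock(&list_lock);
		return false;
	}
	return true;
}

static void release(struct obj *o)	/* called with list_lock held */
{
	if (o->on_list)
		o->on_list = false;	/* unlink while the lock is still held */
	pthread_mutex_unlock(&list_lock);
	free(o);
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	atomic_init(&o->refcount, 2);
	o->on_list = true;
	if (put_locked(o))
		release(o);
	if (put_locked(o))		/* final put: unlink and free under the lock */
		release(o);
	return 0;
}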
diff --git a/fs/smb/client/smb2inode.c b/fs/smb/client/smb2inode.c
index 09e3fc81d7cb..69cb81fa0d3a 100644
--- a/fs/smb/client/smb2inode.c
+++ b/fs/smb/client/smb2inode.c
@@ -1294,6 +1294,8 @@ static int smb2_set_path_attr(const unsigned int xid, struct cifs_tcon *tcon,
smb2_to_name = cifs_convert_path_to_utf16(to_name, cifs_sb);
if (smb2_to_name == NULL) {
rc = -ENOMEM;
+ if (cfile)
+ cifsFileInfo_put(cfile);
goto smb2_rename_path;
}
in_iov.iov_base = smb2_to_name;
diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
index b0739a2661bf..8b4a4573e9c3 100644
--- a/fs/smb/client/smb2pdu.c
+++ b/fs/smb/client/smb2pdu.c
@@ -4054,9 +4054,12 @@ replay_again:
smb_rsp = (struct smb2_change_notify_rsp *)rsp_iov.iov_base;
- smb2_validate_iov(le16_to_cpu(smb_rsp->OutputBufferOffset),
- le32_to_cpu(smb_rsp->OutputBufferLength), &rsp_iov,
+ rc = smb2_validate_iov(le16_to_cpu(smb_rsp->OutputBufferOffset),
+ le32_to_cpu(smb_rsp->OutputBufferLength),
+ &rsp_iov,
sizeof(struct file_notify_information));
+ if (rc)
+ goto cnotify_exit;
*out_data = kmemdup((char *)smb_rsp + le16_to_cpu(smb_rsp->OutputBufferOffset),
le32_to_cpu(smb_rsp->OutputBufferLength), GFP_KERNEL);
diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c
index 7d86553fcc7c..e2be9a496154 100644
--- a/fs/smb/server/transport_rdma.c
+++ b/fs/smb/server/transport_rdma.c
@@ -334,6 +334,9 @@ smb_direct_disconnect_rdma_connection(struct smbdirect_socket *sc)
break;
case SMBDIRECT_SOCKET_CREATED:
+ sc->status = SMBDIRECT_SOCKET_DISCONNECTED;
+ break;
+
case SMBDIRECT_SOCKET_CONNECTED:
sc->status = SMBDIRECT_SOCKET_ERROR;
break;
@@ -1883,6 +1886,7 @@ static int smb_direct_accept_client(struct smbdirect_socket *sc)
static int smb_direct_prepare_negotiation(struct smbdirect_socket *sc)
{
struct smbdirect_recv_io *recvmsg;
+ bool recv_posted = false;
int ret;
WARN_ON_ONCE(sc->status != SMBDIRECT_SOCKET_CREATED);
@@ -1899,6 +1903,7 @@ static int smb_direct_prepare_negotiation(struct smbdirect_socket *sc)
pr_err("Can't post recv: %d\n", ret);
goto out_err;
}
+ recv_posted = true;
ret = smb_direct_accept_client(sc);
if (ret) {
@@ -1908,7 +1913,14 @@ static int smb_direct_prepare_negotiation(struct smbdirect_socket *sc)
return 0;
out_err:
- put_recvmsg(sc, recvmsg);
+ /*
+ * If the recv was never posted, return it to the free list.
+ * If it was posted, leave it alone so disconnect teardown can
+ * drain the QP and complete it (flush) and the completion path
+ * will unmap it exactly once.
+ */
+ if (!recv_posted)
+ put_recvmsg(sc, recvmsg);
return ret;
}
@@ -2606,7 +2618,7 @@ void ksmbd_rdma_destroy(void)
}
}
-bool ksmbd_rdma_capable_netdev(struct net_device *netdev)
+static bool ksmbd_find_rdma_capable_netdev(struct net_device *netdev)
{
struct smb_direct_device *smb_dev;
int i;
@@ -2648,6 +2660,28 @@ out:
return rdma_capable;
}
+bool ksmbd_rdma_capable_netdev(struct net_device *netdev)
+{
+ struct net_device *lower_dev;
+ struct list_head *iter;
+
+ if (ksmbd_find_rdma_capable_netdev(netdev))
+ return true;
+
+ /* check if netdev is bridge or VLAN */
+ if (netif_is_bridge_master(netdev) ||
+ netdev->priv_flags & IFF_802_1Q_VLAN)
+ netdev_for_each_lower_dev(netdev, lower_dev, iter)
+ if (ksmbd_find_rdma_capable_netdev(lower_dev))
+ return true;
+
+ /* check if netdev is IPoIB safely without layer violation */
+ if (netdev->type == ARPHRD_INFINIBAND)
+ return true;
+
+ return false;
+}
+
static const struct ksmbd_transport_ops ksmbd_smb_direct_transport_ops = {
.prepare = smb_direct_prepare,
.disconnect = smb_direct_disconnect,
diff --git a/fs/smb/server/transport_tcp.c b/fs/smb/server/transport_tcp.c
index 7a1e3dcc2cde..d2e391c29464 100644
--- a/fs/smb/server/transport_tcp.c
+++ b/fs/smb/server/transport_tcp.c
@@ -290,8 +290,11 @@ static int ksmbd_kthread_fn(void *p)
}
}
up_read(&conn_list_lock);
- if (ret == -EAGAIN)
+ if (ret == -EAGAIN) {
+ /* Per-IP limit hit: release the just-accepted socket. */
+ sock_release(client_sk);
continue;
+ }
skip_max_ip_conns_limit:
if (server_conf.max_connections &&
diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
index ee49f20875af..6917de832191 100644
--- a/fs/xfs/xfs_discard.c
+++ b/fs/xfs/xfs_discard.c
@@ -726,8 +726,10 @@ xfs_trim_rtgroup_extents(
break;
}
- if (!tr.queued)
+ if (!tr.queued) {
+ kfree(tr.extents);
break;
+ }
/*
* We hand the extent list to the discard function here so the
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index d3f6e3e42a11..490e12cb99be 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -1091,6 +1091,29 @@ const struct iomap_ops xfs_zoned_direct_write_iomap_ops = {
};
#endif /* CONFIG_XFS_RT */
+#ifdef DEBUG
+static void
+xfs_check_atomic_cow_conversion(
+ struct xfs_inode *ip,
+ xfs_fileoff_t offset_fsb,
+ xfs_filblks_t count_fsb,
+ const struct xfs_bmbt_irec *cmap)
+{
+ struct xfs_iext_cursor icur;
+ struct xfs_bmbt_irec cmap2 = { };
+
+ if (xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &cmap2))
+ xfs_trim_extent(&cmap2, offset_fsb, count_fsb);
+
+ ASSERT(cmap2.br_startoff == cmap->br_startoff);
+ ASSERT(cmap2.br_blockcount == cmap->br_blockcount);
+ ASSERT(cmap2.br_startblock == cmap->br_startblock);
+ ASSERT(cmap2.br_state == cmap->br_state);
+}
+#else
+# define xfs_check_atomic_cow_conversion(...) ((void)0)
+#endif
+
static int
xfs_atomic_write_cow_iomap_begin(
struct inode *inode,
@@ -1102,9 +1125,10 @@ xfs_atomic_write_cow_iomap_begin(
{
struct xfs_inode *ip = XFS_I(inode);
struct xfs_mount *mp = ip->i_mount;
- const xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
- xfs_fileoff_t end_fsb = xfs_iomap_end_fsb(mp, offset, length);
- xfs_filblks_t count_fsb = end_fsb - offset_fsb;
+ const xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
+ const xfs_fileoff_t end_fsb = XFS_B_TO_FSB(mp, offset + length);
+ const xfs_filblks_t count_fsb = end_fsb - offset_fsb;
+ xfs_filblks_t hole_count_fsb;
int nmaps = 1;
xfs_filblks_t resaligned;
struct xfs_bmbt_irec cmap;
@@ -1130,7 +1154,7 @@ xfs_atomic_write_cow_iomap_begin(
return -EAGAIN;
trace_xfs_iomap_atomic_write_cow(ip, offset, length);
-
+retry:
xfs_ilock(ip, XFS_ILOCK_EXCL);
if (!ip->i_cowfp) {
@@ -1141,14 +1165,22 @@ xfs_atomic_write_cow_iomap_begin(
if (!xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &cmap))
cmap.br_startoff = end_fsb;
if (cmap.br_startoff <= offset_fsb) {
+ if (isnullstartblock(cmap.br_startblock))
+ goto convert_delay;
+
+ /*
+ * cmap could extend outside the write range due to previous
+ * speculative preallocations. We must trim cmap to the write
+ * range because the cow fork treats written mappings to mean
+ * "write in progress".
+ */
xfs_trim_extent(&cmap, offset_fsb, count_fsb);
goto found;
}
- end_fsb = cmap.br_startoff;
- count_fsb = end_fsb - offset_fsb;
+ hole_count_fsb = cmap.br_startoff - offset_fsb;
- resaligned = xfs_aligned_fsb_count(offset_fsb, count_fsb,
+ resaligned = xfs_aligned_fsb_count(offset_fsb, hole_count_fsb,
xfs_get_cowextsz_hint(ip));
xfs_iunlock(ip, XFS_ILOCK_EXCL);
@@ -1169,8 +1201,10 @@ xfs_atomic_write_cow_iomap_begin(
if (!xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &cmap))
cmap.br_startoff = end_fsb;
if (cmap.br_startoff <= offset_fsb) {
- xfs_trim_extent(&cmap, offset_fsb, count_fsb);
xfs_trans_cancel(tp);
+ if (isnullstartblock(cmap.br_startblock))
+ goto convert_delay;
+ xfs_trim_extent(&cmap, offset_fsb, count_fsb);
goto found;
}
@@ -1182,7 +1216,7 @@ xfs_atomic_write_cow_iomap_begin(
* atomic writes to that same range will be aligned (and don't require
* this COW-based method).
*/
- error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
+ error = xfs_bmapi_write(tp, ip, offset_fsb, hole_count_fsb,
XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC |
XFS_BMAPI_EXTSZALIGN, 0, &cmap, &nmaps);
if (error) {
@@ -1195,21 +1229,43 @@ xfs_atomic_write_cow_iomap_begin(
if (error)
goto out_unlock;
+ /*
+ * cmap could map more blocks than the range we passed into bmapi_write
+ * because of EXTSZALIGN or adjacent pre-existing unwritten mappings
+ * that were merged. Trim cmap to the original write range so that we
+ * don't convert more than we were asked to do for this write.
+ */
+ xfs_trim_extent(&cmap, offset_fsb, count_fsb);
+
found:
if (cmap.br_state != XFS_EXT_NORM) {
- error = xfs_reflink_convert_cow_locked(ip, offset_fsb,
- count_fsb);
+ error = xfs_reflink_convert_cow_locked(ip, cmap.br_startoff,
+ cmap.br_blockcount);
if (error)
goto out_unlock;
cmap.br_state = XFS_EXT_NORM;
+ xfs_check_atomic_cow_conversion(ip, offset_fsb, count_fsb,
+ &cmap);
}
- length = XFS_FSB_TO_B(mp, cmap.br_startoff + cmap.br_blockcount);
- trace_xfs_iomap_found(ip, offset, length - offset, XFS_COW_FORK, &cmap);
+ trace_xfs_iomap_found(ip, offset, length, XFS_COW_FORK, &cmap);
seq = xfs_iomap_inode_sequence(ip, IOMAP_F_SHARED);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags, IOMAP_F_SHARED, seq);
+convert_delay:
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ error = xfs_bmapi_convert_delalloc(ip, XFS_COW_FORK, offset, iomap,
+ NULL);
+ if (error)
+ return error;
+
+ /*
+ * Try the lookup again, because the delalloc conversion might have
+ * turned the COW mapping into unwritten, but we need it to be in
+ * written state.
+ */
+ goto retry;
out_unlock:
xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
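
The xfs_trim_extent() calls added above clamp the COW mapping back to the caller's write range after bmapi_write() may have produced a wider, extent-size-aligned mapping. A minimal standalone model of that clamping (a simplified stand-in for struct xfs_bmbt_irec and xfs_trim_extent(), which additionally adjusts br_startblock; values are made up):

/* Simplified model of xfs_trim_extent(): clamp a mapping starting at
 * br_startoff and spanning br_blockcount to [offset, offset + count).
 * Units are filesystem blocks; real xfs_trim_extent() also shifts
 * br_startblock when trimming the front of the mapping. */
#include <stdint.h>
#include <stdio.h>

struct irec {
	uint64_t br_startoff;	/* first file block of the mapping */
	uint64_t br_blockcount;	/* length of the mapping in blocks */
};

static void trim_extent(struct irec *irec, uint64_t offset, uint64_t count)
{
	uint64_t end = offset + count;
	uint64_t irec_end = irec->br_startoff + irec->br_blockcount;

	if (irec->br_startoff >= end || irec_end <= offset) {
		irec->br_blockcount = 0;	/* no overlap at all */
		return;
	}
	if (irec->br_startoff < offset) {
		irec->br_blockcount -= offset - irec->br_startoff;
		irec->br_startoff = offset;
	}
	if (irec->br_startoff + irec->br_blockcount > end)
		irec->br_blockcount = end - irec->br_startoff;
}

int main(void)
{
	/* bmapi_write() came back with an extent-size-aligned mapping of
	 * 16 blocks starting at block 64, but the atomic write only covers
	 * blocks 68..71; trimming leaves exactly that 4-block window. */
	struct irec cmap = { .br_startoff = 64, .br_blockcount = 16 };

	trim_extent(&cmap, 68, 4);
	printf("startoff=%llu blockcount=%llu\n",
	       (unsigned long long)cmap.br_startoff,
	       (unsigned long long)cmap.br_blockcount);	/* 68 and 4 */
	return 0;
}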
diff --git a/fs/xfs/xfs_zone_alloc.c b/fs/xfs/xfs_zone_alloc.c
index 040402240807..ef7a931ebde5 100644
--- a/fs/xfs/xfs_zone_alloc.c
+++ b/fs/xfs/xfs_zone_alloc.c
@@ -615,7 +615,7 @@ xfs_select_open_zone_mru(
lockdep_assert_held(&zi->zi_open_zones_lock);
list_for_each_entry_reverse(oz, &zi->zi_open_zones, oz_entry)
- if (xfs_try_use_zone(zi, file_hint, oz, false))
+ if (xfs_try_use_zone(zi, file_hint, oz, XFS_ZONE_ALLOC_OK))
return oz;
cond_resched_lock(&zi->zi_open_zones_lock);
@@ -1249,8 +1249,10 @@ xfs_mount_zones(
while ((rtg = xfs_rtgroup_next(mp, rtg))) {
error = xfs_init_zone(&iz, rtg, NULL);
- if (error)
+ if (error) {
+ xfs_rtgroup_rele(rtg);
goto out_free_zone_info;
+ }
}
}