summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
authorDavid Sterba <dsterba@suse.com>2026-01-06 17:20:34 +0100
committerDavid Sterba <dsterba@suse.com>2026-04-07 18:56:08 +0200
commitf0d3b4c7b82b6bc8bf23be58150d49ecc51ec897 (patch)
tree291e9914e9fc4a0b29016d1c0501f8ade528777b
parentefcf0898a6d01724fc8ea15e55fc39bfb1ecf347 (diff)
downloadlinux-f0d3b4c7b82b6bc8bf23be58150d49ecc51ec897.tar.gz
linux-f0d3b4c7b82b6bc8bf23be58150d49ecc51ec897.zip
btrfs: zstd: don't cache sectorsize in a local variable
The sectorsize is used once or at most twice in the callbacks, no need to cache it on stack. Minor effect on zstd_compress_folios() where it saves 8 bytes of stack.

Signed-off-by: David Sterba <dsterba@suse.com>
-rw-r--r-- fs/btrfs/zstd.c | 12
1 file changed, 4 insertions(+), 8 deletions(-)
diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c
index 41547ff187f6..128646521ea8 100644
--- a/fs/btrfs/zstd.c
+++ b/fs/btrfs/zstd.c
@@ -370,7 +370,6 @@ void zstd_free_workspace(struct list_head *ws)
struct list_head *zstd_alloc_workspace(struct btrfs_fs_info *fs_info, int level)
{
- const u32 blocksize = fs_info->sectorsize;
struct workspace *workspace;
workspace = kzalloc_obj(*workspace);
@@ -383,7 +382,7 @@ struct list_head *zstd_alloc_workspace(struct btrfs_fs_info *fs_info, int level)
workspace->req_level = level;
workspace->last_used = jiffies;
workspace->mem = kvmalloc(workspace->size, GFP_KERNEL | __GFP_NOWARN);
- workspace->buf = kmalloc(blocksize, GFP_KERNEL);
+ workspace->buf = kmalloc(fs_info->sectorsize, GFP_KERNEL);
if (!workspace->mem || !workspace->buf)
goto fail;
@@ -414,7 +413,6 @@ int zstd_compress_bio(struct list_head *ws, struct compressed_bio *cb)
const u64 start = cb->start;
const u32 len = cb->len;
const u64 end = start + len;
- const u32 blocksize = fs_info->sectorsize;
const u32 min_folio_size = btrfs_min_folio_size(fs_info);
workspace->params = zstd_get_btrfs_parameters(workspace->req_level, len);
@@ -463,7 +461,7 @@ int zstd_compress_bio(struct list_head *ws, struct compressed_bio *cb)
}
/* Check to see if we are making it bigger. */
- if (tot_in + workspace->in_buf.pos > blocksize * 2 &&
+ if (tot_in + workspace->in_buf.pos > fs_info->sectorsize * 2 &&
tot_in + workspace->in_buf.pos < tot_out + workspace->out_buf.pos) {
ret = -E2BIG;
goto out;
@@ -590,7 +588,6 @@ int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
size_t srclen = bio_get_size(&cb->bbio.bio);
zstd_dstream *stream;
int ret = 0;
- const u32 blocksize = fs_info->sectorsize;
const unsigned int min_folio_size = btrfs_min_folio_size(fs_info);
unsigned long folio_in_index = 0;
unsigned long total_folios_in = DIV_ROUND_UP(srclen, min_folio_size);
@@ -620,7 +617,7 @@ int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
workspace->out_buf.dst = workspace->buf;
workspace->out_buf.pos = 0;
- workspace->out_buf.size = blocksize;
+ workspace->out_buf.size = fs_info->sectorsize;
while (1) {
size_t ret2;
@@ -682,7 +679,6 @@ int zstd_decompress(struct list_head *ws, const u8 *data_in,
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
struct btrfs_fs_info *fs_info = btrfs_sb(folio_inode(dest_folio)->i_sb);
- const u32 sectorsize = fs_info->sectorsize;
zstd_dstream *stream;
int ret = 0;
unsigned long to_copy = 0;
@@ -706,7 +702,7 @@ int zstd_decompress(struct list_head *ws, const u8 *data_in,
workspace->out_buf.dst = workspace->buf;
workspace->out_buf.pos = 0;
- workspace->out_buf.size = sectorsize;
+ workspace->out_buf.size = fs_info->sectorsize;
/*
* Since both input and output buffers should not exceed one sector,