Diffstat (limited to 'mm/zswap.c')
-rw-r--r--  mm/zswap.c  7
1 file changed, 3 insertions, 4 deletions
diff --git a/mm/zswap.c b/mm/zswap.c
index c1af782e54ec..5d0f8b13a958 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -175,7 +175,7 @@ static struct shrinker *zswap_shrinker;
* This structure contains the metadata for tracking a single compressed
* page within zswap.
*
- * swpentry - associated swap entry, the offset indexes into the red-black tree
+ * swpentry - associated swap entry, the offset indexes into the xarray
* length - the length in bytes of the compressed page data. Needed during
* decompression.
* referenced - true if the entry recently entered the zswap pool. Unset by the
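The comment fix above tracks zswap's move from a red-black tree to an xarray: each swap tree is keyed by the swap offset taken from the entry's swpentry. A minimal sketch of the lookup the comment describes, using the kernel's xa_load() and swp_offset() helpers (the function name here is illustrative, not zswap's exact helper):

	/* Illustrative only: find the zswap entry for a swap entry by
	 * using its swap offset as the xarray index. */
	static struct zswap_entry *zswap_tree_lookup(struct xarray *tree,
						     swp_entry_t swpentry)
	{
		return xa_load(tree, swp_offset(swpentry));
	}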
@@ -879,7 +879,7 @@ static bool zswap_compress(struct page *page, struct zswap_entry *entry,
* acomp instance, then get those requests done simultaneously. but in this
* case, zswap actually does store and load page by page, there is no
* existing method to send the second page before the first page is done
- * in one thread doing zwap.
+ * in one thread doing zswap.
* but in different threads running on different cpu, we have different
* acomp instance, so multiple threads can do (de)compression in parallel.
*/
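The comment spells out the concurrency model: a single thread stores and loads page by page on one acomp instance, while threads on different CPUs use different instances and so (de)compress in parallel. A hedged sketch of that per-CPU pattern (field names are assumptions drawn from the comment, not necessarily zswap's exact layout):

	/* Sketch: each CPU owns an acomp context; the mutex serializes
	 * callers that land on the same CPU's instance. */
	struct crypto_acomp_ctx *ctx = raw_cpu_ptr(pool->acomp_ctx);

	mutex_lock(&ctx->mutex);
	/* ... set up ctx->req and call crypto_acomp_compress(ctx->req) ... */
	mutex_unlock(&ctx->mutex);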
@@ -894,7 +894,6 @@ static bool zswap_compress(struct page *page, struct zswap_entry *entry,
* to the active LRU list in the case.
*/
if (comp_ret || !dlen || dlen >= PAGE_SIZE) {
- dlen = PAGE_SIZE;
if (!mem_cgroup_zswap_writeback_enabled(
folio_memcg(page_folio(page)))) {
comp_ret = comp_ret ? comp_ret : -EINVAL;
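For reference, the guard in this hunk treats a page as incompressible when the compressor returned an error, produced no output, or saved no space; with memcg zswap writeback disabled, the branch forces comp_ret to an error so the store fails and, per the comment above the hunk, the page goes back to the active LRU list. A one-line restatement of the test, using the names from the hunk:

	/* Incompressible: the compressor errored, emitted nothing, or the
	 * "compressed" result is at least a full page. */
	bool incompressible = comp_ret || !dlen || dlen >= PAGE_SIZE;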
@@ -1129,7 +1128,7 @@ static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_o
*
* 1. We extract the swp_entry_t to the stack, allowing
* zswap_writeback_entry() to pin the swap entry and
- * then validate the zwap entry against that swap entry's
+ * then validate the zswap entry against that swap entry's
* tree using pointer value comparison. Only when that
* is successful can the entry be dereferenced.
*
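Step 1 above describes a use-after-free guard: the swp_entry_t is copied to the stack, zswap_writeback_entry() pins the swap entry, and only then is the zswap entry pointer revalidated against that swap entry's tree. A minimal sketch of the pointer-value comparison, assuming an xarray-backed tree and a swap_zswap_tree() accessor as used elsewhere in mm/zswap.c:

	/* Sketch: recheck that the tree still maps this offset to the very
	 * same entry pointer before dereferencing it. */
	swp_entry_t swpentry = entry->swpentry;	/* copy to the stack first */
	struct xarray *tree = swap_zswap_tree(swpentry);

	if (xa_load(tree, swp_offset(swpentry)) != entry)
		return -ENOENT;	/* entry was freed or replaced; bail out */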