Skip to content

Commit 5831891

Browse files
jankaratytso
authored and committed
mbcache: don't reclaim used entries
Do not reclaim entries that are currently used by somebody from a shrinker. Firstly, these entries are likely useful. Secondly, we will need to keep such entries to protect pending increment of xattr block refcount. CC: stable@vger.kernel.org Fixes: 82939d7 ("ext4: convert to mbcache2") Signed-off-by: Jan Kara <jack@suse.cz> Link: https://lore.kernel.org/r/20220712105436.32204-1-jack@suse.cz Signed-off-by: Theodore Ts'o <tytso@mit.edu>
1 parent b8a04fe commit 5831891

1 file changed

Lines changed: 9 additions & 1 deletion

File tree

fs/mbcache.c

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -288,7 +288,7 @@ static unsigned long mb_cache_shrink(struct mb_cache *cache,
288288
while (nr_to_scan-- && !list_empty(&cache->c_list)) {
289289
entry = list_first_entry(&cache->c_list,
290290
struct mb_cache_entry, e_list);
291-
if (entry->e_referenced) {
291+
if (entry->e_referenced || atomic_read(&entry->e_refcnt) > 2) {
292292
entry->e_referenced = 0;
293293
list_move_tail(&entry->e_list, &cache->c_list);
294294
continue;
@@ -302,6 +302,14 @@ static unsigned long mb_cache_shrink(struct mb_cache *cache,
302302
spin_unlock(&cache->c_list_lock);
303303
head = mb_cache_entry_head(cache, entry->e_key);
304304
hlist_bl_lock(head);
305+
/* Now a reliable check if the entry didn't get used... */
306+
if (atomic_read(&entry->e_refcnt) > 2) {
307+
hlist_bl_unlock(head);
308+
spin_lock(&cache->c_list_lock);
309+
list_add_tail(&entry->e_list, &cache->c_list);
310+
cache->c_entry_count++;
311+
continue;
312+
}
305313
if (!hlist_bl_unhashed(&entry->e_hash_list)) {
306314
hlist_bl_del_init(&entry->e_hash_list);
307315
atomic_dec(&entry->e_refcnt);

0 commit comments

Comments
 (0)