Skip to content

Commit 3480008

Browse files
dchinner
authored and committed
xfs: merge xfs_buf_find() and xfs_buf_get_map()
Now that we factored xfs_buf_find(), we can start separating into distinct fast and slow paths from xfs_buf_get_map(). We start by moving the lookup map and perag setup to _get_map(), and then move all the specifics of the fast path lookup into xfs_buf_lookup() and call it directly from _get_map(). We the move all the slow path code to xfs_buf_find_insert(), which is now also called directly from _get_map(). As such, xfs_buf_find() now goes away. Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Darrick J. Wong <djwong@kernel.org>
1 parent de67dc5 commit 3480008

1 file changed

Lines changed: 93 additions & 109 deletions

File tree

fs/xfs/xfs_buf.c

Lines changed: 93 additions & 109 deletions
Original file line number | Diff line number | Diff line change
@@ -537,7 +537,6 @@ xfs_buf_find_lock(
537537
if (!xfs_buf_trylock(bp)) {
538538
if (flags & XBF_TRYLOCK) {
539539
XFS_STATS_INC(bp->b_mount, xb_busy_locked);
540-
xfs_buf_rele(bp);
541540
return -EAGAIN;
542541
}
543542
xfs_buf_lock(bp);
@@ -557,113 +556,97 @@ xfs_buf_find_lock(
557556
return 0;
558557
}
559558

560-
static inline struct xfs_buf *
559+
static inline int
561560
xfs_buf_lookup(
562561
struct xfs_perag *pag,
563-
struct xfs_buf_map *map)
562+
struct xfs_buf_map *map,
563+
xfs_buf_flags_t flags,
564+
struct xfs_buf **bpp)
564565
{
565566
struct xfs_buf *bp;
567+
int error;
566568

569+
spin_lock(&pag->pag_buf_lock);
567570
bp = rhashtable_lookup(&pag->pag_buf_hash, map, xfs_buf_hash_params);
568-
if (!bp)
569-
return NULL;
571+
if (!bp) {
572+
spin_unlock(&pag->pag_buf_lock);
573+
return -ENOENT;
574+
}
570575
atomic_inc(&bp->b_hold);
571-
return bp;
572-
}
576+
spin_unlock(&pag->pag_buf_lock);
573577

574-
/*
575-
* Insert the new_bp into the hash table. This consumes the perag reference
576-
* taken for the lookup.
577-
*/
578-
static int
579-
xfs_buf_find_insert(
580-
struct xfs_buftarg *btp,
581-
struct xfs_perag *pag,
582-
struct xfs_buf *new_bp)
583-
{
584-
/* No match found */
585-
if (!new_bp) {
586-
xfs_perag_put(pag);
587-
XFS_STATS_INC(btp->bt_mount, xb_miss_locked);
588-
return -ENOENT;
578+
error = xfs_buf_find_lock(bp, flags);
579+
if (error) {
580+
xfs_buf_rele(bp);
581+
return error;
589582
}
590583

591-
/* the buffer keeps the perag reference until it is freed */
592-
new_bp->b_pag = pag;
593-
rhashtable_insert_fast(&pag->pag_buf_hash, &new_bp->b_rhash_head,
594-
xfs_buf_hash_params);
584+
trace_xfs_buf_find(bp, flags, _RET_IP_);
585+
*bpp = bp;
595586
return 0;
596587
}
597588

598589
/*
599-
* Look up a buffer in the buffer cache and return it referenced and locked
600-
* in @found_bp.
601-
*
602-
* If @new_bp is supplied and we have a lookup miss, insert @new_bp into the
603-
* cache.
604-
*
605-
* If XBF_TRYLOCK is set in @flags, only try to lock the buffer and return
606-
* -EAGAIN if we fail to lock it.
607-
*
608-
* Return values are:
609-
* -EFSCORRUPTED if have been supplied with an invalid address
610-
* -EAGAIN on trylock failure
611-
* -ENOENT if we fail to find a match and @new_bp was NULL
612-
* 0, with @found_bp:
613-
* - @new_bp if we inserted it into the cache
614-
* - the buffer we found and locked.
590+
* Insert the new_bp into the hash table. This consumes the perag reference
591+
* taken for the lookup regardless of the result of the insert.
615592
*/
616593
static int
617-
xfs_buf_find(
594+
xfs_buf_find_insert(
618595
struct xfs_buftarg *btp,
596+
struct xfs_perag *pag,
597+
struct xfs_buf_map *cmap,
619598
struct xfs_buf_map *map,
620599
int nmaps,
621600
xfs_buf_flags_t flags,
622-
struct xfs_buf *new_bp,
623-
struct xfs_buf **found_bp)
601+
struct xfs_buf **bpp)
624602
{
625-
struct xfs_perag *pag;
603+
struct xfs_buf *new_bp;
626604
struct xfs_buf *bp;
627-
struct xfs_buf_map cmap = { .bm_bn = map[0].bm_bn };
628605
int error;
629-
int i;
630-
631-
*found_bp = NULL;
632-
633-
for (i = 0; i < nmaps; i++)
634-
cmap.bm_len += map[i].bm_len;
635606

636-
error = xfs_buf_map_verify(btp, &cmap);
607+
error = _xfs_buf_alloc(btp, map, nmaps, flags, &new_bp);
637608
if (error)
638-
return error;
609+
goto out_drop_pag;
639610

640-
pag = xfs_perag_get(btp->bt_mount,
641-
xfs_daddr_to_agno(btp->bt_mount, cmap.bm_bn));
611+
/*
612+
* For buffers that fit entirely within a single page, first attempt to
613+
* allocate the memory from the heap to minimise memory usage. If we
614+
* can't get heap memory for these small buffers, we fall back to using
615+
* the page allocator.
616+
*/
617+
if (BBTOB(new_bp->b_length) >= PAGE_SIZE ||
618+
xfs_buf_alloc_kmem(new_bp, flags) < 0) {
619+
error = xfs_buf_alloc_pages(new_bp, flags);
620+
if (error)
621+
goto out_free_buf;
622+
}
642623

643624
spin_lock(&pag->pag_buf_lock);
644-
bp = xfs_buf_lookup(pag, &cmap);
645-
if (bp)
646-
goto found;
625+
bp = rhashtable_lookup(&pag->pag_buf_hash, cmap, xfs_buf_hash_params);
626+
if (bp) {
627+
atomic_inc(&bp->b_hold);
628+
spin_unlock(&pag->pag_buf_lock);
629+
error = xfs_buf_find_lock(bp, flags);
630+
if (error)
631+
xfs_buf_rele(bp);
632+
else
633+
*bpp = bp;
634+
goto out_free_buf;
635+
}
647636

648-
error = xfs_buf_find_insert(btp, pag, new_bp);
637+
/* The buffer keeps the perag reference until it is freed. */
638+
new_bp->b_pag = pag;
639+
rhashtable_insert_fast(&pag->pag_buf_hash, &new_bp->b_rhash_head,
640+
xfs_buf_hash_params);
649641
spin_unlock(&pag->pag_buf_lock);
650-
if (error)
651-
return error;
652-
*found_bp = new_bp;
642+
*bpp = new_bp;
653643
return 0;
654644

655-
found:
656-
spin_unlock(&pag->pag_buf_lock);
645+
out_free_buf:
646+
xfs_buf_free(new_bp);
647+
out_drop_pag:
657648
xfs_perag_put(pag);
658-
659-
error = xfs_buf_find_lock(bp, flags);
660-
if (error)
661-
return error;
662-
663-
trace_xfs_buf_find(bp, flags, _RET_IP_);
664-
XFS_STATS_INC(btp->bt_mount, xb_get_locked);
665-
*found_bp = bp;
666-
return 0;
649+
return error;
667650
}
668651

669652
/*
@@ -673,54 +656,54 @@ xfs_buf_find(
673656
*/
674657
int
675658
xfs_buf_get_map(
676-
struct xfs_buftarg *target,
659+
struct xfs_buftarg *btp,
677660
struct xfs_buf_map *map,
678661
int nmaps,
679662
xfs_buf_flags_t flags,
680663
struct xfs_buf **bpp)
681664
{
682-
struct xfs_buf *bp;
683-
struct xfs_buf *new_bp;
665+
struct xfs_perag *pag;
666+
struct xfs_buf *bp = NULL;
667+
struct xfs_buf_map cmap = { .bm_bn = map[0].bm_bn };
684668
int error;
669+
int i;
685670

686-
*bpp = NULL;
687-
error = xfs_buf_find(target, map, nmaps, flags, NULL, &bp);
688-
if (!error)
689-
goto found;
690-
if (error != -ENOENT)
691-
return error;
692-
if (flags & XBF_INCORE)
693-
return -ENOENT;
671+
for (i = 0; i < nmaps; i++)
672+
cmap.bm_len += map[i].bm_len;
694673

695-
error = _xfs_buf_alloc(target, map, nmaps, flags, &new_bp);
674+
error = xfs_buf_map_verify(btp, &cmap);
696675
if (error)
697676
return error;
698677

699-
/*
700-
* For buffers that fit entirely within a single page, first attempt to
701-
* allocate the memory from the heap to minimise memory usage. If we
702-
* can't get heap memory for these small buffers, we fall back to using
703-
* the page allocator.
704-
*/
705-
if (BBTOB(new_bp->b_length) >= PAGE_SIZE ||
706-
xfs_buf_alloc_kmem(new_bp, flags) < 0) {
707-
error = xfs_buf_alloc_pages(new_bp, flags);
708-
if (error)
709-
goto out_free_buf;
710-
}
678+
pag = xfs_perag_get(btp->bt_mount,
679+
xfs_daddr_to_agno(btp->bt_mount, cmap.bm_bn));
711680

712-
error = xfs_buf_find(target, map, nmaps, flags, new_bp, &bp);
713-
if (error)
714-
goto out_free_buf;
681+
error = xfs_buf_lookup(pag, &cmap, flags, &bp);
682+
if (error && error != -ENOENT)
683+
goto out_put_perag;
715684

716-
if (bp != new_bp)
717-
xfs_buf_free(new_bp);
685+
/* cache hits always outnumber misses by at least 10:1 */
686+
if (unlikely(!bp)) {
687+
XFS_STATS_INC(btp->bt_mount, xb_miss_locked);
688+
689+
if (flags & XBF_INCORE)
690+
goto out_put_perag;
691+
692+
/* xfs_buf_find_insert() consumes the perag reference. */
693+
error = xfs_buf_find_insert(btp, pag, &cmap, map, nmaps,
694+
flags, &bp);
695+
if (error)
696+
return error;
697+
} else {
698+
XFS_STATS_INC(btp->bt_mount, xb_get_locked);
699+
xfs_perag_put(pag);
700+
}
718701

719-
found:
702+
/* We do not hold a perag reference anymore. */
720703
if (!bp->b_addr) {
721704
error = _xfs_buf_map_pages(bp, flags);
722705
if (unlikely(error)) {
723-
xfs_warn_ratelimited(target->bt_mount,
706+
xfs_warn_ratelimited(btp->bt_mount,
724707
"%s: failed to map %u pages", __func__,
725708
bp->b_page_count);
726709
xfs_buf_relse(bp);
@@ -735,12 +718,13 @@ xfs_buf_get_map(
735718
if (!(flags & XBF_READ))
736719
xfs_buf_ioerror(bp, 0);
737720

738-
XFS_STATS_INC(target->bt_mount, xb_get);
721+
XFS_STATS_INC(btp->bt_mount, xb_get);
739722
trace_xfs_buf_get(bp, flags, _RET_IP_);
740723
*bpp = bp;
741724
return 0;
742-
out_free_buf:
743-
xfs_buf_free(new_bp);
725+
726+
out_put_perag:
727+
xfs_perag_put(pag);
744728
return error;
745729
}
746730

0 commit comments

Comments (0)