@@ -651,6 +651,42 @@ ext2_xattr_set(struct inode *inode, int name_index, const char *name,
651651 return error ;
652652}
653653
/*
 * ext2_xattr_release_block - drop one reference to an extended-attribute block
 * @inode: inode releasing its reference to the block
 * @bh: buffer head of the xattr block; the caller keeps (and later releases)
 *      its own reference to @bh
 *
 * If @inode held the last reference (h_refcount == 1), the block's mbcache
 * entry is removed and the block itself is freed; otherwise only the on-disk
 * refcount is decremented and the quota charge for one block is returned.
 *
 * The refcount test and the mbcache deletion must both happen under the
 * buffer lock so that ext2_xattr_set2() can reliably detect a freed block.
 */
static void ext2_xattr_release_block(struct inode *inode,
				     struct buffer_head *bh)
{
	struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);

	lock_buffer(bh);
	if (HDR(bh)->h_refcount == cpu_to_le32(1)) {
		__u32 hash = le32_to_cpu(HDR(bh)->h_hash);

		/*
		 * This must happen under buffer lock for
		 * ext2_xattr_set2() to reliably detect freed block
		 */
		mb_cache_entry_delete(ea_block_cache, hash,
				      bh->b_blocknr);
		/* Free the old block. */
		ea_bdebug(bh, "freeing");
		ext2_free_blocks(inode, bh->b_blocknr, 1);
		/* We let our caller release bh, so we
		 * need to duplicate the buffer before. */
		get_bh(bh);
		/* Forget the now-stale buffer so it is never written back. */
		bforget(bh);
		unlock_buffer(bh);
	} else {
		/* Decrement the refcount only. */
		le32_add_cpu(&HDR(bh)->h_refcount, -1);
		/* Return the quota charge for the shared block we gave up. */
		dquot_free_block(inode, 1);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		ea_bdebug(bh, "refcount now=%d",
			  le32_to_cpu(HDR(bh)->h_refcount));
		/* NOTE(review): only the shared-block path honors IS_SYNC;
		 * the freeing path has no buffer left to sync. */
		if (IS_SYNC(inode))
			sync_dirty_buffer(bh);
	}
}
689+
654690/*
655691 * Second half of ext2_xattr_set(): Update the file system.
656692 */
@@ -747,34 +783,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
747783 * If there was an old block and we are no longer using it,
748784 * release the old block.
749785 */
750- lock_buffer (old_bh );
751- if (HDR (old_bh )-> h_refcount == cpu_to_le32 (1 )) {
752- __u32 hash = le32_to_cpu (HDR (old_bh )-> h_hash );
753-
754- /*
755- * This must happen under buffer lock for
756- * ext2_xattr_set2() to reliably detect freed block
757- */
758- mb_cache_entry_delete (ea_block_cache , hash ,
759- old_bh -> b_blocknr );
760- /* Free the old block. */
761- ea_bdebug (old_bh , "freeing" );
762- ext2_free_blocks (inode , old_bh -> b_blocknr , 1 );
763- mark_inode_dirty (inode );
764- /* We let our caller release old_bh, so we
765- * need to duplicate the buffer before. */
766- get_bh (old_bh );
767- bforget (old_bh );
768- } else {
769- /* Decrement the refcount only. */
770- le32_add_cpu (& HDR (old_bh )-> h_refcount , -1 );
771- dquot_free_block_nodirty (inode , 1 );
772- mark_inode_dirty (inode );
773- mark_buffer_dirty (old_bh );
774- ea_bdebug (old_bh , "refcount now=%d" ,
775- le32_to_cpu (HDR (old_bh )-> h_refcount ));
776- }
777- unlock_buffer (old_bh );
786+ ext2_xattr_release_block (inode , old_bh );
778787 }
779788
780789cleanup :
@@ -828,30 +837,7 @@ ext2_xattr_delete_inode(struct inode *inode)
828837 EXT2_I (inode )-> i_file_acl );
829838 goto cleanup ;
830839 }
831- lock_buffer (bh );
832- if (HDR (bh )-> h_refcount == cpu_to_le32 (1 )) {
833- __u32 hash = le32_to_cpu (HDR (bh )-> h_hash );
834-
835- /*
836- * This must happen under buffer lock for ext2_xattr_set2() to
837- * reliably detect freed block
838- */
839- mb_cache_entry_delete (EA_BLOCK_CACHE (inode ), hash ,
840- bh -> b_blocknr );
841- ext2_free_blocks (inode , EXT2_I (inode )-> i_file_acl , 1 );
842- get_bh (bh );
843- bforget (bh );
844- unlock_buffer (bh );
845- } else {
846- le32_add_cpu (& HDR (bh )-> h_refcount , -1 );
847- ea_bdebug (bh , "refcount now=%d" ,
848- le32_to_cpu (HDR (bh )-> h_refcount ));
849- unlock_buffer (bh );
850- mark_buffer_dirty (bh );
851- if (IS_SYNC (inode ))
852- sync_dirty_buffer (bh );
853- dquot_free_block_nodirty (inode , 1 );
854- }
840+ ext2_xattr_release_block (inode , bh );
855841 EXT2_I (inode )-> i_file_acl = 0 ;
856842
857843cleanup :