@@ -118,71 +118,6 @@ xfs_buf_free_maps(
 	}
 }
 
-static int
-_xfs_buf_alloc(
-	struct xfs_buftarg	*target,
-	struct xfs_buf_map	*map,
-	int			nmaps,
-	xfs_buf_flags_t		flags,
-	struct xfs_buf		**bpp)
-{
-	struct xfs_buf		*bp;
-	int			error;
-	int			i;
-
-	*bpp = NULL;
-	bp = kmem_cache_zalloc(xfs_buf_cache,
-			GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL);
-
-	/*
-	 * We don't want certain flags to appear in b_flags unless they are
-	 * specifically set by later operations on the buffer.
-	 */
-	flags &= ~(XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);
-
-	/*
-	 * A new buffer is held and locked by the owner. This ensures that the
-	 * buffer is owned by the caller and racing RCU lookups right after
-	 * inserting into the hash table are safe (and will have to wait for
-	 * the unlock to do anything non-trivial).
-	 */
-	bp->b_hold = 1;
-	sema_init(&bp->b_sema, 0); /* held, no waiters */
-
-	spin_lock_init(&bp->b_lock);
-	atomic_set(&bp->b_lru_ref, 1);
-	init_completion(&bp->b_iowait);
-	INIT_LIST_HEAD(&bp->b_lru);
-	INIT_LIST_HEAD(&bp->b_list);
-	INIT_LIST_HEAD(&bp->b_li_list);
-	bp->b_target = target;
-	bp->b_mount = target->bt_mount;
-	bp->b_flags = flags;
-
-	error = xfs_buf_get_maps(bp, nmaps);
-	if (error) {
-		kmem_cache_free(xfs_buf_cache, bp);
-		return error;
-	}
-
-	bp->b_rhash_key = map[0].bm_bn;
-	bp->b_length = 0;
-	for (i = 0; i < nmaps; i++) {
-		bp->b_maps[i].bm_bn = map[i].bm_bn;
-		bp->b_maps[i].bm_len = map[i].bm_len;
-		bp->b_length += map[i].bm_len;
-	}
-
-	atomic_set(&bp->b_pin_count, 0);
-	init_waitqueue_head(&bp->b_waiters);
-
-	XFS_STATS_INC(bp->b_mount, xb_create);
-	trace_xfs_buf_init(bp, _RET_IP_);
-
-	*bpp = bp;
-	return 0;
-}
-
 static void
 xfs_buf_free_callback(
 	struct callback_head	*cb)
@@ -342,6 +277,77 @@ xfs_buf_alloc_backing_mem(
 	return 0;
 }
 
+static int
+xfs_buf_alloc(
+	struct xfs_buftarg	*target,
+	struct xfs_buf_map	*map,
+	int			nmaps,
+	xfs_buf_flags_t		flags,
+	struct xfs_buf		**bpp)
+{
+	struct xfs_buf		*bp;
+	int			error;
+	int			i;
+
+	*bpp = NULL;
+	bp = kmem_cache_zalloc(xfs_buf_cache,
+			GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL);
+
+	/*
+	 * We don't want certain flags to appear in b_flags unless they are
+	 * specifically set by later operations on the buffer.
+	 */
+	flags &= ~(XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);
+
+	/*
+	 * A new buffer is held and locked by the owner. This ensures that the
+	 * buffer is owned by the caller and racing RCU lookups right after
+	 * inserting into the hash table are safe (and will have to wait for
+	 * the unlock to do anything non-trivial).
+	 */
+	bp->b_hold = 1;
+	sema_init(&bp->b_sema, 0); /* held, no waiters */
+
+	spin_lock_init(&bp->b_lock);
+	atomic_set(&bp->b_lru_ref, 1);
+	init_completion(&bp->b_iowait);
+	INIT_LIST_HEAD(&bp->b_lru);
+	INIT_LIST_HEAD(&bp->b_list);
+	INIT_LIST_HEAD(&bp->b_li_list);
+	bp->b_target = target;
+	bp->b_mount = target->bt_mount;
+	bp->b_flags = flags;
+
+	error = xfs_buf_get_maps(bp, nmaps);
+	if (error) {
+		kmem_cache_free(xfs_buf_cache, bp);
+		return error;
+	}
+
+	bp->b_rhash_key = map[0].bm_bn;
+	bp->b_length = 0;
+	for (i = 0; i < nmaps; i++) {
+		bp->b_maps[i].bm_bn = map[i].bm_bn;
+		bp->b_maps[i].bm_len = map[i].bm_len;
+		bp->b_length += map[i].bm_len;
+	}
+
+	atomic_set(&bp->b_pin_count, 0);
+	init_waitqueue_head(&bp->b_waiters);
+
+	XFS_STATS_INC(bp->b_mount, xb_create);
+	trace_xfs_buf_init(bp, _RET_IP_);
+
+	error = xfs_buf_alloc_backing_mem(bp, flags);
+	if (error) {
+		xfs_buf_free(bp);
+		return error;
+	}
+
+	*bpp = bp;
+	return 0;
+}
+
 /*
  * Finding and Reading Buffers
  */
@@ -525,14 +531,10 @@ xfs_buf_find_insert(
 	struct xfs_buf		*bp;
 	int			error;
 
-	error = _xfs_buf_alloc(btp, map, nmaps, flags, &new_bp);
+	error = xfs_buf_alloc(btp, map, nmaps, flags, &new_bp);
 	if (error)
 		goto out_drop_pag;
 
-	error = xfs_buf_alloc_backing_mem(new_bp, flags);
-	if (error)
-		goto out_free_buf;
-
 	/* The new buffer keeps the perag reference until it is freed. */
 	new_bp->b_pag = pag;
 
@@ -869,28 +871,11 @@ xfs_buf_get_uncached(
 	struct xfs_buf		**bpp)
 {
 	int			error;
-	struct xfs_buf		*bp;
 	DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);
 
-	/* there are currently no valid flags for xfs_buf_get_uncached */
-	ASSERT(flags == 0);
-
-	*bpp = NULL;
-
-	error = _xfs_buf_alloc(target, &map, 1, flags, &bp);
-	if (error)
-		return error;
-
-	error = xfs_buf_alloc_backing_mem(bp, flags);
-	if (error)
-		goto fail_free_buf;
-
-	trace_xfs_buf_get_uncached(bp, _RET_IP_);
-	*bpp = bp;
-	return 0;
-
-fail_free_buf:
-	xfs_buf_free(bp);
+	error = xfs_buf_alloc(target, &map, 1, flags, bpp);
+	if (!error)
+		trace_xfs_buf_get_uncached(*bpp, _RET_IP_);
 	return error;
 }
 
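Net effect for callers, as a rough before/after sketch derived from the hunks above (xfs_buf_find_insert and xfs_buf_get_uncached are the two call sites touched here; the local variable names below just mirror those in the diff):

/*
 * Before this change (sketch): each caller paired the header allocation
 * with a separate backing-memory allocation and unwound by hand when the
 * second step failed.
 */
error = _xfs_buf_alloc(btp, map, nmaps, flags, &bp);
if (error)
	return error;
error = xfs_buf_alloc_backing_mem(bp, flags);
if (error) {
	xfs_buf_free(bp);	/* caller-side unwind of the half-built buffer */
	return error;
}

/*
 * After this change (sketch): xfs_buf_alloc() either returns a buffer
 * with backing memory already attached or frees the partial buffer
 * internally, so callers need no separate failure path.
 */
error = xfs_buf_alloc(btp, map, nmaps, flags, &bp);
if (error)
	return error;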