@@ -159,7 +159,7 @@ static inline struct io_wq_acct *io_get_acct(struct io_wq *wq, bool bound)
 static inline struct io_wq_acct *io_work_get_acct(struct io_wq *wq,
						   struct io_wq_work *work)
 {
-	return io_get_acct(wq, !(work->flags & IO_WQ_WORK_UNBOUND));
+	return io_get_acct(wq, !(atomic_read(&work->flags) & IO_WQ_WORK_UNBOUND));
 }
 
 static inline struct io_wq_acct *io_wq_get_acct(struct io_worker *worker)
@@ -451,7 +451,7 @@ static void __io_worker_idle(struct io_wq *wq, struct io_worker *worker)
 
 static inline unsigned int io_get_work_hash(struct io_wq_work *work)
 {
-	return work->flags >> IO_WQ_HASH_SHIFT;
+	return atomic_read(&work->flags) >> IO_WQ_HASH_SHIFT;
 }
 
 static bool io_wait_on_hash(struct io_wq *wq, unsigned int hash)
@@ -592,8 +592,9 @@ static void io_worker_handle_work(struct io_wq_acct *acct,
 
 		next_hashed = wq_next_work(work);
 
-		if (unlikely(do_kill) && (work->flags & IO_WQ_WORK_UNBOUND))
-			work->flags |= IO_WQ_WORK_CANCEL;
+		if (do_kill &&
+		    (atomic_read(&work->flags) & IO_WQ_WORK_UNBOUND))
+			atomic_or(IO_WQ_WORK_CANCEL, &work->flags);
 		wq->do_work(work);
 		io_assign_current_work(worker, NULL);
 
@@ -891,7 +892,7 @@ static bool io_wq_worker_wake(struct io_worker *worker, void *data)
 static void io_run_cancel(struct io_wq_work *work, struct io_wq *wq)
 {
 	do {
-		work->flags |= IO_WQ_WORK_CANCEL;
+		atomic_or(IO_WQ_WORK_CANCEL, &work->flags);
 		wq->do_work(work);
 		work = wq->free_work(work);
 	} while (work);
@@ -926,7 +927,7 @@ static bool io_wq_work_match_item(struct io_wq_work *work, void *data)
 void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
 {
 	struct io_wq_acct *acct = io_work_get_acct(wq, work);
-	unsigned long work_flags = work->flags;
+	unsigned int work_flags = atomic_read(&work->flags);
 	struct io_cb_cancel_data match = {
 		.fn		= io_wq_work_match_item,
 		.data		= work,
@@ -939,7 +940,7 @@ void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
 	 * been marked as one that should not get executed, cancel it here.
 	 */
 	if (test_bit(IO_WQ_BIT_EXIT, &wq->state) ||
-	    (work->flags & IO_WQ_WORK_CANCEL)) {
+	    (work_flags & IO_WQ_WORK_CANCEL)) {
 		io_run_cancel(work, wq);
 		return;
 	}
@@ -982,15 +983,15 @@ void io_wq_hash_work(struct io_wq_work *work, void *val)
 	unsigned int bit;
 
 	bit = hash_ptr(val, IO_WQ_HASH_ORDER);
-	work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
+	atomic_or(IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT), &work->flags);
 }
 
 static bool __io_wq_worker_cancel(struct io_worker *worker,
				   struct io_cb_cancel_data *match,
				   struct io_wq_work *work)
 {
	 if (work && match->fn(work, match->data)) {
-		work->flags |= IO_WQ_WORK_CANCEL;
+		atomic_or(IO_WQ_WORK_CANCEL, &work->flags);
		 __set_notify_signal(worker->task);
		 return true;
	 }
0 commit comments