@@ -551,8 +551,8 @@ static void kvm_null_fn(void)
 		node;							     \
 		node = interval_tree_iter_next(node, start, last))	     \

-static __always_inline kvm_mn_ret_t __kvm_handle_hva_range(struct kvm *kvm,
-							    const struct kvm_mmu_notifier_range *range)
+static __always_inline kvm_mn_ret_t kvm_handle_hva_range(struct kvm *kvm,
+							  const struct kvm_mmu_notifier_range *range)
 {
 	struct kvm_mmu_notifier_return r = {
 		.ret = false,
@@ -633,7 +633,7 @@ static __always_inline kvm_mn_ret_t __kvm_handle_hva_range(struct kvm *kvm,
 	return r;
 }

-static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
+static __always_inline int kvm_age_hva_range(struct mmu_notifier *mn,
 						unsigned long start,
 						unsigned long end,
 						gfn_handler_t handler,
@@ -649,15 +649,15 @@ static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
 		.may_block	= false,
 	};

-	return __kvm_handle_hva_range(kvm, &range).ret;
+	return kvm_handle_hva_range(kvm, &range).ret;
 }

-static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn,
-							 unsigned long start,
-							 unsigned long end,
-							 gfn_handler_t handler)
+static __always_inline int kvm_age_hva_range_no_flush(struct mmu_notifier *mn,
+						      unsigned long start,
+						      unsigned long end,
+						      gfn_handler_t handler)
 {
-	return kvm_handle_hva_range(mn, start, end, handler, false);
+	return kvm_age_hva_range(mn, start, end, handler, false);
 }

 void kvm_mmu_invalidate_begin(struct kvm *kvm)
@@ -752,7 +752,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 	 * that guest memory has been reclaimed. This needs to be done *after*
 	 * dropping mmu_lock, as x86's reclaim path is slooooow.
 	 */
-	if (__kvm_handle_hva_range(kvm, &hva_range).found_memslot)
+	if (kvm_handle_hva_range(kvm, &hva_range).found_memslot)
 		kvm_arch_guest_memory_reclaimed(kvm);

 	return 0;
@@ -798,7 +798,7 @@ static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
 	};
 	bool wake;

-	__kvm_handle_hva_range(kvm, &hva_range);
+	kvm_handle_hva_range(kvm, &hva_range);

 	/* Pairs with the increment in range_start(). */
 	spin_lock(&kvm->mn_invalidate_lock);
@@ -822,8 +822,8 @@ static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
 {
 	trace_kvm_age_hva(start, end);

-	return kvm_handle_hva_range(mn, start, end, kvm_age_gfn,
-				    !IS_ENABLED(CONFIG_KVM_ELIDE_TLB_FLUSH_IF_YOUNG));
+	return kvm_age_hva_range(mn, start, end, kvm_age_gfn,
+				 !IS_ENABLED(CONFIG_KVM_ELIDE_TLB_FLUSH_IF_YOUNG));
 }

 static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
@@ -846,7 +846,7 @@ static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
 	 * cadence. If we find this inaccurate, we might come up with a
 	 * more sophisticated heuristic later.
 	 */
-	return kvm_handle_hva_range_no_flush(mn, start, end, kvm_age_gfn);
+	return kvm_age_hva_range_no_flush(mn, start, end, kvm_age_gfn);
 }

 static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
@@ -855,8 +855,8 @@ static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
 {
 	trace_kvm_test_age_hva(address);

-	return kvm_handle_hva_range_no_flush(mn, address, address + 1,
-					     kvm_test_age_gfn);
+	return kvm_age_hva_range_no_flush(mn, address, address + 1,
+					  kvm_test_age_gfn);
 }

 static void kvm_mmu_notifier_release(struct mmu_notifier *mn,