77
88#include "abi/guc_actions_sriov_abi.h"
99#include "xe_bo.h"
10+ #include "xe_gt_sriov_pf_control.h"
1011#include "xe_gt_sriov_pf_helpers.h"
1112#include "xe_gt_sriov_pf_migration.h"
1213#include "xe_gt_sriov_printk.h"
1314#include "xe_guc.h"
1415#include "xe_guc_ct.h"
1516#include "xe_sriov.h"
17+ #include "xe_sriov_packet_types.h"
1618#include "xe_sriov_pf_migration.h"
1719
20+ #define XE_GT_SRIOV_PF_MIGRATION_RING_SIZE 5
21+
22+ static struct xe_gt_sriov_migration_data * pf_pick_gt_migration (struct xe_gt * gt , unsigned int vfid )
23+ {
24+ xe_gt_assert (gt , IS_SRIOV_PF (gt_to_xe (gt )));
25+ xe_gt_assert (gt , vfid != PFID );
26+ xe_gt_assert (gt , vfid <= xe_sriov_pf_get_totalvfs (gt_to_xe (gt )));
27+
28+ return & gt -> sriov .pf .vfs [vfid ].migration ;
29+ }
30+
1831/* Return: number of dwords saved/restored/required or a negative error code on failure */
1932static int guc_action_vf_save_restore (struct xe_guc * guc , u32 vfid , u32 opcode ,
2033 u64 addr , u32 ndwords )
@@ -382,6 +395,178 @@ ssize_t xe_gt_sriov_pf_migration_write_guc_state(struct xe_gt *gt, unsigned int
382395}
383396#endif /* CONFIG_DEBUG_FS */
384397
398+ /**
399+ * xe_gt_sriov_pf_migration_ring_empty() - Check if a migration ring is empty.
400+ * @gt: the &xe_gt
401+ * @vfid: the VF identifier
402+ *
403+ * Return: true if the ring is empty, otherwise false.
404+ */
405+ bool xe_gt_sriov_pf_migration_ring_empty (struct xe_gt * gt , unsigned int vfid )
406+ {
407+ return ptr_ring_empty (& pf_pick_gt_migration (gt , vfid )-> ring );
408+ }
409+
410+ /**
411+ * xe_gt_sriov_pf_migration_ring_full() - Check if a migration ring is full.
412+ * @gt: the &xe_gt
413+ * @vfid: the VF identifier
414+ *
415+ * Return: true if the ring is full, otherwise false.
416+ */
417+ bool xe_gt_sriov_pf_migration_ring_full (struct xe_gt * gt , unsigned int vfid )
418+ {
419+ return ptr_ring_full (& pf_pick_gt_migration (gt , vfid )-> ring );
420+ }
421+
422+ /**
423+ * xe_gt_sriov_pf_migration_save_produce() - Add VF save data packet to migration ring.
424+ * @gt: the &xe_gt
425+ * @vfid: the VF identifier
426+ * @data: the &xe_sriov_packet
427+ *
428+ * Called by the save migration data producer (PF SR-IOV Control worker) when
429+ * processing migration data.
430+ * Wakes up the save migration data consumer (userspace), that is potentially
431+ * waiting for data when the ring was empty.
432+ *
433+ * Return: 0 on success or a negative error code on failure.
434+ */
435+ int xe_gt_sriov_pf_migration_save_produce (struct xe_gt * gt , unsigned int vfid ,
436+ struct xe_sriov_packet * data )
437+ {
438+ int ret ;
439+
440+ ret = ptr_ring_produce (& pf_pick_gt_migration (gt , vfid )-> ring , data );
441+ if (ret )
442+ return ret ;
443+
444+ wake_up_all (xe_sriov_pf_migration_waitqueue (gt_to_xe (gt ), vfid ));
445+
446+ return 0 ;
447+ }
448+
449+ /**
450+ * xe_gt_sriov_pf_migration_restore_consume() - Get VF restore data packet from migration ring.
451+ * @gt: the &xe_gt
452+ * @vfid: the VF identifier
453+ *
454+ * Called by the restore migration data consumer (PF SR-IOV Control worker) when
455+ * processing migration data.
456+ * Wakes up the restore migration data producer (userspace), that is
457+ * potentially waiting to add more data when the ring is full.
458+ *
459+ * Return: Pointer to &xe_sriov_packet on success,
460+ * NULL if ring is empty.
461+ */
462+ struct xe_sriov_packet *
463+ xe_gt_sriov_pf_migration_restore_consume (struct xe_gt * gt , unsigned int vfid )
464+ {
465+ struct xe_gt_sriov_migration_data * migration = pf_pick_gt_migration (gt , vfid );
466+ struct wait_queue_head * wq = xe_sriov_pf_migration_waitqueue (gt_to_xe (gt ), vfid );
467+ struct xe_sriov_packet * data ;
468+
469+ data = ptr_ring_consume (& migration -> ring );
470+ if (data )
471+ wake_up_all (wq );
472+
473+ return data ;
474+ }
475+
476+ static bool pf_restore_data_ready (struct xe_gt * gt , unsigned int vfid )
477+ {
478+ if (xe_gt_sriov_pf_control_check_restore_failed (gt , vfid ) ||
479+ !ptr_ring_full (& pf_pick_gt_migration (gt , vfid )-> ring ))
480+ return true;
481+
482+ return false;
483+ }
484+
485+ /**
486+ * xe_gt_sriov_pf_migration_restore_produce() - Add VF restore data packet to migration ring.
487+ * @gt: the &xe_gt
488+ * @vfid: the VF identifier
489+ * @data: the &xe_sriov_packet
490+ *
491+ * Called by the restore migration data producer (userspace) when processing
492+ * migration data.
493+ * If the ring is full, waits until there is space.
494+ * Queues the restore migration data consumer (PF SR-IOV Control worker), that
495+ * is potentially waiting for data when the ring was empty.
496+ *
497+ * Return: 0 on success or a negative error code on failure.
498+ */
499+ int xe_gt_sriov_pf_migration_restore_produce (struct xe_gt * gt , unsigned int vfid ,
500+ struct xe_sriov_packet * data )
501+ {
502+ int ret ;
503+
504+ xe_gt_assert (gt , data -> hdr .tile_id == gt -> tile -> id );
505+ xe_gt_assert (gt , data -> hdr .gt_id == gt -> info .id );
506+
507+ for (;;) {
508+ if (xe_gt_sriov_pf_control_check_restore_failed (gt , vfid ))
509+ return - EIO ;
510+
511+ ret = ptr_ring_produce (& pf_pick_gt_migration (gt , vfid )-> ring , data );
512+ if (!ret )
513+ break ;
514+
515+ ret = wait_event_interruptible (* xe_sriov_pf_migration_waitqueue (gt_to_xe (gt ), vfid ),
516+ pf_restore_data_ready (gt , vfid ));
517+ if (ret )
518+ return ret ;
519+ }
520+
521+ return xe_gt_sriov_pf_control_process_restore_data (gt , vfid );
522+ }
523+
524+ /**
525+ * xe_gt_sriov_pf_migration_save_consume() - Get VF save data packet from migration ring.
526+ * @gt: the &xe_gt
527+ * @vfid: the VF identifier
528+ *
529+ * Called by the save migration data consumer (userspace) when
530+ * processing migration data.
531+ * Queues the save migration data producer (PF SR-IOV Control worker), that is
532+ * potentially waiting to add more data when the ring is full.
533+ *
534+ * Return: Pointer to &xe_sriov_packet on success,
535+ * NULL if ring is empty and there's no more data available,
536+ * ERR_PTR(-EAGAIN) if the ring is empty, but data is still produced.
537+ */
538+ struct xe_sriov_packet *
539+ xe_gt_sriov_pf_migration_save_consume (struct xe_gt * gt , unsigned int vfid )
540+ {
541+ struct xe_gt_sriov_migration_data * migration = pf_pick_gt_migration (gt , vfid );
542+ struct xe_sriov_packet * data ;
543+ int ret ;
544+
545+ data = ptr_ring_consume (& migration -> ring );
546+ if (data ) {
547+ ret = xe_gt_sriov_pf_control_process_save_data (gt , vfid );
548+ if (ret )
549+ return ERR_PTR (ret );
550+
551+ return data ;
552+ }
553+
554+ if (xe_gt_sriov_pf_control_check_save_data_done (gt , vfid ))
555+ return NULL ;
556+
557+ if (xe_gt_sriov_pf_control_check_save_failed (gt , vfid ))
558+ return ERR_PTR (- EIO );
559+
560+ return ERR_PTR (- EAGAIN );
561+ }
562+
563+ static void action_ring_cleanup (void * arg )
564+ {
565+ struct ptr_ring * r = arg ;
566+
567+ ptr_ring_cleanup (r , NULL );
568+ }
569+
385570/**
386571 * xe_gt_sriov_pf_migration_init() - Initialize support for VF migration.
387572 * @gt: the &xe_gt
@@ -393,6 +578,7 @@ ssize_t xe_gt_sriov_pf_migration_write_guc_state(struct xe_gt *gt, unsigned int
393578int xe_gt_sriov_pf_migration_init (struct xe_gt * gt )
394579{
395580 struct xe_device * xe = gt_to_xe (gt );
581+ unsigned int n , totalvfs ;
396582 int err ;
397583
398584 xe_gt_assert (gt , IS_SRIOV_PF (xe ));
@@ -404,5 +590,19 @@ int xe_gt_sriov_pf_migration_init(struct xe_gt *gt)
404590 if (err )
405591 return err ;
406592
593+ totalvfs = xe_sriov_pf_get_totalvfs (xe );
594+ for (n = 1 ; n <= totalvfs ; n ++ ) {
595+ struct xe_gt_sriov_migration_data * migration = pf_pick_gt_migration (gt , n );
596+
597+ err = ptr_ring_init (& migration -> ring ,
598+ XE_GT_SRIOV_PF_MIGRATION_RING_SIZE , GFP_KERNEL );
599+ if (err )
600+ return err ;
601+
602+ err = devm_add_action_or_reset (xe -> drm .dev , action_ring_cleanup , & migration -> ring );
603+ if (err )
604+ return err ;
605+ }
606+
407607 return 0 ;
408608}