@@ -744,6 +744,117 @@ enum {
744744 OSWB_MAX_DEPTH = 5 , // up to 5 different classes
745745};
746746
747+ // Codegen for setting an instance variable.
748+ // Preconditions:
749+ // - receiver is in REG0
750+ // - receiver has the same class as CLASS_OF(comptime_receiver)
751+ // - no stack push or pops to ctx since the entry to the codegen of the instruction being compiled
752+ static codegen_status_t
753+ gen_set_ivar (jitstate_t * jit , ctx_t * ctx , const int max_chain_depth , VALUE comptime_receiver , ID ivar_name , insn_opnd_t reg0_opnd , uint8_t * side_exit )
754+ {
755+ VALUE comptime_val_klass = CLASS_OF (comptime_receiver );
756+ const ctx_t starting_context = * ctx ; // make a copy for use with jit_chain_guard
757+
758+ // If the class uses the default allocator, instances should all be T_OBJECT
759+ // NOTE: This assumes nobody changes the allocator of the class after allocation.
760+ // Eventually, we can encode whether an object is T_OBJECT or not
761+ // inside object shapes.
762+ if (rb_get_alloc_func (comptime_val_klass ) != rb_class_allocate_instance ) {
763+ GEN_COUNTER_INC (cb , setivar_not_object );
764+ return YJIT_CANT_COMPILE ;
765+ }
766+ RUBY_ASSERT (BUILTIN_TYPE (comptime_receiver ) == T_OBJECT ); // because we checked the allocator
767+
768+ // ID for the name of the ivar
769+ ID id = ivar_name ;
770+ struct rb_iv_index_tbl_entry * ent ;
771+ struct st_table * iv_index_tbl = ROBJECT_IV_INDEX_TBL (comptime_receiver );
772+
773+ // Lookup index for the ivar the instruction loads
774+ if (iv_index_tbl && rb_iv_index_tbl_lookup (iv_index_tbl , id , & ent )) {
775+ uint32_t ivar_index = ent -> index ;
776+
777+ val_type_t val_type = ctx_get_opnd_type (ctx , OPND_STACK (0 ));
778+ x86opnd_t val_to_write = ctx_stack_opnd (ctx , 0 );
779+ mov (cb , REG1 , val_to_write );
780+
781+ // Bail if the value to write is a heap object, because this needs a write barrier
782+ if (!val_type .is_imm ) {
783+ ADD_COMMENT (cb , "guard value is immediate" );
784+ test (cb , REG1 , imm_opnd (RUBY_IMMEDIATE_MASK ));
785+ jz_ptr (cb , COUNTED_EXIT (side_exit , setivar_val_heapobject ));
786+ ctx_set_opnd_type (ctx , OPND_STACK (0 ), TYPE_IMM );
787+ }
788+
789+ // Pop the value to write
790+ ctx_stack_pop (ctx , 1 );
791+
792+ // Bail if this object is frozen
793+ ADD_COMMENT (cb , "guard self is not frozen" );
794+ x86opnd_t flags_opnd = member_opnd (REG0 , struct RBasic , flags );
795+ test (cb , flags_opnd , imm_opnd (RUBY_FL_FREEZE ));
796+ jnz_ptr (cb , COUNTED_EXIT (side_exit , setivar_frozen ));
797+
798+ // Pop receiver if it's on the temp stack
799+ if (!reg0_opnd .is_self ) {
800+ (void )ctx_stack_pop (ctx , 1 );
801+ }
802+
803+ // Compile time self is embedded and the ivar index lands within the object
804+ if (RB_FL_TEST_RAW (comptime_receiver , ROBJECT_EMBED ) && ivar_index < ROBJECT_EMBED_LEN_MAX ) {
805+ // See ROBJECT_IVPTR() from include/ruby/internal/core/robject.h
806+
807+ // Guard that self is embedded
808+ // TODO: BT and JC is shorter
809+ ADD_COMMENT (cb , "guard embedded setivar" );
810+ test (cb , flags_opnd , imm_opnd (ROBJECT_EMBED ));
811+ jit_chain_guard (JCC_JZ , jit , & starting_context , max_chain_depth , side_exit );
812+
813+ // Store the ivar on the object
814+ x86opnd_t ivar_opnd = mem_opnd (64 , REG0 , offsetof(struct RObject , as .ary ) + ivar_index * SIZEOF_VALUE );
815+ mov (cb , ivar_opnd , REG1 );
816+
817+ // Push the ivar on the stack
818+ // For attr_writer we'll need to push the value on the stack
819+ //x86opnd_t out_opnd = ctx_stack_push(ctx, TYPE_UNKNOWN);
820+ }
821+ else {
 822+         // Compile time receiver is *not* embedded.
823+
 824+         // Guard that receiver is *not* embedded
825+ // See ROBJECT_IVPTR() from include/ruby/internal/core/robject.h
826+ ADD_COMMENT (cb , "guard extended setivar" );
827+ x86opnd_t flags_opnd = member_opnd (REG0 , struct RBasic , flags );
828+ test (cb , flags_opnd , imm_opnd (ROBJECT_EMBED ));
829+ jit_chain_guard (JCC_JNZ , jit , & starting_context , max_chain_depth , side_exit );
830+
831+ // check that the extended table is big enough
832+ if (ivar_index >= ROBJECT_EMBED_LEN_MAX + 1 ) {
833+ // Check that the slot is inside the extended table (num_slots > index)
834+ ADD_COMMENT (cb , "check index in extended table" );
835+ x86opnd_t num_slots = mem_opnd (32 , REG0 , offsetof(struct RObject , as .heap .numiv ));
836+ cmp (cb , num_slots , imm_opnd (ivar_index ));
837+ jle_ptr (cb , COUNTED_EXIT (side_exit , setivar_idx_out_of_range ));
838+ }
839+
840+ // Get a pointer to the extended table
841+ x86opnd_t tbl_opnd = mem_opnd (64 , REG0 , offsetof(struct RObject , as .heap .ivptr ));
842+ mov (cb , REG0 , tbl_opnd );
843+
844+ // Write the ivar to the extended table
845+ x86opnd_t ivar_opnd = mem_opnd (64 , REG0 , sizeof (VALUE ) * ivar_index );
846+ mov (cb , ivar_opnd , REG1 );
847+ }
848+
849+ // Jump to next instruction. This allows guard chains to share the same successor.
850+ jit_jump_to_next_insn (jit , ctx );
851+ return YJIT_END_BLOCK ;
852+ }
853+
854+ GEN_COUNTER_INC (cb , setivar_name_not_mapped );
855+ return YJIT_CANT_COMPILE ;
856+ }
857+
747858// Codegen for getting an instance variable.
748859// Preconditions:
749860// - receiver is in REG0
@@ -866,7 +977,7 @@ gen_getinstancevariable(jitstate_t *jit, ctx_t *ctx)
866977
867978 // Guard that the receiver has the same class as the one from compile time.
868979 mov (cb , REG0 , member_opnd (REG_CFP , rb_control_frame_t , self ));
869- guard_self_is_heap (cb , REG0 , side_exit , ctx );
980+ guard_self_is_heap (cb , REG0 , COUNTED_EXIT ( side_exit , getivar_se_self_not_heap ) , ctx );
870981
871982 jit_guard_known_klass (jit , ctx , comptime_val_klass , OPND_SELF , GETIVAR_MAX_DEPTH , side_exit );
872983
@@ -876,69 +987,27 @@ gen_getinstancevariable(jitstate_t *jit, ctx_t *ctx)
876987static codegen_status_t
877988gen_setinstancevariable (jitstate_t * jit , ctx_t * ctx )
878989{
879- IVC ic = (IVC )jit_get_arg (jit , 1 );
880-
881- // Check that the inline cache has been set, slot index is known
882- if (!ic -> entry ) {
883- return YJIT_CANT_COMPILE ;
990+ // Defer compilation so we can specialize on a runtime `self`
991+ if (!jit_at_current_insn (jit )) {
992+ defer_compilation (jit -> block , jit -> insn_idx , ctx );
993+ return YJIT_END_BLOCK ;
884994 }
885995
886- // If the class uses the default allocator, instances should all be T_OBJECT
887- // NOTE: This assumes nobody changes the allocator of the class after allocation.
888- // Eventually, we can encode whether an object is T_OBJECT or not
889- // inside object shapes.
890- if (rb_get_alloc_func (ic -> entry -> class_value ) != rb_class_allocate_instance ) {
891- return YJIT_CANT_COMPILE ;
892- }
996+ ID ivar_name = (ID )jit_get_arg (jit , 0 );
893997
894- uint32_t ivar_index = ic -> entry -> index ;
998+ VALUE comptime_val = jit_peek_at_self (jit , ctx );
999+ VALUE comptime_val_klass = CLASS_OF (comptime_val );
8951000
896- // Create a size-exit to fall back to the interpreter
897- uint8_t * side_exit = yjit_side_exit (jit , ctx );
1001+ // Generate a side exit
1002+ uint8_t * side_exit = yjit_side_exit (jit , ctx );
8981003
899- // Load self from CFP
1004+ // Guard that the receiver has the same class as the one from compile time.
9001005 mov (cb , REG0 , member_opnd (REG_CFP , rb_control_frame_t , self ));
1006+ guard_self_is_heap (cb , REG0 , COUNTED_EXIT (side_exit , setivar_se_self_not_heap ), ctx );
9011007
902- guard_self_is_heap (cb , REG0 , side_exit , ctx );
903-
904- // Bail if receiver class is different from compiled time call cache class
905- x86opnd_t klass_opnd = mem_opnd (64 , REG0 , offsetof(struct RBasic , klass ));
906- mov (cb , REG1 , klass_opnd );
907- x86opnd_t serial_opnd = mem_opnd (64 , REG1 , offsetof(struct RClass , class_serial ));
908- cmp (cb , serial_opnd , imm_opnd (ic -> entry -> class_serial ));
909- jne_ptr (cb , side_exit );
910-
911- // Bail if the ivars are not on the extended table
912- // See ROBJECT_IVPTR() from include/ruby/internal/core/robject.h
913- x86opnd_t flags_opnd = member_opnd (REG0 , struct RBasic , flags );
914- test (cb , flags_opnd , imm_opnd (ROBJECT_EMBED ));
915- jnz_ptr (cb , side_exit );
916-
917- // If we can't guarantee that the extended table is big enoughg
918- if (ivar_index >= ROBJECT_EMBED_LEN_MAX + 1 ) {
919- // Check that the slot is inside the extended table (num_slots > index)
920- x86opnd_t num_slots = mem_opnd (32 , REG0 , offsetof(struct RObject , as .heap .numiv ));
921- cmp (cb , num_slots , imm_opnd (ivar_index ));
922- jle_ptr (cb , side_exit );
923- }
924-
925- // Get a pointer to the extended table
926- x86opnd_t tbl_opnd = mem_opnd (64 , REG0 , offsetof(struct RObject , as .heap .ivptr ));
927- mov (cb , REG0 , tbl_opnd );
928-
929- // Pop the value to write from the stack
930- x86opnd_t stack_top = ctx_stack_pop (ctx , 1 );
931- mov (cb , REG1 , stack_top );
932-
933- // Bail if this is a heap object, because this needs a write barrier
934- test (cb , REG1 , imm_opnd (RUBY_IMMEDIATE_MASK ));
935- jz_ptr (cb , side_exit );
936-
937- // Write the ivar to the extended table
938- x86opnd_t ivar_opnd = mem_opnd (64 , REG0 , sizeof (VALUE ) * ivar_index );
939- mov (cb , ivar_opnd , REG1 );
1008+ jit_guard_known_klass (jit , ctx , comptime_val_klass , OPND_SELF , GETIVAR_MAX_DEPTH , side_exit );
9401009
941- return YJIT_KEEP_COMPILING ;
1010+ return gen_set_ivar ( jit , ctx , GETIVAR_MAX_DEPTH , comptime_val , ivar_name , OPND_SELF , side_exit ) ;
9421011}
9431012
9441013static void
0 commit comments