@@ -2139,6 +2139,16 @@ interp_handle_intrinsics (TransformData *td, MonoMethod *target_method, MonoClas
 				return TRUE;
 			}
 		}
+	} else if (in_corlib && !strcmp (klass_name_space, "System") && (!strcmp (klass_name, "BitConverter"))) {
+		if (!strcmp (tm, "DoubleToInt64Bits") || !strcmp (tm, "DoubleToUInt64Bits")) {
+			*op = MINT_MOV_8;
+		} else if (!strcmp (tm, "Int32BitsToSingle") || !strcmp (tm, "UInt32BitsToSingle")) {
+			*op = MINT_MOV_4;
+		} else if (!strcmp (tm, "Int64BitsToDouble") || !strcmp (tm, "UInt64BitsToDouble")) {
+			*op = MINT_MOV_8;
+		} else if (!strcmp (tm, "SingleToInt32Bits") || !strcmp (tm, "SingleToUInt32Bits")) {
+			*op = MINT_MOV_4;
+		}
 	} else if (in_corlib && !strcmp (klass_name_space, "System.Runtime.CompilerServices") && !strcmp (klass_name, "Unsafe")) {
 		if (!strcmp (tm, "AddByteOffset"))
 #if SIZEOF_VOID_P == 4
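
The four BitConverter mappings above work because each of these APIs is defined as a pure bit-for-bit reinterpretation between a floating-point value and a same-sized integer, and interpreter stack slots are untyped storage, so a plain 4- or 8-byte slot copy (MINT_MOV_4 / MINT_MOV_8) already implements them. A minimal standalone C sketch of the semantics being matched (illustrative only, not interpreter code):

#include <stdint.h>
#include <string.h>

/* Illustrative equivalent of BitConverter.DoubleToInt64Bits: copy the
 * 8 bytes of the double unchanged into an int64_t, which is exactly
 * what a MINT_MOV_8 between two stack slots achieves. */
static int64_t
double_to_int64_bits (double value)
{
	int64_t bits;
	memcpy (&bits, &value, sizeof (bits));
	return bits;
}

/* Likewise for the 4-byte pairs handled by MINT_MOV_4. */
static float
int32_bits_to_single (int32_t value)
{
	float result;
	memcpy (&result, &value, sizeof (result));
	return result;
}
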
@@ -2156,6 +2166,73 @@ interp_handle_intrinsics (TransformData *td, MonoMethod *target_method, MonoClas
 			return TRUE;
 		} else if (!strcmp (tm, "AreSame")) {
 			*op = MINT_CEQ_P;
+		} else if (!strcmp (tm, "BitCast")) {
+			MonoGenericContext *ctx = mono_method_get_context (target_method);
+			g_assert (ctx);
+			g_assert (ctx->method_inst);
+			g_assert (ctx->method_inst->type_argc == 2);
+			g_assert (csignature->param_count == 1);
+
+			// We explicitly do not handle gsharedvt as it is meant as a slow fallback strategy;
+			// instead we fall back to the managed implementation, which will do the right thing
+
+			MonoType *tfrom = ctx->method_inst->type_argv [0];
+			MonoType *tto = ctx->method_inst->type_argv [1];
+			tfrom = mini_get_underlying_type (tfrom);
+			tto = mini_get_underlying_type (tto);
+
+			// The underlying API always throws for reference type inputs, so we
+			// fall back to the managed implementation to let that handling occur
+
+			if (MONO_TYPE_IS_REFERENCE (tfrom) || MONO_TYPE_IS_REFERENCE (tto)) {
+				return FALSE;
+			}
+
+			MonoClass *tfrom_klass = mono_class_from_mono_type_internal (tfrom);
+			MonoClass *tto_klass = mono_class_from_mono_type_internal (tto);
+
+			// The same applies when the type sizes do not match, as that will always throw,
+			// so it's not an expected case and we can fall back to the managed implementation
+
+			int tfrom_align, tto_align;
+			gint32 size = mono_type_size (tfrom, &tfrom_align);
+			if (size != mono_type_size (tto, &tto_align)) {
+				return FALSE;
+			}
+			g_assert (size < G_MAXUINT16);
+
+			// We have several different move opcodes to handle the data depending on the
+			// source and target types, so detect and optimize the most common ones, falling
+			// back to what is effectively `ReadUnaligned<TTo>(ref As<TFrom, byte>(ref source))`
+			// for anything that can't be special-cased as a potentially zero-cost move.
+
+			if (m_class_is_enumtype (tto_klass)) {
+				tto = mono_class_enum_basetype_internal (tto_klass);
+			}
+
+			int mov_op = interp_get_mov_for_type (mono_mint_type (tto), TRUE);
+
+			if (mov_op == MINT_MOV_VT) {
+				if (size <= 4) {
+					*op = MINT_MOV_4;
+				} else if (size <= 8) {
+					*op = MINT_MOV_8;
+				} else {
+					td->sp--;
+					interp_add_ins (td, MINT_MOV_VT);
+					interp_ins_set_sreg (td->last_ins, td->sp [0].var);
+					push_type_vt (td, tto_klass, size);
+					interp_ins_set_dreg (td->last_ins, td->sp [-1].var);
+					td->last_ins->data [0] = GINT32_TO_UINT16 (size);
+					td->ip += 5;
+					return TRUE;
+				}
+			} else {
+				if (size < 4)
+					return FALSE;
+
+				*op = mov_op;
+			}
 		} else if (!strcmp (tm, "ByteOffset")) {
 #if SIZEOF_VOID_P == 4
 			interp_add_ins (td, MINT_SUB_I4);
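
For context on the MINT_MOV_VT branch of the BitCast code above: value-type targets of at most 4 or 8 bytes are rewritten as plain slot moves, while anything larger gets the explicit MINT_MOV_VT instruction, which behaves like the `ReadUnaligned<TTo>(ref As<TFrom, byte>(ref source))` fallback named in the comment. A standalone C sketch of that decision ladder (the function and variable names are illustrative, not interpreter APIs):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Illustrative sketch of the copy each chosen opcode performs for a
 * bit cast between two types already verified to be the same size:
 * small values travel as whole 4- or 8-byte slots, larger value
 * types as a raw byte copy of `size` bytes. */
static void
bitcast_copy (void *dst, const void *src, size_t size)
{
	if (size <= 4) {
		uint32_t slot = 0;
		memcpy (&slot, src, size);  /* MINT_MOV_4: one 4-byte slot move */
		memcpy (dst, &slot, size);
	} else if (size <= 8) {
		uint64_t slot = 0;
		memcpy (&slot, src, size);  /* MINT_MOV_8: one 8-byte slot move */
		memcpy (dst, &slot, size);
	} else {
		memcpy (dst, src, size);    /* MINT_MOV_VT: generic value-type copy */
	}
}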