From 0e80c686bd22f05b5347ae2162bb2e16e9719e93 Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Thu, 7 Mar 2024 16:39:10 -0800 Subject: [PATCH 01/10] Implement swift lowering algorithm in Mono --- src/mono/mono/metadata/marshal.c | 266 +++++++++++++++++++++++++++++++ src/mono/mono/metadata/marshal.h | 12 ++ 2 files changed, 278 insertions(+) diff --git a/src/mono/mono/metadata/marshal.c b/src/mono/mono/metadata/marshal.c index 1b7d0b4310a2bc..375dd573526307 100644 --- a/src/mono/mono/metadata/marshal.c +++ b/src/mono/mono/metadata/marshal.c @@ -6580,3 +6580,269 @@ mono_wrapper_caches_free (MonoWrapperCaches *cache) free_hash (cache->thunk_invoke_cache); free_hash (cache->unsafe_accessor_cache); } + +typedef enum { + SWIFT_EMPTY, + SWIFT_OPAQUE, + SWIFT_INT64, + SWIFT_FLOAT, + SWIFT_DOUBLE, +} SwiftPhysicalLoweringKind; + +static int get_swift_lowering_alignment (SwiftPhysicalLoweringKind kind) { + switch (kind) { + case SWIFT_INT64: + case SWIFT_DOUBLE: + return 8; + case SWIFT_FLOAT: + return 4; + default: + return 1; + } +} + +static void set_lowering_range(GArray* lowered_bytes, guint32 offset, guint32 size, SwiftPhysicalLoweringKind kind) { + bool force_opaque = false; + + if (offset != ALIGN_TO(offset, get_swift_lowering_alignment(kind))) { + // If the start of the range is not aligned, we need to force the entire range to be opaque. + force_opaque = true; + } + + // Check if any of the range is non-empty. + // If so, we need to force this range to be opaque + // and extend the range mark the existing tag's range as opaque. 
+ + for (guint32 i = 0; i < size; ++i) { + SwiftPhysicalLoweringKind current = g_array_index(lowered_bytes, SwiftPhysicalLoweringKind, offset + i); + if (current != SWIFT_EMPTY && current != kind) { + force_opaque = true; + offset = ALIGN_DOWN_TO(offset, get_swift_lowering_alignment(current)); + size = ALIGN_TO(size + offset, get_swift_lowering_alignment(current)) - offset; + break; + } + } + + if (force_opaque) { + kind = SWIFT_OPAQUE; + } + + for (guint32 i = 0; i < size; ++i) { + g_array_index(lowered_bytes, SwiftPhysicalLoweringKind, offset + i) = kind; + } +} + +static void record_struct_field_physical_lowering (GArray* lowered_bytes, MonoType* type, guint32 offset); + +static void record_inlinearray_struct_physical_lowering (GArray* lowered_bytes, MonoClass* klass, guint32 offset) { + // Get the first field and record its physical lowering N times + MonoClassField* field = mono_class_get_fields_internal (klass, NULL); + MonoType* fieldType = field->type; + for (int i = 0; i < m_class_inlinearray_value(klass); ++i) { + record_struct_field_physical_lowering(lowered_bytes, fieldType, offset + m_field_get_offset(field) + i * mono_type_size(fieldType, NULL)); + } +} + +static void record_struct_physical_lowering (GArray* lowered_bytes, MonoClass* klass, guint32 offset) +{ + if (m_class_is_inlinearray(klass)) { + record_inlinearray_struct_physical_lowering(lowered_bytes, klass, offset); + return; + } + + // Iterate through each field of klass + gpointer iter = NULL; + MonoClassField* field; + while ((field = mono_class_get_fields_internal (klass, &iter))) { + if (field->type->attrs & FIELD_ATTRIBUTE_STATIC) + continue; + if (mono_field_is_deleted (field)) + continue; + + MonoType* fieldType = field->type; + if (fieldType->type == MONO_TYPE_VALUETYPE) { + record_struct_physical_lowering(lowered_bytes, mono_class_from_mono_type_internal(fieldType), offset + m_field_get_offset(field)); + } else { + record_struct_field_physical_lowering(lowered_bytes, fieldType, offset 
+ m_field_get_offset(field)); + } + } +} + +static void record_struct_field_physical_lowering (GArray* lowered_bytes, MonoType* type, guint32 offset) { + if (type->type == MONO_TYPE_VALUETYPE) { + record_struct_physical_lowering(lowered_bytes, mono_class_from_mono_type_internal(type), offset); + } else { + SwiftPhysicalLoweringKind kind = SWIFT_OPAQUE; + if (type->type == MONO_TYPE_I8 || type->type == MONO_TYPE_U8 || type->type == MONO_TYPE_PTR || type->type == MONO_TYPE_FNPTR) { + kind = SWIFT_INT64; + } else if (type->type == MONO_TYPE_R4) { + kind = SWIFT_FLOAT; + } else if (type->type == MONO_TYPE_R8) { + kind = SWIFT_DOUBLE; + } + + set_lowering_range(lowered_bytes, offset, mono_type_size(type, NULL), kind); + } +} + +SwiftPhysicalLowering +mono_marshal_get_swift_physical_lowering (MonoType *type, gboolean native_layout) +{ + g_assert (!native_layout); + SwiftPhysicalLowering lowering = { 0 }; + + // Normalize pointer types to IntPtr. + // We don't need to care about specific pointer types at this ABI level. 
+ if (type->type == MONO_TYPE_PTR || type->type == MONO_TYPE_FNPTR) { + type = m_class_get_byval_arg (mono_defaults.int_class); + } + + if (type->type != MONO_TYPE_VALUETYPE && !mono_type_is_primitive(type)) { + lowering.byReference = FALSE; + return lowering; + } + + MonoClass *klass = mono_class_from_mono_type_internal (type); + + GArray* lowered_bytes = g_array_sized_new(FALSE, TRUE, sizeof(SwiftPhysicalLoweringKind), m_class_get_instance_size(klass)); + + // Loop through all fields and get the physical lowering for each field + record_struct_physical_lowering(lowered_bytes, klass, 0); + + struct _SwiftInterval { + guint32 start; + guint32 size; + SwiftPhysicalLoweringKind kind; + }; + + GArray* intervals = g_array_new(FALSE, TRUE, sizeof(struct _SwiftInterval)); + + // Now we'll build the intervals from the lowered_bytes array + for (int i = 0; i < lowered_bytes->len; ++i) { + // Don't create an interval for empty bytes + if (g_array_index(lowered_bytes, SwiftPhysicalLoweringKind, i) == SWIFT_EMPTY) { + continue; + } + + SwiftPhysicalLoweringKind current = g_array_index(lowered_bytes, SwiftPhysicalLoweringKind, i); + + bool start_new_interval = + // We're at the start of the type + i == 0 + // We're starting a new float (as we're aligned) + || (i == ALIGN_TO(i, 4) && current == SWIFT_FLOAT) + // We're starting a new double or int64_t (as we're aligned) + || (i == ALIGN_TO(i, 8) && (current == SWIFT_DOUBLE || current == SWIFT_INT64)) + // We've changed interval types + || current != g_array_index(lowered_bytes, SwiftPhysicalLoweringKind, i - 1); + + if (start_new_interval) { + struct _SwiftInterval interval = { i, 1, current }; + g_array_append_val(intervals, interval); + } else { + // Extend the current interval + (g_array_index(intervals, struct _SwiftInterval, intervals->len - 1)).size++; + } + } + + // We've now produced the intervals, so we can release the lowered bytes + g_array_free(lowered_bytes, TRUE); + + // Merge opaque intervals that are in the same 
pointer-sized block + for (int i = 0; i < intervals->len - 1; ++i) { + struct _SwiftInterval current = g_array_index(intervals, struct _SwiftInterval, i); + struct _SwiftInterval next = g_array_index(intervals, struct _SwiftInterval, i + 1); + + if (current.kind == SWIFT_OPAQUE && next.kind == SWIFT_OPAQUE && current.start / TARGET_SIZEOF_VOID_P == next.start / TARGET_SIZEOF_VOID_P) { + current.size = next.start + next.size - current.start; + g_array_remove_index(intervals, i + 1); + i--; + } + } + + // Now that we have the intervals, we can calculate the lowering + MonoTypeEnum lowered_types[4]; + guint32 offsets[4]; + guint32 num_lowered_types = 0; + + for (int i = 0; i < intervals->len; ++i, ++num_lowered_types) { + if (num_lowered_types == 4) { + // We can't handle more than 4 fields + lowering.byReference = TRUE; + g_array_free(intervals, TRUE); + return lowering; + } + + struct _SwiftInterval interval = g_array_index(intervals, struct _SwiftInterval, i); + + offsets[num_lowered_types] = interval.start; + + switch (interval.kind) { + case SWIFT_INT64: + lowered_types[num_lowered_types] = MONO_TYPE_I8; + break; + case SWIFT_FLOAT: + lowered_types[num_lowered_types] = MONO_TYPE_R4; + break; + case SWIFT_DOUBLE: + lowered_types[num_lowered_types] = MONO_TYPE_R8; + break; + case SWIFT_OPAQUE: + { + // We need to split the opaque ranges into integer parameters. + // As part of this splitting, we must ensure that we don't introduce alignment padding. + // This lowering algorithm should produce a lowered type sequence that would have the same padding for + // a naturally-aligned struct with the lowered fields as the original type has. + // This algorithm intends to split the opaque range into the least number of lowered elements that covers the entire range. + // The lowered range is allowed to extend past the end of the opaque range (including past the end of the struct), + // but not into the next non-empty interval. 
+ // However, due to the properties of the lowering (the only non-8 byte elements of the lowering are 4-byte floats), + // we'll never encounter a scneario where we need would need to account for a correctly-aligned + // opaque range of > 4 bytes that we must not pad to 8 bytes. + + + // As long as we need to fill more than 4 bytes and the sequence is currently 8-byte aligned, we'll split into 8-byte integers. + // If we have more than 2 bytes but less than 4 and the sequence is 4-byte aligned, we'll use a 4-byte integer to represent the rest of the parameters. + // If we have 2 bytes and the sequence is 2-byte aligned, we'll use a 2-byte integer to represent the rest of the parameters. + // If we have 1 byte, we'll use a 1-byte integer to represent the rest of the parameters. + guint32 opaque_interval_start = interval.start; + guint32 remaining_interval_size = interval.size; + for (;remaining_interval_size > 0; num_lowered_types++) { + if (num_lowered_types == 4) { + // We can't handle more than 4 fields + lowering.byReference = TRUE; + g_array_free(intervals, TRUE); + return lowering; + } + + offsets[num_lowered_types] = opaque_interval_start; + + if (remaining_interval_size > 8 && (opaque_interval_start % 8 == 0)) { + lowered_types[num_lowered_types] = MONO_TYPE_I8; + remaining_interval_size -= 8; + opaque_interval_start += 8; + } else if (remaining_interval_size > 4 && (opaque_interval_start % 4 == 0)) { + lowered_types[num_lowered_types] = MONO_TYPE_I4; + remaining_interval_size -= 4; + opaque_interval_start += 4; + } else if (remaining_interval_size > 2 && (opaque_interval_start % 2 == 0)) { + lowered_types[num_lowered_types] = MONO_TYPE_I2; + remaining_interval_size -= 2; + opaque_interval_start += 2; + } else { + lowered_types[num_lowered_types] = MONO_TYPE_U1; + remaining_interval_size -= 1; + opaque_interval_start += 1; + } + } + } + } + } + + memcpy(lowering.loweredElements, lowered_types, num_lowered_types * sizeof(MonoTypeEnum)); + 
memcpy(lowering.offsets, offsets, num_lowered_types * sizeof(guint32)); + lowering.numLoweredElements = num_lowered_types; + lowering.byReference = FALSE; + + return lowering; +} diff --git a/src/mono/mono/metadata/marshal.h b/src/mono/mono/metadata/marshal.h index 87561efe4673ae..371641d4510891 100644 --- a/src/mono/mono/metadata/marshal.h +++ b/src/mono/mono/metadata/marshal.h @@ -742,4 +742,16 @@ mono_marshal_get_mono_callbacks_for_ilgen (void); GENERATE_TRY_GET_CLASS_WITH_CACHE_DECL (swift_self) GENERATE_TRY_GET_CLASS_WITH_CACHE_DECL (swift_error) +//#ifdef MONO_ARCH_HAVE_SWIFTCALL +typedef struct { + gboolean byReference; + int numLoweredElements; + MonoTypeEnum loweredElements[4]; + uint32_t offsets[4]; +} SwiftPhysicalLowering; + +SwiftPhysicalLowering +mono_marshal_get_swift_physical_lowering (MonoType *type, gboolean native_layout); +//#endif + #endif /* __MONO_MARSHAL_H__ */ From f6653c6bbdf38590d4dae37f39408438ba823aeb Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Thu, 7 Mar 2024 18:40:31 -0800 Subject: [PATCH 02/10] Set SWIFTCALL ifdef --- src/mono/mono/metadata/marshal.c | 2 ++ src/mono/mono/metadata/marshal.h | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/mono/mono/metadata/marshal.c b/src/mono/mono/metadata/marshal.c index 375dd573526307..e8f57adbfaf6d7 100644 --- a/src/mono/mono/metadata/marshal.c +++ b/src/mono/mono/metadata/marshal.c @@ -6581,6 +6581,7 @@ mono_wrapper_caches_free (MonoWrapperCaches *cache) free_hash (cache->unsafe_accessor_cache); } +#ifdef MONO_ARCH_HAVE_SWIFTCALL typedef enum { SWIFT_EMPTY, SWIFT_OPAQUE, @@ -6846,3 +6847,4 @@ mono_marshal_get_swift_physical_lowering (MonoType *type, gboolean native_layout return lowering; } +#endif /* MONO_ARCH_HAVE_SWIFTCALL */ diff --git a/src/mono/mono/metadata/marshal.h b/src/mono/mono/metadata/marshal.h index 371641d4510891..a64c6f87107c27 100644 --- a/src/mono/mono/metadata/marshal.h +++ b/src/mono/mono/metadata/marshal.h @@ -742,7 +742,7 @@ 
mono_marshal_get_mono_callbacks_for_ilgen (void); GENERATE_TRY_GET_CLASS_WITH_CACHE_DECL (swift_self) GENERATE_TRY_GET_CLASS_WITH_CACHE_DECL (swift_error) -//#ifdef MONO_ARCH_HAVE_SWIFTCALL +#ifdef MONO_ARCH_HAVE_SWIFTCALL typedef struct { gboolean byReference; int numLoweredElements; @@ -752,6 +752,6 @@ typedef struct { SwiftPhysicalLowering mono_marshal_get_swift_physical_lowering (MonoType *type, gboolean native_layout); -//#endif +#endif /* MONO_ARCH_HAVE_SWIFTCALL */ #endif /* __MONO_MARSHAL_H__ */ From 35f257a0c812fa7eb56853506a9acbc0613237e0 Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Fri, 8 Mar 2024 10:26:28 -0800 Subject: [PATCH 03/10] PR feedback and fix generic handling --- src/mono/mono/metadata/marshal.c | 36 +++++++++++++++++++++----------- 1 file changed, 24 insertions(+), 12 deletions(-) diff --git a/src/mono/mono/metadata/marshal.c b/src/mono/mono/metadata/marshal.c index e8f57adbfaf6d7..783998f8c209d1 100644 --- a/src/mono/mono/metadata/marshal.c +++ b/src/mono/mono/metadata/marshal.c @@ -6612,7 +6612,7 @@ static void set_lowering_range(GArray* lowered_bytes, guint32 offset, guint32 si // Check if any of the range is non-empty. // If so, we need to force this range to be opaque - // and extend the range mark the existing tag's range as opaque. + // and extend the range to the existing tag's range and mark as opaque in addition to the requested range. for (guint32 i = 0; i < size; ++i) { SwiftPhysicalLoweringKind current = g_array_index(lowered_bytes, SwiftPhysicalLoweringKind, offset + i); @@ -6651,7 +6651,7 @@ static void record_struct_physical_lowering (GArray* lowered_bytes, MonoClass* k return; } - // Iterate through each field of klass + // For each field, we need to record the physical lowering of it. 
gpointer iter = NULL; MonoClassField* field; while ((field = mono_class_get_fields_internal (klass, &iter))) { @@ -6660,21 +6660,26 @@ static void record_struct_physical_lowering (GArray* lowered_bytes, MonoClass* k if (mono_field_is_deleted (field)) continue; - MonoType* fieldType = field->type; - if (fieldType->type == MONO_TYPE_VALUETYPE) { - record_struct_physical_lowering(lowered_bytes, mono_class_from_mono_type_internal(fieldType), offset + m_field_get_offset(field)); - } else { - record_struct_field_physical_lowering(lowered_bytes, fieldType, offset + m_field_get_offset(field)); - } + record_struct_field_physical_lowering(lowered_bytes, field->type, offset + m_field_get_offset(field)); } } static void record_struct_field_physical_lowering (GArray* lowered_bytes, MonoType* type, guint32 offset) { - if (type->type == MONO_TYPE_VALUETYPE) { + // Normalize pointer types to IntPtr and resolve generic classes. + // We don't need to care about specific pointer types at this ABI level. + if (type->type == MONO_TYPE_PTR || type->type == MONO_TYPE_FNPTR) { + type = m_class_get_byval_arg (mono_defaults.int_class); + } + if (type->type == MONO_TYPE_VALUETYPE || (type->type == MONO_TYPE_GENERICINST && mono_type_generic_inst_is_valuetype (type))) { + // If a struct type is encountered, we need to record the physical lowering for each field of that struct recursively record_struct_physical_lowering(lowered_bytes, mono_class_from_mono_type_internal(type), offset); } else { SwiftPhysicalLoweringKind kind = SWIFT_OPAQUE; - if (type->type == MONO_TYPE_I8 || type->type == MONO_TYPE_U8 || type->type == MONO_TYPE_PTR || type->type == MONO_TYPE_FNPTR) { + // The only types that are non-opaque are 64-bit integers, floats, doubles, and vector types. + // We currently don't support vector types, so we'll only handle the first three. 
+ if (type->type == MONO_TYPE_I8 || type->type == MONO_TYPE_U8) { + kind = SWIFT_INT64; + } else if (TARGET_SIZEOF_VOID_P == 8 && (type->type == MONO_TYPE_PTR || type->type == MONO_TYPE_FNPTR)) { kind = SWIFT_INT64; } else if (type->type == MONO_TYPE_R4) { kind = SWIFT_FLOAT; @@ -6689,17 +6694,24 @@ static void record_struct_field_physical_lowering (GArray* lowered_bytes, MonoTy SwiftPhysicalLowering mono_marshal_get_swift_physical_lowering (MonoType *type, gboolean native_layout) { + // TODO: Add support for the native type layout. g_assert (!native_layout); SwiftPhysicalLowering lowering = { 0 }; - // Normalize pointer types to IntPtr. + // Normalize pointer types to IntPtr and resolve generic classes. // We don't need to care about specific pointer types at this ABI level. if (type->type == MONO_TYPE_PTR || type->type == MONO_TYPE_FNPTR) { type = m_class_get_byval_arg (mono_defaults.int_class); } + // Non-value types are illegal at the interop boundary. + if (type->type == MONO_TYPE_GENERICINST && !mono_type_generic_inst_is_valuetype (type)) { + lowering.byReference = TRUE; + return lowering; + } + if (type->type != MONO_TYPE_VALUETYPE && !mono_type_is_primitive(type)) { - lowering.byReference = FALSE; + lowering.byReference = TRUE; return lowering; } From a1f7e6b6925c57788ee627307f9afee707cdd6fd Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Fri, 8 Mar 2024 10:33:18 -0700 Subject: [PATCH 04/10] Use a stack buffer instead of a GArray with a TODO for updating in the future when we support vectors.
--- src/mono/mono/metadata/marshal.c | 39 ++++++++++++++++++-------------- 1 file changed, 22 insertions(+), 17 deletions(-) diff --git a/src/mono/mono/metadata/marshal.c b/src/mono/mono/metadata/marshal.c index 783998f8c209d1..505aaa237cb184 100644 --- a/src/mono/mono/metadata/marshal.c +++ b/src/mono/mono/metadata/marshal.c @@ -6583,7 +6583,7 @@ mono_wrapper_caches_free (MonoWrapperCaches *cache) #ifdef MONO_ARCH_HAVE_SWIFTCALL typedef enum { - SWIFT_EMPTY, + SWIFT_EMPTY = 0, SWIFT_OPAQUE, SWIFT_INT64, SWIFT_FLOAT, @@ -6602,7 +6602,7 @@ static int get_swift_lowering_alignment (SwiftPhysicalLoweringKind kind) { } } -static void set_lowering_range(GArray* lowered_bytes, guint32 offset, guint32 size, SwiftPhysicalLoweringKind kind) { +static void set_lowering_range(guint8* lowered_bytes, guint32 offset, guint32 size, SwiftPhysicalLoweringKind kind) { bool force_opaque = false; if (offset != ALIGN_TO(offset, get_swift_lowering_alignment(kind))) { @@ -6615,7 +6615,7 @@ static void set_lowering_range(GArray* lowered_bytes, guint32 offset, guint32 si // and extend the range to the existing tag's range and mark as opaque in addition to the requested range. 
for (guint32 i = 0; i < size; ++i) { - SwiftPhysicalLoweringKind current = g_array_index(lowered_bytes, SwiftPhysicalLoweringKind, offset + i); + SwiftPhysicalLoweringKind current = (SwiftPhysicalLoweringKind)lowered_bytes[offset + i]; if (current != SWIFT_EMPTY && current != kind) { force_opaque = true; offset = ALIGN_DOWN_TO(offset, get_swift_lowering_alignment(current)); @@ -6628,14 +6628,12 @@ static void set_lowering_range(GArray* lowered_bytes, guint32 offset, guint32 si kind = SWIFT_OPAQUE; } - for (guint32 i = 0; i < size; ++i) { - g_array_index(lowered_bytes, SwiftPhysicalLoweringKind, offset + i) = kind; - } + memset(lowered_bytes + offset, kind, size); } -static void record_struct_field_physical_lowering (GArray* lowered_bytes, MonoType* type, guint32 offset); +static void record_struct_field_physical_lowering (guint8* lowered_bytes, MonoType* type, guint32 offset); -static void record_inlinearray_struct_physical_lowering (GArray* lowered_bytes, MonoClass* klass, guint32 offset) { +static void record_inlinearray_struct_physical_lowering (guint8* lowered_bytes, MonoClass* klass, guint32 offset) { // Get the first field and record its physical lowering N times MonoClassField* field = mono_class_get_fields_internal (klass, NULL); MonoType* fieldType = field->type; @@ -6644,7 +6642,7 @@ static void record_inlinearray_struct_physical_lowering (GArray* lowered_bytes, } } -static void record_struct_physical_lowering (GArray* lowered_bytes, MonoClass* klass, guint32 offset) +static void record_struct_physical_lowering (guint8* lowered_bytes, MonoClass* klass, guint32 offset) { if (m_class_is_inlinearray(klass)) { record_inlinearray_struct_physical_lowering(lowered_bytes, klass, offset); @@ -6664,7 +6662,7 @@ static void record_struct_physical_lowering (GArray* lowered_bytes, MonoClass* k } } -static void record_struct_field_physical_lowering (GArray* lowered_bytes, MonoType* type, guint32 offset) { +static void record_struct_field_physical_lowering (guint8* 
lowered_bytes, MonoType* type, guint32 offset) { // Normalize pointer types to IntPtr and resolve generic classes. // We don't need to care about specific pointer types at this ABI level. if (type->type == MONO_TYPE_PTR || type->type == MONO_TYPE_FNPTR) { @@ -6717,7 +6715,17 @@ mono_marshal_get_swift_physical_lowering (MonoType *type, gboolean native_layout MonoClass *klass = mono_class_from_mono_type_internal (type); - GArray* lowered_bytes = g_array_sized_new(FALSE, TRUE, sizeof(SwiftPhysicalLoweringKind), m_class_get_instance_size(klass)); + // TODO: We currently don't support vector types, so we can say that the maximum size of a non-byreference struct + // is 4 * PointerSize. + // Strictly, this is inaccurate in the case where a struct has a fully-empty 8 bytes of padding using explicit layout, + // but that's not possible in the Swift layout algorithm. + + if (m_class_get_instance_size(klass) > 4 * TARGET_SIZEOF_VOID_P) { + lowering.byReference = TRUE; + return lowering; + } + + guint8 lowered_bytes[TARGET_SIZEOF_VOID_P * 4] = { 0 }; // Loop through all fields and get the physical lowering for each field record_struct_physical_lowering(lowered_bytes, klass, 0); @@ -6733,11 +6741,11 @@ mono_marshal_get_swift_physical_lowering (MonoType *type, gboolean native_layout // Now we'll build the intervals from the lowered_bytes array for (int i = 0; i < lowered_bytes->len; ++i) { // Don't create an interval for empty bytes - if (g_array_index(lowered_bytes, SwiftPhysicalLoweringKind, i) == SWIFT_EMPTY) { + if (lowered_bytes[i] == SWIFT_EMPTY) { continue; } - SwiftPhysicalLoweringKind current = g_array_index(lowered_bytes, SwiftPhysicalLoweringKind, i); + SwiftPhysicalLoweringKind current = (SwiftPhysicalLoweringKind)lowered_bytes[i]; bool start_new_interval = // We're at the start of the type @@ -6747,7 +6755,7 @@ mono_marshal_get_swift_physical_lowering (MonoType *type, gboolean native_layout // We're starting a new double or int64_t (as we're aligned) || (i == 
ALIGN_TO(i, 8) && (current == SWIFT_DOUBLE || current == SWIFT_INT64)) // We've changed interval types - || current != g_array_index(lowered_bytes, SwiftPhysicalLoweringKind, i - 1); + || current != lowered_bytes[i - 1]; if (start_new_interval) { struct _SwiftInterval interval = { i, 1, current }; @@ -6758,9 +6766,6 @@ mono_marshal_get_swift_physical_lowering (MonoType *type, gboolean native_layout } } - // We've now produced the intervals, so we can release the lowered bytes - g_array_free(lowered_bytes, TRUE); - // Merge opaque intervals that are in the same pointer-sized block for (int i = 0; i < intervals->len - 1; ++i) { struct _SwiftInterval current = g_array_index(intervals, struct _SwiftInterval, i); From 7982213a3258e6adab8c0fc50c89ffd7e30e9a60 Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Mon, 11 Mar 2024 14:28:46 -0700 Subject: [PATCH 05/10] Style feedback --- src/mono/mono/metadata/marshal.c | 20 +++++++++----------- src/mono/mono/metadata/marshal.h | 8 +++----- 2 files changed, 12 insertions(+), 16 deletions(-) diff --git a/src/mono/mono/metadata/marshal.c b/src/mono/mono/metadata/marshal.c index 505aaa237cb184..22827fefad17b5 100644 --- a/src/mono/mono/metadata/marshal.c +++ b/src/mono/mono/metadata/marshal.c @@ -6581,7 +6581,6 @@ mono_wrapper_caches_free (MonoWrapperCaches *cache) free_hash (cache->unsafe_accessor_cache); } -#ifdef MONO_ARCH_HAVE_SWIFTCALL typedef enum { SWIFT_EMPTY = 0, SWIFT_OPAQUE, @@ -6704,24 +6703,24 @@ mono_marshal_get_swift_physical_lowering (MonoType *type, gboolean native_layout // Non-value types are illegal at the interop boundary. 
if (type->type == MONO_TYPE_GENERICINST && !mono_type_generic_inst_is_valuetype (type)) { - lowering.byReference = TRUE; + lowering.by_reference = TRUE; return lowering; } if (type->type != MONO_TYPE_VALUETYPE && !mono_type_is_primitive(type)) { - lowering.byReference = TRUE; + lowering.by_reference = TRUE; return lowering; } MonoClass *klass = mono_class_from_mono_type_internal (type); - // TODO: We currently don't support vector types, so we can say that the maximum size of a non-byreference struct + // TODO: We currently don't support vector types, so we can say that the maximum size of a non-by_reference struct // is 4 * PointerSize. // Strictly, this is inaccurate in the case where a struct has a fully-empty 8 bytes of padding using explicit layout, // but that's not possible in the Swift layout algorithm. if (m_class_get_instance_size(klass) > 4 * TARGET_SIZEOF_VOID_P) { - lowering.byReference = TRUE; + lowering.by_reference = TRUE; return lowering; } @@ -6786,7 +6785,7 @@ mono_marshal_get_swift_physical_lowering (MonoType *type, gboolean native_layout for (int i = 0; i < intervals->len; ++i, ++num_lowered_types) { if (num_lowered_types == 4) { // We can't handle more than 4 fields - lowering.byReference = TRUE; + lowering.by_reference = TRUE; g_array_free(intervals, TRUE); return lowering; } @@ -6828,7 +6827,7 @@ mono_marshal_get_swift_physical_lowering (MonoType *type, gboolean native_layout for (;remaining_interval_size > 0; num_lowered_types++) { if (num_lowered_types == 4) { // We can't handle more than 4 fields - lowering.byReference = TRUE; + lowering.by_reference = TRUE; g_array_free(intervals, TRUE); return lowering; } @@ -6857,11 +6856,10 @@ mono_marshal_get_swift_physical_lowering (MonoType *type, gboolean native_layout } } - memcpy(lowering.loweredElements, lowered_types, num_lowered_types * sizeof(MonoTypeEnum)); + memcpy(lowering.lowered_elements, lowered_types, num_lowered_types * sizeof(MonoTypeEnum)); memcpy(lowering.offsets, offsets, 
num_lowered_types * sizeof(guint32)); - lowering.numLoweredElements = num_lowered_types; - lowering.byReference = FALSE; + lowering.num_lowered_elements = num_lowered_types; + lowering.by_reference = FALSE; return lowering; } -#endif /* MONO_ARCH_HAVE_SWIFTCALL */ diff --git a/src/mono/mono/metadata/marshal.h b/src/mono/mono/metadata/marshal.h index a64c6f87107c27..8d545fcc25dea6 100644 --- a/src/mono/mono/metadata/marshal.h +++ b/src/mono/mono/metadata/marshal.h @@ -742,16 +742,14 @@ mono_marshal_get_mono_callbacks_for_ilgen (void); GENERATE_TRY_GET_CLASS_WITH_CACHE_DECL (swift_self) GENERATE_TRY_GET_CLASS_WITH_CACHE_DECL (swift_error) -#ifdef MONO_ARCH_HAVE_SWIFTCALL typedef struct { - gboolean byReference; - int numLoweredElements; - MonoTypeEnum loweredElements[4]; + gboolean by_reference; + int num_lowered_elements; + MonoTypeEnum lowered_elements[4]; uint32_t offsets[4]; } SwiftPhysicalLowering; SwiftPhysicalLowering mono_marshal_get_swift_physical_lowering (MonoType *type, gboolean native_layout); -#endif /* MONO_ARCH_HAVE_SWIFTCALL */ #endif /* __MONO_MARSHAL_H__ */ From 7b6788243c6bb599ff477a0290ec07aad1963ab1 Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Tue, 12 Mar 2024 12:00:00 -0700 Subject: [PATCH 06/10] Copy adjustments from the CoreCLR + RyuJIT integration PR needed to get tests to pass --- src/mono/mono/metadata/marshal.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/src/mono/mono/metadata/marshal.c b/src/mono/mono/metadata/marshal.c index 22827fefad17b5..c5e5eb740c8f46 100644 --- a/src/mono/mono/metadata/marshal.c +++ b/src/mono/mono/metadata/marshal.c @@ -6782,7 +6782,7 @@ mono_marshal_get_swift_physical_lowering (MonoType *type, gboolean native_layout guint32 offsets[4]; guint32 num_lowered_types = 0; - for (int i = 0; i < intervals->len; ++i, ++num_lowered_types) { + for (int i = 0; i < intervals->len; ++i) { if (num_lowered_types == 4) { // We can't handle more than 4 fields lowering.by_reference = 
TRUE; @@ -6796,13 +6796,13 @@ mono_marshal_get_swift_physical_lowering (MonoType *type, gboolean native_layout switch (interval.kind) { case SWIFT_INT64: - lowered_types[num_lowered_types] = MONO_TYPE_I8; + lowered_types[num_lowered_types++] = MONO_TYPE_I8; break; case SWIFT_FLOAT: - lowered_types[num_lowered_types] = MONO_TYPE_R4; + lowered_types[num_lowered_types++] = MONO_TYPE_R4; break; case SWIFT_DOUBLE: - lowered_types[num_lowered_types] = MONO_TYPE_R8; + lowered_types[num_lowered_types++] = MONO_TYPE_R8; break; case SWIFT_OPAQUE: { @@ -6823,8 +6823,9 @@ mono_marshal_get_swift_physical_lowering (MonoType *type, gboolean native_layout // If we have 2 bytes and the sequence is 2-byte aligned, we'll use a 2-byte integer to represent the rest of the parameters. // If we have 1 byte, we'll use a 1-byte integer to represent the rest of the parameters. guint32 opaque_interval_start = interval.start; - guint32 remaining_interval_size = interval.size; - for (;remaining_interval_size > 0; num_lowered_types++) { + // The remaining size here may become negative, so use a signed type. 
+ gint32 remaining_interval_size = (gint32)interval.size; + while (remaining_interval_size > 0) { if (num_lowered_types == 4) { // We can't handle more than 4 fields lowering.by_reference = TRUE; @@ -6851,6 +6852,8 @@ mono_marshal_get_swift_physical_lowering (MonoType *type, gboolean native_layout remaining_interval_size -= 1; opaque_interval_start += 1; } + + num_lowered_types++; } } } From 0ee0969861e8da68b8352dda9a420e9ec3d85064 Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Tue, 12 Mar 2024 12:01:25 -0700 Subject: [PATCH 07/10] Fix bounds check --- src/mono/mono/metadata/marshal.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/mono/mono/metadata/marshal.c b/src/mono/mono/metadata/marshal.c index c5e5eb740c8f46..4edeb9faaaa35c 100644 --- a/src/mono/mono/metadata/marshal.c +++ b/src/mono/mono/metadata/marshal.c @@ -6738,7 +6738,8 @@ mono_marshal_get_swift_physical_lowering (MonoType *type, gboolean native_layout GArray* intervals = g_array_new(FALSE, TRUE, sizeof(struct _SwiftInterval)); // Now we'll build the intervals from the lowered_bytes array - for (int i = 0; i < lowered_bytes->len; ++i) { + int instance_size = m_class_get_instance_size(klass); + for (int i = 0; i < instance_size; ++i) { // Don't create an interval for empty bytes if (lowered_bytes[i] == SWIFT_EMPTY) { continue; From 50ca6f6819296574b206af9c519de772457915d9 Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Wed, 13 Mar 2024 11:45:03 -0700 Subject: [PATCH 08/10] Move constant expression out of if to make MSVC happy --- src/mono/mono/metadata/marshal.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/mono/mono/metadata/marshal.c b/src/mono/mono/metadata/marshal.c index 4edeb9faaaa35c..f4893e5503882d 100644 --- a/src/mono/mono/metadata/marshal.c +++ b/src/mono/mono/metadata/marshal.c @@ -6676,9 +6676,13 @@ static void record_struct_field_physical_lowering (guint8* lowered_bytes, MonoTy // We currently don't support vector 
types, so we'll only handle the first three. if (type->type == MONO_TYPE_I8 || type->type == MONO_TYPE_U8) { kind = SWIFT_INT64; - } else if (TARGET_SIZEOF_VOID_P == 8 && (type->type == MONO_TYPE_PTR || type->type == MONO_TYPE_FNPTR)) { + } +#if TARGET_SIZEOF_VOID_P == 8 + else if (type->type == MONO_TYPE_PTR || type->type == MONO_TYPE_FNPTR) { kind = SWIFT_INT64; - } else if (type->type == MONO_TYPE_R4) { + } +#endif + else if (type->type == MONO_TYPE_R4) { kind = SWIFT_FLOAT; } else if (type->type == MONO_TYPE_R8) { kind = SWIFT_DOUBLE; From f09298eb0f18ddd0cbece31bb57f6102fe4f1bd9 Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Wed, 13 Mar 2024 16:12:41 -0700 Subject: [PATCH 09/10] Update src/mono/mono/metadata/marshal.c --- src/mono/mono/metadata/marshal.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/mono/mono/metadata/marshal.c b/src/mono/mono/metadata/marshal.c index f4893e5503882d..5da4c58890a295 100644 --- a/src/mono/mono/metadata/marshal.c +++ b/src/mono/mono/metadata/marshal.c @@ -6709,9 +6709,7 @@ mono_marshal_get_swift_physical_lowering (MonoType *type, gboolean native_layout if (type->type == MONO_TYPE_GENERICINST && !mono_type_generic_inst_is_valuetype (type)) { lowering.by_reference = TRUE; return lowering; - } - - if (type->type != MONO_TYPE_VALUETYPE && !mono_type_is_primitive(type)) { + } else if (type->type != MONO_TYPE_VALUETYPE && !mono_type_is_primitive(type)) { lowering.by_reference = TRUE; return lowering; } From 6c5bedd2b0e8a016c6b4767f240c0435436f7a00 Mon Sep 17 00:00:00 2001 From: Jeremy Koritzinsky Date: Fri, 15 Mar 2024 12:12:23 -0700 Subject: [PATCH 10/10] PR feedback --- src/mono/mono/metadata/marshal.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/mono/mono/metadata/marshal.c b/src/mono/mono/metadata/marshal.c index 5da4c58890a295..cf0f9f8094ae26 100644 --- a/src/mono/mono/metadata/marshal.c +++ b/src/mono/mono/metadata/marshal.c @@ -6678,7 +6678,8 @@ static void 
record_struct_field_physical_lowering (guint8* lowered_bytes, MonoTy kind = SWIFT_INT64; } #if TARGET_SIZEOF_VOID_P == 8 - else if (type->type == MONO_TYPE_PTR || type->type == MONO_TYPE_FNPTR) { + else if (type->type == MONO_TYPE_PTR || type->type == MONO_TYPE_FNPTR + || type->type == MONO_TYPE_I || type->type == MONO_TYPE_U) { kind = SWIFT_INT64; } #endif @@ -6817,7 +6818,7 @@ mono_marshal_get_swift_physical_lowering (MonoType *type, gboolean native_layout // The lowered range is allowed to extend past the end of the opaque range (including past the end of the struct), // but not into the next non-empty interval. // However, due to the properties of the lowering (the only non-8 byte elements of the lowering are 4-byte floats), - we'll never encounter a scneario where we need would need to account for a correctly-aligned + we'll never encounter a scenario where we would need to account for a correctly-aligned // opaque range of > 4 bytes that we must not pad to 8 bytes.