diff --git a/src/coreclr/System.Private.CoreLib/src/System/Buffer.CoreCLR.cs b/src/coreclr/System.Private.CoreLib/src/System/Buffer.CoreCLR.cs index 68e4d777aab2f3..2a375e74035f0d 100644 --- a/src/coreclr/System.Private.CoreLib/src/System/Buffer.CoreCLR.cs +++ b/src/coreclr/System.Private.CoreLib/src/System/Buffer.CoreCLR.cs @@ -9,78 +9,15 @@ namespace System { public partial class Buffer { - // Non-inlinable wrapper around the QCall that avoids polluting the fast path - // with P/Invoke prolog/epilog. - [MethodImpl(MethodImplOptions.NoInlining)] - internal static unsafe void _ZeroMemory(ref byte b, nuint byteLength) - { - fixed (byte* bytePointer = &b) - { - __ZeroMemory(bytePointer, byteLength); - } - } - [LibraryImport(RuntimeHelpers.QCall, EntryPoint = "Buffer_Clear")] private static unsafe partial void __ZeroMemory(void* b, nuint byteLength); - // The maximum block size to for __BulkMoveWithWriteBarrier FCall. This is required to avoid GC starvation. -#if DEBUG // Stress the mechanism in debug builds - private const uint BulkMoveWithWriteBarrierChunk = 0x400; -#else - private const uint BulkMoveWithWriteBarrierChunk = 0x4000; -#endif - - internal static void BulkMoveWithWriteBarrier(ref byte destination, ref byte source, nuint byteCount) - { - if (byteCount <= BulkMoveWithWriteBarrierChunk) - __BulkMoveWithWriteBarrier(ref destination, ref source, byteCount); - else - _BulkMoveWithWriteBarrier(ref destination, ref source, byteCount); - } - -#pragma warning disable IDE0060 // https://github.com/dotnet/roslyn-analyzers/issues/6228 - // Non-inlinable wrapper around the loop for copying large blocks in chunks - [MethodImpl(MethodImplOptions.NoInlining)] - private static void _BulkMoveWithWriteBarrier(ref byte destination, ref byte source, nuint byteCount) - { - Debug.Assert(byteCount > BulkMoveWithWriteBarrierChunk); - - if (Unsafe.AreSame(ref source, ref destination)) - return; - - // This is equivalent to: (destination - source) >= byteCount || (destination - 
source) < 0 - if ((nuint)(nint)Unsafe.ByteOffset(ref source, ref destination) >= byteCount) - { - // Copy forwards - do - { - byteCount -= BulkMoveWithWriteBarrierChunk; - __BulkMoveWithWriteBarrier(ref destination, ref source, BulkMoveWithWriteBarrierChunk); - destination = ref Unsafe.AddByteOffset(ref destination, BulkMoveWithWriteBarrierChunk); - source = ref Unsafe.AddByteOffset(ref source, BulkMoveWithWriteBarrierChunk); - } - while (byteCount > BulkMoveWithWriteBarrierChunk); - } - else - { - // Copy backwards - do - { - byteCount -= BulkMoveWithWriteBarrierChunk; - __BulkMoveWithWriteBarrier(ref Unsafe.AddByteOffset(ref destination, byteCount), ref Unsafe.AddByteOffset(ref source, byteCount), BulkMoveWithWriteBarrierChunk); - } - while (byteCount > BulkMoveWithWriteBarrierChunk); - } - __BulkMoveWithWriteBarrier(ref destination, ref source, byteCount); - } -#pragma warning restore IDE0060 // https://github.com/dotnet/roslyn-analyzers/issues/6228 + [LibraryImport(RuntimeHelpers.QCall, EntryPoint = "Buffer_MemMove")] + private static unsafe partial void __Memmove(byte* dest, byte* src, nuint len); [MethodImpl(MethodImplOptions.InternalCall)] private static extern void __BulkMoveWithWriteBarrier(ref byte destination, ref byte source, nuint byteCount); - [LibraryImport(RuntimeHelpers.QCall, EntryPoint = "Buffer_MemMove")] - private static unsafe partial void __Memmove(byte* dest, byte* src, nuint len); - // Used by ilmarshalers.cpp internal static unsafe void Memcpy(byte* dest, byte* src, int len) { @@ -96,26 +33,5 @@ internal static unsafe void Memcpy(byte* pDest, int destIndex, byte[] src, int s Memmove(ref *(pDest + (uint)destIndex), ref Unsafe.Add(ref MemoryMarshal.GetArrayDataReference(src), (nint)(uint)srcIndex /* force zero-extension */), (uint)len); } - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal static void Memmove(ref T destination, ref T source, nuint elementCount) - { - if (!RuntimeHelpers.IsReferenceOrContainsReferences()) - { - 
// Blittable memmove - Memmove( - ref Unsafe.As(ref destination), - ref Unsafe.As(ref source), - elementCount * (nuint)Unsafe.SizeOf()); - } - else - { - // Non-blittable memmove - BulkMoveWithWriteBarrier( - ref Unsafe.As(ref destination), - ref Unsafe.As(ref source), - elementCount * (nuint)Unsafe.SizeOf()); - } - } } } diff --git a/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/InternalCalls.cs b/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/InternalCalls.cs index 9cfde639cb60b7..6cb63b9d60d91a 100644 --- a/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/InternalCalls.cs +++ b/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/InternalCalls.cs @@ -144,10 +144,6 @@ internal static int RhEndNoGCRegion() internal static extern unsafe object RhpNewFastMisalign(MethodTable * pEEType); #endif // FEATURE_64BIT_ALIGNMENT - [RuntimeImport(Redhawk.BaseName, "RhpCopyObjectContents")] - [MethodImpl(MethodImplOptions.InternalCall)] - internal static extern unsafe void RhpCopyObjectContents(object objDest, object objSrc); - [RuntimeImport(Redhawk.BaseName, "RhpAssignRef")] [MethodImpl(MethodImplOptions.InternalCall)] internal static extern unsafe void RhpAssignRef(ref object address, object obj); diff --git a/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/RuntimeExports.cs b/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/RuntimeExports.cs index 4a4ac45f7d5f2c..c6ab819a841673 100644 --- a/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/RuntimeExports.cs +++ b/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/RuntimeExports.cs @@ -277,21 +277,6 @@ public static unsafe void RhUnbox(object? 
obj, ref byte data, MethodTable* pUnbo } } - [RuntimeExport("RhMemberwiseClone")] - public static unsafe object RhMemberwiseClone(object src) - { - object objClone; - - if (src.GetMethodTable()->IsArray) - objClone = RhNewArray(src.GetMethodTable(), Unsafe.As(src).Length); - else - objClone = RhNewObject(src.GetMethodTable()); - - InternalCalls.RhpCopyObjectContents(objClone, src); - - return objClone; - } - [RuntimeExport("RhGetCurrentThreadStackTrace")] [MethodImpl(MethodImplOptions.NoInlining)] // Ensures that the RhGetCurrentThreadStackTrace frame is always present public static unsafe int RhGetCurrentThreadStackTrace(IntPtr[] outputBuffer) diff --git a/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/RuntimeImports.cs b/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/RuntimeImports.cs index 2d7aac8213b14d..76835dd8b7ed95 100644 --- a/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/RuntimeImports.cs +++ b/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/RuntimeImports.cs @@ -43,11 +43,15 @@ internal static IntPtr RhGetModuleSection(TypeManagerHandle module, ReadyToRunSe [RuntimeImport(RuntimeLibrary, "RhNewObject")] internal static extern object RhNewObject(EETypePtr pEEType); + // Move memory which may be on the heap which may have object references in it. + // In general, a memcpy on the heap is unsafe, but this is able to perform the + // correct write barrier such that the GC is not incorrectly impacted. + // NOTE: it is only ok to use this directly when copying small chunks of memory (like boxing a struct) + // otherwise use helpers from System.Buffer to avoid running uninterruptible code for too long [MethodImplAttribute(MethodImplOptions.InternalCall)] [RuntimeImport(RuntimeLibrary, "RhBulkMoveWithWriteBarrier")] internal static extern unsafe void RhBulkMoveWithWriteBarrier(ref byte dmem, ref byte smem, uint size); - // Allocate handle. 
[MethodImpl(MethodImplOptions.InternalCall)] [RuntimeImport(RuntimeLibrary, "RhpHandleAlloc")] diff --git a/src/coreclr/nativeaot/Runtime/gcrhenv.cpp b/src/coreclr/nativeaot/Runtime/gcrhenv.cpp index 6d8b819b3a096d..56864387123e68 100644 --- a/src/coreclr/nativeaot/Runtime/gcrhenv.cpp +++ b/src/coreclr/nativeaot/Runtime/gcrhenv.cpp @@ -575,34 +575,18 @@ uint32_t RedhawkGCInterface::GetGCDescSize(void * pType) return (uint32_t)CGCDesc::GetCGCDescFromMT(pMT)->GetSize(); } -COOP_PINVOKE_HELPER(void, RhpCopyObjectContents, (Object* pobjDest, Object* pobjSrc)) -{ - size_t cbDest = pobjDest->GetSize() - sizeof(ObjHeader); - size_t cbSrc = pobjSrc->GetSize() - sizeof(ObjHeader); - if (cbSrc != cbDest) - return; - - ASSERT(pobjDest->get_EEType()->HasReferenceFields() == pobjSrc->get_EEType()->HasReferenceFields()); - - if (pobjDest->get_EEType()->HasReferenceFields()) - { - GCSafeCopyMemoryWithWriteBarrier(pobjDest, pobjSrc, cbDest); - } - else - { - memcpy(pobjDest, pobjSrc, cbDest); - } -} - COOP_PINVOKE_HELPER(FC_BOOL_RET, RhCompareObjectContentsAndPadding, (Object* pObj1, Object* pObj2)) { ASSERT(pObj1->get_EEType()->IsEquivalentTo(pObj2->get_EEType())); + ASSERT(pObj1->get_EEType()->IsValueType()); + MethodTable * pEEType = pObj1->get_EEType(); size_t cbFields = pEEType->get_BaseSize() - (sizeof(ObjHeader) + sizeof(MethodTable*)); uint8_t * pbFields1 = (uint8_t*)pObj1 + sizeof(MethodTable*); uint8_t * pbFields2 = (uint8_t*)pObj2 + sizeof(MethodTable*); + // memcmp is ok in a COOP method as we are comparing structs which are typically small. 
FC_RETURN_BOOL(memcmp(pbFields1, pbFields2, cbFields) == 0); } diff --git a/src/coreclr/nativeaot/System.Private.CoreLib/src/Internal/Runtime/Augments/RuntimeAugments.cs b/src/coreclr/nativeaot/System.Private.CoreLib/src/Internal/Runtime/Augments/RuntimeAugments.cs index 970a2b1e1b5bbe..8ee2c4781437ee 100644 --- a/src/coreclr/nativeaot/System.Private.CoreLib/src/Internal/Runtime/Augments/RuntimeAugments.cs +++ b/src/coreclr/nativeaot/System.Private.CoreLib/src/Internal/Runtime/Augments/RuntimeAugments.cs @@ -846,14 +846,6 @@ public static unsafe RuntimeTypeHandle GetRuntimeTypeHandleFromObjectReference(o return new RuntimeTypeHandle(obj.GetEETypePtr()); } - // Move memory which may be on the heap which may have object references in it. - // In general, a memcpy on the heap is unsafe, but this is able to perform the - // correct write barrier such that the GC is not incorrectly impacted. - public static unsafe void BulkMoveWithWriteBarrier(IntPtr dmem, IntPtr smem, int size) - { - RuntimeImports.RhBulkMoveWithWriteBarrier(ref *(byte*)dmem.ToPointer(), ref *(byte*)smem.ToPointer(), (uint)size); - } - public static IntPtr GetUniversalTransitionThunk() { return RuntimeImports.RhGetUniversalTransitionThunk(); diff --git a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Buffer.NativeAot.cs b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Buffer.NativeAot.cs index 1cd5d7c97c719e..4dd685c522136a 100644 --- a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Buffer.NativeAot.cs +++ b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Buffer.NativeAot.cs @@ -10,45 +10,16 @@ namespace System { public static partial class Buffer { - // Non-inlinable wrapper around the QCall that avoids polluting the fast path - // with P/Invoke prolog/epilog. 
- [MethodImpl(MethodImplOptions.NoInlining)] - internal static unsafe void _ZeroMemory(ref byte b, nuint byteLength) - { - fixed (byte* bytePointer = &b) - { - RuntimeImports.memset(bytePointer, 0, byteLength); - } - } - - internal static void BulkMoveWithWriteBarrier(ref byte dmem, ref byte smem, nuint size) - => RuntimeImports.RhBulkMoveWithWriteBarrier(ref dmem, ref smem, size); + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static unsafe void __ZeroMemory(void* b, nuint byteLength) => + RuntimeImports.memset((byte*)b, 0, byteLength); [MethodImpl(MethodImplOptions.AggressiveInlining)] private static unsafe void __Memmove(byte* dest, byte* src, nuint len) => RuntimeImports.memmove(dest, src, len); - // This method has different signature for x64 and other platforms and is done for performance reasons. [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal static unsafe void Memmove(ref T destination, ref T source, nuint elementCount) - { - if (!RuntimeHelpers.IsReferenceOrContainsReferences()) - { - // Blittable memmove - - Memmove( - ref Unsafe.As(ref destination), - ref Unsafe.As(ref source), - elementCount * (nuint)Unsafe.SizeOf()); - } - else - { - // Non-blittable memmove - RuntimeImports.RhBulkMoveWithWriteBarrier( - ref Unsafe.As(ref destination), - ref Unsafe.As(ref source), - elementCount * (nuint)Unsafe.SizeOf()); - } - } + private static void __BulkMoveWithWriteBarrier(ref byte destination, ref byte source, nuint byteCount) => + RuntimeImports.RhBulkMoveWithWriteBarrier(ref destination, ref source, byteCount); } } diff --git a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Object.NativeAot.cs b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Object.NativeAot.cs index d803542e6b7185..87ff2a7476033e 100644 --- a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Object.NativeAot.cs +++ b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Object.NativeAot.cs @@ -26,9 +26,24 @@ public Type GetType() } 
[Intrinsic] - protected object MemberwiseClone() + protected internal unsafe object MemberwiseClone() { - return RuntimeImports.RhMemberwiseClone(this); + object clone = this.GetMethodTable()->IsArray ? + RuntimeExports.RhNewArray(this.GetMethodTable(), Unsafe.As(this).Length) : + RuntimeExports.RhNewObject(this.GetMethodTable()); + + // copy contents of "this" to the clone + + nuint byteCount = RuntimeHelpers.GetRawObjectDataSize(this); + ref byte src = ref this.GetRawData(); + ref byte dst = ref clone.GetRawData(); + + if (this.GetMethodTable()->HasGCPointers) + Buffer.BulkMoveWithWriteBarrier(ref dst, ref src, byteCount); + else + Buffer.Memmove(ref dst, ref src, byteCount); + + return clone; } } } diff --git a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Runtime/CompilerServices/RuntimeHelpers.NativeAot.cs b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Runtime/CompilerServices/RuntimeHelpers.NativeAot.cs index 86a87d6a80a8d1..5c6bf96ff7049c 100644 --- a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Runtime/CompilerServices/RuntimeHelpers.NativeAot.cs +++ b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Runtime/CompilerServices/RuntimeHelpers.NativeAot.cs @@ -72,7 +72,7 @@ public static object GetObjectValue(object? obj) if ((!eeType.IsValueType) || eeType.IsPrimitive) return obj; - return RuntimeImports.RhMemberwiseClone(obj); + return obj.MemberwiseClone(); } public static new bool Equals(object? o1, object? 
o2) @@ -208,8 +208,16 @@ internal static ref byte GetRawData(this object obj) => internal static unsafe nuint GetRawObjectDataSize(this object obj) { - Debug.Assert(obj.GetEETypePtr().ComponentSize == 0); - return obj.GetEETypePtr().BaseSize - (uint)sizeof(ObjHeader) - (uint)sizeof(MethodTable*); + MethodTable* pMT = GetMethodTable(obj); + + // See comment on RawArrayData for details + nuint rawSize = pMT->BaseSize - (nuint)(2 * sizeof(IntPtr)); + if (pMT->HasComponentSize) + rawSize += (uint)Unsafe.As(obj).Length * (nuint)pMT->ComponentSize; + + GC.KeepAlive(obj); // Keep MethodTable alive + + return rawSize; } internal static unsafe ushort GetElementSize(this Array array) diff --git a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Runtime/RuntimeImports.cs b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Runtime/RuntimeImports.cs index 48b799e20fbfe9..b5f6a17156e55f 100644 --- a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Runtime/RuntimeImports.cs +++ b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Runtime/RuntimeImports.cs @@ -382,10 +382,6 @@ internal static unsafe object RhBox(EETypePtr pEEType, ref byte data) internal static unsafe void RhUnbox(object? obj, ref byte data, EETypePtr pUnboxToEEType) => RhUnbox(obj, ref data, pUnboxToEEType.ToPointer()); - [MethodImpl(MethodImplOptions.InternalCall)] - [RuntimeImport(RuntimeLibrary, "RhMemberwiseClone")] - internal static extern object RhMemberwiseClone(object obj); - // Busy spin for the given number of iterations. 
[LibraryImport(RuntimeLibrary, EntryPoint = "RhSpinWait")] [SuppressGCTransition] diff --git a/src/libraries/System.Private.CoreLib/src/System/Buffer.cs b/src/libraries/System.Private.CoreLib/src/System/Buffer.cs index c14bd684cb0170..7ea4502c95fff8 100644 --- a/src/libraries/System.Private.CoreLib/src/System/Buffer.cs +++ b/src/libraries/System.Private.CoreLib/src/System/Buffer.cs @@ -333,5 +333,93 @@ private struct Block16 { } [StructLayout(LayoutKind.Sequential, Size = 64)] private struct Block64 { } #endif // HAS_CUSTOM_BLOCKS + + // Non-inlinable wrapper around the QCall that avoids polluting the fast path + // with P/Invoke prolog/epilog. + [MethodImpl(MethodImplOptions.NoInlining)] + internal static unsafe void _ZeroMemory(ref byte b, nuint byteLength) + { + fixed (byte* bytePointer = &b) + { + __ZeroMemory(bytePointer, byteLength); + } + } + +#if !MONO // Mono BulkMoveWithWriteBarrier is in terms of elements (not bytes) and takes a type handle. + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal static void Memmove(ref T destination, ref T source, nuint elementCount) + { + if (!RuntimeHelpers.IsReferenceOrContainsReferences()) + { + // Blittable memmove + Memmove( + ref Unsafe.As(ref destination), + ref Unsafe.As(ref source), + elementCount * (nuint)Unsafe.SizeOf()); + } + else + { + // Non-blittable memmove + BulkMoveWithWriteBarrier( + ref Unsafe.As(ref destination), + ref Unsafe.As(ref source), + elementCount * (nuint)Unsafe.SizeOf()); + } + } + + // The maximum block size for the __BulkMoveWithWriteBarrier FCall. This is required to avoid GC starvation. 
+#if DEBUG // Stress the mechanism in debug builds + private const uint BulkMoveWithWriteBarrierChunk = 0x400; +#else + private const uint BulkMoveWithWriteBarrierChunk = 0x4000; +#endif + + internal static void BulkMoveWithWriteBarrier(ref byte destination, ref byte source, nuint byteCount) + { + if (byteCount <= BulkMoveWithWriteBarrierChunk) + __BulkMoveWithWriteBarrier(ref destination, ref source, byteCount); + else + _BulkMoveWithWriteBarrier(ref destination, ref source, byteCount); + } + +#pragma warning disable IDE0060 // https://github.com/dotnet/roslyn-analyzers/issues/6228 + // Non-inlinable wrapper around the loop for copying large blocks in chunks + [MethodImpl(MethodImplOptions.NoInlining)] + private static void _BulkMoveWithWriteBarrier(ref byte destination, ref byte source, nuint byteCount) + { + Debug.Assert(byteCount > BulkMoveWithWriteBarrierChunk); + + if (Unsafe.AreSame(ref source, ref destination)) + return; + + // This is equivalent to: (destination - source) >= byteCount || (destination - source) < 0 + if ((nuint)(nint)Unsafe.ByteOffset(ref source, ref destination) >= byteCount) + { + // Copy forwards + do + { + byteCount -= BulkMoveWithWriteBarrierChunk; + __BulkMoveWithWriteBarrier(ref destination, ref source, BulkMoveWithWriteBarrierChunk); + destination = ref Unsafe.AddByteOffset(ref destination, BulkMoveWithWriteBarrierChunk); + source = ref Unsafe.AddByteOffset(ref source, BulkMoveWithWriteBarrierChunk); + } + while (byteCount > BulkMoveWithWriteBarrierChunk); + } + else + { + // Copy backwards + do + { + byteCount -= BulkMoveWithWriteBarrierChunk; + __BulkMoveWithWriteBarrier(ref Unsafe.AddByteOffset(ref destination, byteCount), ref Unsafe.AddByteOffset(ref source, byteCount), BulkMoveWithWriteBarrierChunk); + } + while (byteCount > BulkMoveWithWriteBarrierChunk); + } + __BulkMoveWithWriteBarrier(ref destination, ref source, byteCount); + } +#pragma warning restore IDE0060 // https://github.com/dotnet/roslyn-analyzers/issues/6228 + 
+#endif // !MONO } } diff --git a/src/mono/System.Private.CoreLib/src/System/Buffer.Mono.cs b/src/mono/System.Private.CoreLib/src/System/Buffer.Mono.cs index 8ab237f1809342..ffd9f7d7165ac5 100644 --- a/src/mono/System.Private.CoreLib/src/System/Buffer.Mono.cs +++ b/src/mono/System.Private.CoreLib/src/System/Buffer.Mono.cs @@ -8,22 +8,13 @@ namespace System public partial class Buffer { [MethodImpl(MethodImplOptions.InternalCall)] - private static extern unsafe void __Memmove(byte* dest, byte* src, nuint len); + private static extern unsafe void __ZeroMemory(void* b, nuint byteLength); [MethodImpl(MethodImplOptions.InternalCall)] - private static extern void BulkMoveWithWriteBarrier(ref byte dmem, ref byte smem, nuint len, IntPtr type_handle); - - [MethodImpl(MethodImplOptions.NoInlining)] - internal static unsafe void _ZeroMemory(ref byte b, nuint byteLength) - { - fixed (byte* bytePointer = &b) - { - __ZeroMemory(bytePointer, byteLength); - } - } + private static extern unsafe void __Memmove(byte* dest, byte* src, nuint len); [MethodImpl(MethodImplOptions.InternalCall)] - private static extern unsafe void __ZeroMemory(void* p, nuint byteLength); + private static extern void BulkMoveWithWriteBarrier(ref byte dmem, ref byte smem, nuint len, IntPtr type_handle); [MethodImpl(MethodImplOptions.AggressiveInlining)] internal static void Memmove(ref T destination, ref T source, nuint elementCount)