repo_name (stringclasses, 6 values) | pr_number (int64, 512–78.9k) | pr_title (stringlengths, 3–144) | pr_description (stringlengths, 0–30.3k) | author (stringlengths, 2–21) | date_created (unknown) | date_merged (unknown) | previous_commit (stringlengths, 40) | pr_commit (stringlengths, 40) | query (stringlengths, 17–30.4k) | filepath (stringlengths, 9–210) | before_content (stringlengths, 0–112M) | after_content (stringlengths, 0–112M) | label (int64, -1 to 1)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
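For reference, here is a minimal C# sketch of a single row modeled as a record, using the column names from the header above. The .NET types are assumptions mapped from the declared dtypes (int64 → long, string columns → string, the two "unknown" date columns kept as strings); they are not part of the dataset itself.

```csharp
// One dataset row, modeled as an immutable record (field names follow the header;
// types are assumptions, not something the dataset declares).
public sealed record PrFileRow(
    string RepoName,        // stringclasses, 6 distinct values
    long   PrNumber,        // int64, 512 .. 78.9k
    string PrTitle,         // 3 .. 144 chars
    string PrDescription,   // 0 .. 30.3k chars
    string Author,          // 2 .. 21 chars
    string DateCreated,     // dtype listed as "unknown"; kept as string here
    string DateMerged,      // dtype listed as "unknown"; kept as string here
    string PreviousCommit,  // 40-char commit SHA
    string PrCommit,        // 40-char commit SHA
    string Query,           // 17 .. 30.4k chars
    string Filepath,        // 9 .. 210 chars
    string BeforeContent,   // 0 .. 112M chars
    string AfterContent,    // 0 .. 112M chars
    long   Label);          // int64, -1 .. 1
```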
dotnet/runtime | 65,932 | Update stale comments that reference GitHub issues | Fixes #65931
[Cleanup Issue-URLs in Code · Issue #63902 · dotnet/runtime](https://github.com/dotnet/runtime/issues/63902) identified stale comments that reference GitHub issues. This PR updates comments to reflect updated statuses.
_I'm marking this PR as a draft while working through that issue to see if other comments can also be quickly updated._
/cc @deeprobin | jeffhandley | "2022-02-27T20:14:23Z" | "2022-02-28T03:21:49Z" | 68fb7fc68cc1af800bee1d38af22b5027bf4ab4e | a58437f7e6794aba690e053827a5d436919c6bc3 | Update stale comments that reference GitHub issues. Fixes #65931
[Cleanup Issue-URLs in Code · Issue #63902 · dotnet/runtime](https://github.com/dotnet/runtime/issues/63902) identified stale comments that reference GitHub issues. This PR updates comments to reflect updated statuses.
_I'm marking this PR as a draft while working through that issue to see if other comments can also be quickly updated._
/cc @deeprobin | ./src/tests/JIT/HardwareIntrinsics/Arm/AdvSimd/BitwiseSelect.Vector128.Int32.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;
namespace JIT.HardwareIntrinsics.Arm
{
public static partial class Program
{
private static void BitwiseSelect_Vector128_Int32()
{
var test = new SimpleTernaryOpTest__BitwiseSelect_Vector128_Int32();
if (test.IsSupported)
{
// Validates basic functionality works, using Unsafe.Read
test.RunBasicScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates basic functionality works, using Load
test.RunBasicScenario_Load();
}
// Validates calling via reflection works, using Unsafe.Read
test.RunReflectionScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates calling via reflection works, using Load
test.RunReflectionScenario_Load();
}
// Validates passing a static member works
test.RunClsVarScenario();
if (AdvSimd.IsSupported)
{
// Validates passing a static member works, using pinning and Load
test.RunClsVarScenario_Load();
}
// Validates passing a local works, using Unsafe.Read
test.RunLclVarScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates passing a local works, using Load
test.RunLclVarScenario_Load();
}
// Validates passing the field of a local class works
test.RunClassLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local class works, using pinning and Load
test.RunClassLclFldScenario_Load();
}
// Validates passing an instance member of a class works
test.RunClassFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a class works, using pinning and Load
test.RunClassFldScenario_Load();
}
// Validates passing the field of a local struct works
test.RunStructLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local struct works, using pinning and Load
test.RunStructLclFldScenario_Load();
}
// Validates passing an instance member of a struct works
test.RunStructFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a struct works, using pinning and Load
test.RunStructFldScenario_Load();
}
}
else
{
// Validates we throw on unsupported hardware
test.RunUnsupportedScenario();
}
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class SimpleTernaryOpTest__BitwiseSelect_Vector128_Int32
{
private struct DataTable
{
private byte[] inArray1;
private byte[] inArray2;
private byte[] inArray3;
private byte[] outArray;
private GCHandle inHandle1;
private GCHandle inHandle2;
private GCHandle inHandle3;
private GCHandle outHandle;
private ulong alignment;
public DataTable(Int32[] inArray1, Int32[] inArray2, Int32[] inArray3, Int32[] outArray, int alignment)
{
int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int32>();
int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Int32>();
int sizeOfinArray3 = inArray3.Length * Unsafe.SizeOf<Int32>();
int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int32>();
if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfinArray3 || (alignment * 2) < sizeOfoutArray)
{
throw new ArgumentException("Invalid value of alignment");
}
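// Over-allocate each pinned buffer to 2x the requested alignment so that Align()
// can always carve out an aligned window large enough for the input data.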
this.inArray1 = new byte[alignment * 2];
this.inArray2 = new byte[alignment * 2];
this.inArray3 = new byte[alignment * 2];
this.outArray = new byte[alignment * 2];
this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned);
this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned);
this.inHandle3 = GCHandle.Alloc(this.inArray3, GCHandleType.Pinned);
this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned);
this.alignment = (ulong)alignment;
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int32, byte>(ref inArray1[0]), (uint)sizeOfinArray1);
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Int32, byte>(ref inArray2[0]), (uint)sizeOfinArray2);
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray3Ptr), ref Unsafe.As<Int32, byte>(ref inArray3[0]), (uint)sizeOfinArray3);
}
public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment);
public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment);
public void* inArray3Ptr => Align((byte*)(inHandle3.AddrOfPinnedObject().ToPointer()), alignment);
public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment);
public void Dispose()
{
inHandle1.Free();
inHandle2.Free();
inHandle3.Free();
outHandle.Free();
}
private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
{
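// Rounds the pointer up to the next multiple of expectedAlignment. This works
// because expectedAlignment is a power of two (8 or 16 here): adding
// (alignment - 1) and masking with ~(alignment - 1) clears the low bits.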
return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
}
}
private struct TestStruct
{
public Vector128<Int32> _fld1;
public Vector128<Int32> _fld2;
public Vector128<Int32> _fld3;
public static TestStruct Create()
{
var testStruct = new TestStruct();
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref testStruct._fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref testStruct._fld2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref testStruct._fld3), ref Unsafe.As<Int32, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
return testStruct;
}
public void RunStructFldScenario(SimpleTernaryOpTest__BitwiseSelect_Vector128_Int32 testClass)
{
var result = AdvSimd.BitwiseSelect(_fld1, _fld2, _fld3);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, _fld2, _fld3, testClass._dataTable.outArrayPtr);
}
public void RunStructFldScenario_Load(SimpleTernaryOpTest__BitwiseSelect_Vector128_Int32 testClass)
{
fixed (Vector128<Int32>* pFld1 = &_fld1)
fixed (Vector128<Int32>* pFld2 = &_fld2)
fixed (Vector128<Int32>* pFld3 = &_fld3)
{
var result = AdvSimd.BitwiseSelect(
AdvSimd.LoadVector128((Int32*)(pFld1)),
AdvSimd.LoadVector128((Int32*)(pFld2)),
AdvSimd.LoadVector128((Int32*)(pFld3))
);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, _fld2, _fld3, testClass._dataTable.outArrayPtr);
}
}
}
private static readonly int LargestVectorSize = 16;
private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32);
private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32);
private static readonly int Op3ElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32);
private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32);
private static Int32[] _data1 = new Int32[Op1ElementCount];
private static Int32[] _data2 = new Int32[Op2ElementCount];
private static Int32[] _data3 = new Int32[Op3ElementCount];
private static Vector128<Int32> _clsVar1;
private static Vector128<Int32> _clsVar2;
private static Vector128<Int32> _clsVar3;
private Vector128<Int32> _fld1;
private Vector128<Int32> _fld2;
private Vector128<Int32> _fld3;
private DataTable _dataTable;
static SimpleTernaryOpTest__BitwiseSelect_Vector128_Int32()
{
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _clsVar1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _clsVar2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _clsVar3), ref Unsafe.As<Int32, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
}
public SimpleTernaryOpTest__BitwiseSelect_Vector128_Int32()
{
Succeeded = true;
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _fld2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _fld3), ref Unsafe.As<Int32, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); }
for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetInt32(); }
_dataTable = new DataTable(_data1, _data2, _data3, new Int32[RetElementCount], LargestVectorSize);
}
public bool IsSupported => AdvSimd.IsSupported;
public bool Succeeded { get; set; }
public void RunBasicScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
var result = AdvSimd.BitwiseSelect(
Unsafe.Read<Vector128<Int32>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector128<Int32>>(_dataTable.inArray2Ptr),
Unsafe.Read<Vector128<Int32>>(_dataTable.inArray3Ptr)
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr);
}
public void RunBasicScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load));
var result = AdvSimd.BitwiseSelect(
AdvSimd.LoadVector128((Int32*)(_dataTable.inArray1Ptr)),
AdvSimd.LoadVector128((Int32*)(_dataTable.inArray2Ptr)),
AdvSimd.LoadVector128((Int32*)(_dataTable.inArray3Ptr))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.BitwiseSelect), new Type[] { typeof(Vector128<Int32>), typeof(Vector128<Int32>), typeof(Vector128<Int32>) })
.Invoke(null, new object[] {
Unsafe.Read<Vector128<Int32>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector128<Int32>>(_dataTable.inArray2Ptr),
Unsafe.Read<Vector128<Int32>>(_dataTable.inArray3Ptr)
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int32>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load));
var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.BitwiseSelect), new Type[] { typeof(Vector128<Int32>), typeof(Vector128<Int32>), typeof(Vector128<Int32>) })
.Invoke(null, new object[] {
AdvSimd.LoadVector128((Int32*)(_dataTable.inArray1Ptr)),
AdvSimd.LoadVector128((Int32*)(_dataTable.inArray2Ptr)),
AdvSimd.LoadVector128((Int32*)(_dataTable.inArray3Ptr))
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int32>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr);
}
public void RunClsVarScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
var result = AdvSimd.BitwiseSelect(
_clsVar1,
_clsVar2,
_clsVar3
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _clsVar2, _clsVar3, _dataTable.outArrayPtr);
}
public void RunClsVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load));
fixed (Vector128<Int32>* pClsVar1 = &_clsVar1)
fixed (Vector128<Int32>* pClsVar2 = &_clsVar2)
fixed (Vector128<Int32>* pClsVar3 = &_clsVar3)
{
var result = AdvSimd.BitwiseSelect(
AdvSimd.LoadVector128((Int32*)(pClsVar1)),
AdvSimd.LoadVector128((Int32*)(pClsVar2)),
AdvSimd.LoadVector128((Int32*)(pClsVar3))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _clsVar2, _clsVar3, _dataTable.outArrayPtr);
}
}
public void RunLclVarScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
var op1 = Unsafe.Read<Vector128<Int32>>(_dataTable.inArray1Ptr);
var op2 = Unsafe.Read<Vector128<Int32>>(_dataTable.inArray2Ptr);
var op3 = Unsafe.Read<Vector128<Int32>>(_dataTable.inArray3Ptr);
var result = AdvSimd.BitwiseSelect(op1, op2, op3);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, op2, op3, _dataTable.outArrayPtr);
}
public void RunLclVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load));
var op1 = AdvSimd.LoadVector128((Int32*)(_dataTable.inArray1Ptr));
var op2 = AdvSimd.LoadVector128((Int32*)(_dataTable.inArray2Ptr));
var op3 = AdvSimd.LoadVector128((Int32*)(_dataTable.inArray3Ptr));
var result = AdvSimd.BitwiseSelect(op1, op2, op3);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, op2, op3, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
var test = new SimpleTernaryOpTest__BitwiseSelect_Vector128_Int32();
var result = AdvSimd.BitwiseSelect(test._fld1, test._fld2, test._fld3);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load));
var test = new SimpleTernaryOpTest__BitwiseSelect_Vector128_Int32();
fixed (Vector128<Int32>* pFld1 = &test._fld1)
fixed (Vector128<Int32>* pFld2 = &test._fld2)
fixed (Vector128<Int32>* pFld3 = &test._fld3)
{
var result = AdvSimd.BitwiseSelect(
AdvSimd.LoadVector128((Int32*)(pFld1)),
AdvSimd.LoadVector128((Int32*)(pFld2)),
AdvSimd.LoadVector128((Int32*)(pFld3))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr);
}
}
public void RunClassFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
var result = AdvSimd.BitwiseSelect(_fld1, _fld2, _fld3);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _fld2, _fld3, _dataTable.outArrayPtr);
}
public void RunClassFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load));
fixed (Vector128<Int32>* pFld1 = &_fld1)
fixed (Vector128<Int32>* pFld2 = &_fld2)
fixed (Vector128<Int32>* pFld3 = &_fld3)
{
var result = AdvSimd.BitwiseSelect(
AdvSimd.LoadVector128((Int32*)(pFld1)),
AdvSimd.LoadVector128((Int32*)(pFld2)),
AdvSimd.LoadVector128((Int32*)(pFld3))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _fld2, _fld3, _dataTable.outArrayPtr);
}
}
public void RunStructLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
var test = TestStruct.Create();
var result = AdvSimd.BitwiseSelect(test._fld1, test._fld2, test._fld3);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr);
}
public void RunStructLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load));
var test = TestStruct.Create();
var result = AdvSimd.BitwiseSelect(
AdvSimd.LoadVector128((Int32*)(&test._fld1)),
AdvSimd.LoadVector128((Int32*)(&test._fld2)),
AdvSimd.LoadVector128((Int32*)(&test._fld3))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr);
}
public void RunStructFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
var test = TestStruct.Create();
test.RunStructFldScenario(this);
}
public void RunStructFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load));
var test = TestStruct.Create();
test.RunStructFldScenario_Load(this);
}
public void RunUnsupportedScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario));
bool succeeded = false;
try
{
RunBasicScenario_UnsafeRead();
}
catch (PlatformNotSupportedException)
{
succeeded = true;
}
if (!succeeded)
{
Succeeded = false;
}
}
private void ValidateResult(Vector128<Int32> op1, Vector128<Int32> op2, Vector128<Int32> op3, void* result, [CallerMemberName] string method = "")
{
Int32[] inArray1 = new Int32[Op1ElementCount];
Int32[] inArray2 = new Int32[Op2ElementCount];
Int32[] inArray3 = new Int32[Op3ElementCount];
Int32[] outArray = new Int32[RetElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), op1);
Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray2[0]), op2);
Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray3[0]), op3);
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int32>>());
ValidateResult(inArray1, inArray2, inArray3, outArray, method);
}
private void ValidateResult(void* op1, void* op2, void* op3, void* result, [CallerMemberName] string method = "")
{
Int32[] inArray1 = new Int32[Op1ElementCount];
Int32[] inArray2 = new Int32[Op2ElementCount];
Int32[] inArray3 = new Int32[Op3ElementCount];
Int32[] outArray = new Int32[RetElementCount];
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<Int32>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector128<Int32>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray3[0]), ref Unsafe.AsRef<byte>(op3), (uint)Unsafe.SizeOf<Vector128<Int32>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int32>>());
ValidateResult(inArray1, inArray2, inArray3, outArray, method);
}
private void ValidateResult(Int32[] firstOp, Int32[] secondOp, Int32[] thirdOp, Int32[] result, [CallerMemberName] string method = "")
{
bool succeeded = true;
for (var i = 0; i < RetElementCount; i++)
{
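// Expected value (assumption; the shared Helpers.BitwiseSelect source is not shown
// in this dump): a per-bit select with firstOp as the mask, i.e.
// (secondOp[i] & firstOp[i]) | (thirdOp[i] & ~firstOp[i]).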
if (Helpers.BitwiseSelect(firstOp[i], secondOp[i], thirdOp[i]) != result[i])
{
succeeded = false;
break;
}
}
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.BitwiseSelect)}<Int32>(Vector128<Int32>, Vector128<Int32>, Vector128<Int32>): {method} failed:");
TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})");
TestLibrary.TestFramework.LogInformation($"secondOp: ({string.Join(", ", secondOp)})");
TestLibrary.TestFramework.LogInformation($" thirdOp: ({string.Join(", ", thirdOp)})");
TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;
namespace JIT.HardwareIntrinsics.Arm
{
public static partial class Program
{
private static void BitwiseSelect_Vector128_Int32()
{
var test = new SimpleTernaryOpTest__BitwiseSelect_Vector128_Int32();
if (test.IsSupported)
{
// Validates basic functionality works, using Unsafe.Read
test.RunBasicScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates basic functionality works, using Load
test.RunBasicScenario_Load();
}
// Validates calling via reflection works, using Unsafe.Read
test.RunReflectionScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates calling via reflection works, using Load
test.RunReflectionScenario_Load();
}
// Validates passing a static member works
test.RunClsVarScenario();
if (AdvSimd.IsSupported)
{
// Validates passing a static member works, using pinning and Load
test.RunClsVarScenario_Load();
}
// Validates passing a local works, using Unsafe.Read
test.RunLclVarScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates passing a local works, using Load
test.RunLclVarScenario_Load();
}
// Validates passing the field of a local class works
test.RunClassLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local class works, using pinning and Load
test.RunClassLclFldScenario_Load();
}
// Validates passing an instance member of a class works
test.RunClassFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a class works, using pinning and Load
test.RunClassFldScenario_Load();
}
// Validates passing the field of a local struct works
test.RunStructLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local struct works, using pinning and Load
test.RunStructLclFldScenario_Load();
}
// Validates passing an instance member of a struct works
test.RunStructFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a struct works, using pinning and Load
test.RunStructFldScenario_Load();
}
}
else
{
// Validates we throw on unsupported hardware
test.RunUnsupportedScenario();
}
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class SimpleTernaryOpTest__BitwiseSelect_Vector128_Int32
{
private struct DataTable
{
private byte[] inArray1;
private byte[] inArray2;
private byte[] inArray3;
private byte[] outArray;
private GCHandle inHandle1;
private GCHandle inHandle2;
private GCHandle inHandle3;
private GCHandle outHandle;
private ulong alignment;
public DataTable(Int32[] inArray1, Int32[] inArray2, Int32[] inArray3, Int32[] outArray, int alignment)
{
int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int32>();
int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Int32>();
int sizeOfinArray3 = inArray3.Length * Unsafe.SizeOf<Int32>();
int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int32>();
if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfinArray3 || (alignment * 2) < sizeOfoutArray)
{
throw new ArgumentException("Invalid value of alignment");
}
this.inArray1 = new byte[alignment * 2];
this.inArray2 = new byte[alignment * 2];
this.inArray3 = new byte[alignment * 2];
this.outArray = new byte[alignment * 2];
this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned);
this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned);
this.inHandle3 = GCHandle.Alloc(this.inArray3, GCHandleType.Pinned);
this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned);
this.alignment = (ulong)alignment;
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int32, byte>(ref inArray1[0]), (uint)sizeOfinArray1);
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Int32, byte>(ref inArray2[0]), (uint)sizeOfinArray2);
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray3Ptr), ref Unsafe.As<Int32, byte>(ref inArray3[0]), (uint)sizeOfinArray3);
}
public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment);
public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment);
public void* inArray3Ptr => Align((byte*)(inHandle3.AddrOfPinnedObject().ToPointer()), alignment);
public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment);
public void Dispose()
{
inHandle1.Free();
inHandle2.Free();
inHandle3.Free();
outHandle.Free();
}
private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
{
return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
}
}
private struct TestStruct
{
public Vector128<Int32> _fld1;
public Vector128<Int32> _fld2;
public Vector128<Int32> _fld3;
public static TestStruct Create()
{
var testStruct = new TestStruct();
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref testStruct._fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref testStruct._fld2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref testStruct._fld3), ref Unsafe.As<Int32, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
return testStruct;
}
public void RunStructFldScenario(SimpleTernaryOpTest__BitwiseSelect_Vector128_Int32 testClass)
{
var result = AdvSimd.BitwiseSelect(_fld1, _fld2, _fld3);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, _fld2, _fld3, testClass._dataTable.outArrayPtr);
}
public void RunStructFldScenario_Load(SimpleTernaryOpTest__BitwiseSelect_Vector128_Int32 testClass)
{
fixed (Vector128<Int32>* pFld1 = &_fld1)
fixed (Vector128<Int32>* pFld2 = &_fld2)
fixed (Vector128<Int32>* pFld3 = &_fld3)
{
var result = AdvSimd.BitwiseSelect(
AdvSimd.LoadVector128((Int32*)(pFld1)),
AdvSimd.LoadVector128((Int32*)(pFld2)),
AdvSimd.LoadVector128((Int32*)(pFld3))
);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, _fld2, _fld3, testClass._dataTable.outArrayPtr);
}
}
}
private static readonly int LargestVectorSize = 16;
private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32);
private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32);
private static readonly int Op3ElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32);
private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32);
private static Int32[] _data1 = new Int32[Op1ElementCount];
private static Int32[] _data2 = new Int32[Op2ElementCount];
private static Int32[] _data3 = new Int32[Op3ElementCount];
private static Vector128<Int32> _clsVar1;
private static Vector128<Int32> _clsVar2;
private static Vector128<Int32> _clsVar3;
private Vector128<Int32> _fld1;
private Vector128<Int32> _fld2;
private Vector128<Int32> _fld3;
private DataTable _dataTable;
static SimpleTernaryOpTest__BitwiseSelect_Vector128_Int32()
{
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _clsVar1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _clsVar2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _clsVar3), ref Unsafe.As<Int32, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
}
public SimpleTernaryOpTest__BitwiseSelect_Vector128_Int32()
{
Succeeded = true;
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _fld2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _fld3), ref Unsafe.As<Int32, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); }
for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetInt32(); }
_dataTable = new DataTable(_data1, _data2, _data3, new Int32[RetElementCount], LargestVectorSize);
}
public bool IsSupported => AdvSimd.IsSupported;
public bool Succeeded { get; set; }
public void RunBasicScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
var result = AdvSimd.BitwiseSelect(
Unsafe.Read<Vector128<Int32>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector128<Int32>>(_dataTable.inArray2Ptr),
Unsafe.Read<Vector128<Int32>>(_dataTable.inArray3Ptr)
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr);
}
public void RunBasicScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load));
var result = AdvSimd.BitwiseSelect(
AdvSimd.LoadVector128((Int32*)(_dataTable.inArray1Ptr)),
AdvSimd.LoadVector128((Int32*)(_dataTable.inArray2Ptr)),
AdvSimd.LoadVector128((Int32*)(_dataTable.inArray3Ptr))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.BitwiseSelect), new Type[] { typeof(Vector128<Int32>), typeof(Vector128<Int32>), typeof(Vector128<Int32>) })
.Invoke(null, new object[] {
Unsafe.Read<Vector128<Int32>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector128<Int32>>(_dataTable.inArray2Ptr),
Unsafe.Read<Vector128<Int32>>(_dataTable.inArray3Ptr)
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int32>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load));
var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.BitwiseSelect), new Type[] { typeof(Vector128<Int32>), typeof(Vector128<Int32>), typeof(Vector128<Int32>) })
.Invoke(null, new object[] {
AdvSimd.LoadVector128((Int32*)(_dataTable.inArray1Ptr)),
AdvSimd.LoadVector128((Int32*)(_dataTable.inArray2Ptr)),
AdvSimd.LoadVector128((Int32*)(_dataTable.inArray3Ptr))
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int32>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr);
}
public void RunClsVarScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
var result = AdvSimd.BitwiseSelect(
_clsVar1,
_clsVar2,
_clsVar3
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _clsVar2, _clsVar3, _dataTable.outArrayPtr);
}
public void RunClsVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load));
fixed (Vector128<Int32>* pClsVar1 = &_clsVar1)
fixed (Vector128<Int32>* pClsVar2 = &_clsVar2)
fixed (Vector128<Int32>* pClsVar3 = &_clsVar3)
{
var result = AdvSimd.BitwiseSelect(
AdvSimd.LoadVector128((Int32*)(pClsVar1)),
AdvSimd.LoadVector128((Int32*)(pClsVar2)),
AdvSimd.LoadVector128((Int32*)(pClsVar3))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _clsVar2, _clsVar3, _dataTable.outArrayPtr);
}
}
public void RunLclVarScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
var op1 = Unsafe.Read<Vector128<Int32>>(_dataTable.inArray1Ptr);
var op2 = Unsafe.Read<Vector128<Int32>>(_dataTable.inArray2Ptr);
var op3 = Unsafe.Read<Vector128<Int32>>(_dataTable.inArray3Ptr);
var result = AdvSimd.BitwiseSelect(op1, op2, op3);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, op2, op3, _dataTable.outArrayPtr);
}
public void RunLclVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load));
var op1 = AdvSimd.LoadVector128((Int32*)(_dataTable.inArray1Ptr));
var op2 = AdvSimd.LoadVector128((Int32*)(_dataTable.inArray2Ptr));
var op3 = AdvSimd.LoadVector128((Int32*)(_dataTable.inArray3Ptr));
var result = AdvSimd.BitwiseSelect(op1, op2, op3);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, op2, op3, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
var test = new SimpleTernaryOpTest__BitwiseSelect_Vector128_Int32();
var result = AdvSimd.BitwiseSelect(test._fld1, test._fld2, test._fld3);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load));
var test = new SimpleTernaryOpTest__BitwiseSelect_Vector128_Int32();
fixed (Vector128<Int32>* pFld1 = &test._fld1)
fixed (Vector128<Int32>* pFld2 = &test._fld2)
fixed (Vector128<Int32>* pFld3 = &test._fld3)
{
var result = AdvSimd.BitwiseSelect(
AdvSimd.LoadVector128((Int32*)(pFld1)),
AdvSimd.LoadVector128((Int32*)(pFld2)),
AdvSimd.LoadVector128((Int32*)(pFld3))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr);
}
}
public void RunClassFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
var result = AdvSimd.BitwiseSelect(_fld1, _fld2, _fld3);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _fld2, _fld3, _dataTable.outArrayPtr);
}
public void RunClassFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load));
fixed (Vector128<Int32>* pFld1 = &_fld1)
fixed (Vector128<Int32>* pFld2 = &_fld2)
fixed (Vector128<Int32>* pFld3 = &_fld3)
{
var result = AdvSimd.BitwiseSelect(
AdvSimd.LoadVector128((Int32*)(pFld1)),
AdvSimd.LoadVector128((Int32*)(pFld2)),
AdvSimd.LoadVector128((Int32*)(pFld3))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _fld2, _fld3, _dataTable.outArrayPtr);
}
}
public void RunStructLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
var test = TestStruct.Create();
var result = AdvSimd.BitwiseSelect(test._fld1, test._fld2, test._fld3);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr);
}
public void RunStructLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load));
var test = TestStruct.Create();
var result = AdvSimd.BitwiseSelect(
AdvSimd.LoadVector128((Int32*)(&test._fld1)),
AdvSimd.LoadVector128((Int32*)(&test._fld2)),
AdvSimd.LoadVector128((Int32*)(&test._fld3))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr);
}
public void RunStructFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
var test = TestStruct.Create();
test.RunStructFldScenario(this);
}
public void RunStructFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load));
var test = TestStruct.Create();
test.RunStructFldScenario_Load(this);
}
public void RunUnsupportedScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario));
bool succeeded = false;
try
{
RunBasicScenario_UnsafeRead();
}
catch (PlatformNotSupportedException)
{
succeeded = true;
}
if (!succeeded)
{
Succeeded = false;
}
}
private void ValidateResult(Vector128<Int32> op1, Vector128<Int32> op2, Vector128<Int32> op3, void* result, [CallerMemberName] string method = "")
{
Int32[] inArray1 = new Int32[Op1ElementCount];
Int32[] inArray2 = new Int32[Op2ElementCount];
Int32[] inArray3 = new Int32[Op3ElementCount];
Int32[] outArray = new Int32[RetElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), op1);
Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray2[0]), op2);
Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray3[0]), op3);
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int32>>());
ValidateResult(inArray1, inArray2, inArray3, outArray, method);
}
private void ValidateResult(void* op1, void* op2, void* op3, void* result, [CallerMemberName] string method = "")
{
Int32[] inArray1 = new Int32[Op1ElementCount];
Int32[] inArray2 = new Int32[Op2ElementCount];
Int32[] inArray3 = new Int32[Op3ElementCount];
Int32[] outArray = new Int32[RetElementCount];
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<Int32>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector128<Int32>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray3[0]), ref Unsafe.AsRef<byte>(op3), (uint)Unsafe.SizeOf<Vector128<Int32>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int32>>());
ValidateResult(inArray1, inArray2, inArray3, outArray, method);
}
private void ValidateResult(Int32[] firstOp, Int32[] secondOp, Int32[] thirdOp, Int32[] result, [CallerMemberName] string method = "")
{
bool succeeded = true;
for (var i = 0; i < RetElementCount; i++)
{
if (Helpers.BitwiseSelect(firstOp[i], secondOp[i], thirdOp[i]) != result[i])
{
succeeded = false;
break;
}
}
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.BitwiseSelect)}<Int32>(Vector128<Int32>, Vector128<Int32>, Vector128<Int32>): {method} failed:");
TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})");
TestLibrary.TestFramework.LogInformation($"secondOp: ({string.Join(", ", secondOp)})");
TestLibrary.TestFramework.LogInformation($" thirdOp: ({string.Join(", ", thirdOp)})");
TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
}
}
| -1 |
dotnet/runtime | 65,932 | Update stale comments that reference GitHub issues | Fixes #65931
[Cleanup Issue-URLs in Code · Issue #63902 · dotnet/runtime](https://github.com/dotnet/runtime/issues/63902) identified stale comments that reference GitHub issues. This PR updates comments to reflect updated statuses.
_I'm marking this PR as a draft while working through that issue to see if other comments can also be quickly updated._
/cc @deeprobin | jeffhandley | "2022-02-27T20:14:23Z" | "2022-02-28T03:21:49Z" | 68fb7fc68cc1af800bee1d38af22b5027bf4ab4e | a58437f7e6794aba690e053827a5d436919c6bc3 | Update stale comments that reference GitHub issues. Fixes #65931
[Cleanup Issue-URLs in Code · Issue #63902 · dotnet/runtime](https://github.com/dotnet/runtime/issues/63902) identified stale comments that reference GitHub issues. This PR updates comments to reflect updated statuses.
_I'm marking this PR as a draft while working through that issue to see if other comments can also be quickly updated._
/cc @deeprobin | ./src/tests/JIT/Regression/VS-ia64-JIT/M00/b141358/test.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
using System;
namespace test
{
class Class1
{
static int Main()
{
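// Expected flow (traced by hand, not re-run here): try 1 -> try 1.1 -> throw ->
// catch 1.1 -> goto inner_try -> try 1.1.1 -> finally 1.1.1 -> goto throw_exception ->
// the second throw escapes catch 1.1 -> outer catch 1 -> finally 1 -> return 100.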
try
{
Console.WriteLine(" try 1");
try
{
Console.WriteLine("\t try 1.1");
Console.WriteLine("\t throwing an exception here!");
throw new System.ArithmeticException("My ArithmeticException");
}
catch (Exception)
{
Console.WriteLine("\t catch 1.1");
goto inner_try;
throw_exception:
Console.WriteLine("\t throwing another exception here!");
throw new System.ArithmeticException("My ArithmeticException");
inner_try:
try
{
Console.WriteLine("\t\t try 1.1.1");
}
finally
{
Console.WriteLine("\t\t finally 1.1.1");
}
goto throw_exception;
}
}
catch (Exception)
{
Console.WriteLine(" catch 1");
}
finally
{
Console.WriteLine(" finally 1");
}
return 100;
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
using System;
namespace test
{
class Class1
{
static int Main()
{
try
{
Console.WriteLine(" try 1");
try
{
Console.WriteLine("\t try 1.1");
Console.WriteLine("\t throwing an exception here!");
throw new System.ArithmeticException("My ArithmeticException");
}
catch (Exception)
{
Console.WriteLine("\t catch 1.1");
goto inner_try;
throw_exception:
Console.WriteLine("\t throwing another exception here!");
throw new System.ArithmeticException("My ArithmeticException");
inner_try:
try
{
Console.WriteLine("\t\t try 1.1.1");
}
finally
{
Console.WriteLine("\t\t finally 1.1.1");
}
goto throw_exception;
}
}
catch (Exception)
{
Console.WriteLine(" catch 1");
}
finally
{
Console.WriteLine(" finally 1");
}
return 100;
}
}
}
| -1 |
dotnet/runtime | 65,932 | Update stale comments that reference GitHub issues | Fixes #65931
[Cleanup Issue-URLs in Code · Issue #63902 · dotnet/runtime](https://github.com/dotnet/runtime/issues/63902) identified stale comments that reference GitHub issues. This PR updates comments to reflect updated statuses.
_I'm marking this PR as a draft while working through that issue to see if other comments can also be quickly updated._
/cc @deeprobin | jeffhandley | "2022-02-27T20:14:23Z" | "2022-02-28T03:21:49Z" | 68fb7fc68cc1af800bee1d38af22b5027bf4ab4e | a58437f7e6794aba690e053827a5d436919c6bc3 | Update stale comments that reference GitHub issues. Fixes #65931
[Cleanup Issue-URLs in Code · Issue #63902 · dotnet/runtime](https://github.com/dotnet/runtime/issues/63902) identified stale comments that reference GitHub issues. This PR updates comments to reflect updated statuses.
_I'm marking this PR as a draft while working through that issue to see if other comments can also be quickly updated._
/cc @deeprobin | ./src/libraries/Microsoft.Extensions.DependencyInjection/tests/DI.Tests/Fakes/ClassWithOptionalArgsCtorWithStructs.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using Microsoft.Extensions.DependencyInjection.Specification.Fakes;
namespace Microsoft.Extensions.DependencyInjection.Tests.Fakes
{
public class ClassWithServiceAndOptionalArgsCtorWithStructs
{
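// Note: for a struct with no user-defined parameterless constructor, `new T()` and
// `default(T)` produce the same zero-initialized value and are both valid optional-parameter
// defaults, which is why both spellings appear side by side below.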
public DateTime DateTime { get; }
public DateTime DateTimeDefault { get; }
public TimeSpan TimeSpan { get; }
public TimeSpan TimeSpanDefault { get; }
public DateTimeOffset DateTimeOffset { get; }
public DateTimeOffset DateTimeOffsetDefault { get; }
public Guid Guid { get; }
public Guid GuidDefault { get; }
public CustomStruct CustomStructValue { get; }
public CustomStruct CustomStructDefault { get; }
public ClassWithServiceAndOptionalArgsCtorWithStructs(IFakeService fake,
DateTime dateTime = new DateTime(),
DateTime dateTimeDefault = default(DateTime),
TimeSpan timeSpan = new TimeSpan(),
TimeSpan timeSpanDefault = default(TimeSpan),
DateTimeOffset dateTimeOffset = new DateTimeOffset(),
DateTimeOffset dateTimeOffsetDefault = default(DateTimeOffset),
Guid guid = new Guid(),
Guid guidDefault = default(Guid),
CustomStruct customStruct = new CustomStruct(),
CustomStruct customStructDefault = default(CustomStruct)
)
{
DateTime = dateTime;
DateTimeDefault = dateTimeDefault;
TimeSpan = timeSpan;
TimeSpanDefault = timeSpanDefault;
DateTimeOffset = dateTimeOffset;
DateTimeOffsetDefault = dateTimeOffsetDefault;
Guid = guid;
GuidDefault = guidDefault;
CustomStructValue = customStruct;
CustomStructDefault = customStructDefault;
}
public struct CustomStruct { }
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using Microsoft.Extensions.DependencyInjection.Specification.Fakes;
namespace Microsoft.Extensions.DependencyInjection.Tests.Fakes
{
public class ClassWithServiceAndOptionalArgsCtorWithStructs
{
public DateTime DateTime { get; }
public DateTime DateTimeDefault { get; }
public TimeSpan TimeSpan { get; }
public TimeSpan TimeSpanDefault { get; }
public DateTimeOffset DateTimeOffset { get; }
public DateTimeOffset DateTimeOffsetDefault { get; }
public Guid Guid { get; }
public Guid GuidDefault { get; }
public CustomStruct CustomStructValue { get; }
public CustomStruct CustomStructDefault { get; }
public ClassWithServiceAndOptionalArgsCtorWithStructs(IFakeService fake,
DateTime dateTime = new DateTime(),
DateTime dateTimeDefault = default(DateTime),
TimeSpan timeSpan = new TimeSpan(),
TimeSpan timeSpanDefault = default(TimeSpan),
DateTimeOffset dateTimeOffset = new DateTimeOffset(),
DateTimeOffset dateTimeOffsetDefault = default(DateTimeOffset),
Guid guid = new Guid(),
Guid guidDefault = default(Guid),
CustomStruct customStruct = new CustomStruct(),
CustomStruct customStructDefault = default(CustomStruct)
)
{
DateTime = dateTime;
DateTimeDefault = dateTimeDefault;
TimeSpan = timeSpan;
TimeSpanDefault = timeSpanDefault;
DateTimeOffset = dateTimeOffset;
DateTimeOffsetDefault = dateTimeOffsetDefault;
Guid = guid;
GuidDefault = guidDefault;
CustomStructValue = customStruct;
CustomStructDefault = customStructDefault;
}
public struct CustomStruct { }
}
}
| -1 |
dotnet/runtime | 65,932 | Update stale comments that reference GitHub issues | Fixes #65931
[Cleanup Issue-URLs in Code · Issue #63902 · dotnet/runtime](https://github.com/dotnet/runtime/issues/63902) identified stale comments that reference GitHub issues. This PR updates comments to reflect updated statuses.
_I'm marking this PR as a draft while working through that issue to see if other comments can also be quickly updated._
/cc @deeprobin | jeffhandley | "2022-02-27T20:14:23Z" | "2022-02-28T03:21:49Z" | 68fb7fc68cc1af800bee1d38af22b5027bf4ab4e | a58437f7e6794aba690e053827a5d436919c6bc3 | Update stale comments that reference GitHub issues. Fixes #65931
[Cleanup Issue-URLs in Code · Issue #63902 · dotnet/runtime](https://github.com/dotnet/runtime/issues/63902) identified stale comments that reference GitHub issues. This PR updates comments to reflect updated statuses.
_I'm marking this PR as a draft while working through that issue to see if other comments can also be quickly updated._
/cc @deeprobin | ./src/coreclr/pal/tests/palsuite/composite/threading/threadsuspension/readme.txt | To compile:
1) create a dat file (say threadsuspension.dat) with contents:
PAL,Composite,palsuite\composite\threading\threadsuspension,wfmo=mainWrapper.c threadsuspension.c,<SUPPORTEXE>,<TESTLANGCPP>,<COMPILEONLY>
2) perl rrunmod.pl -r threadsuspension.dat
To execute:
mainWrapper [PROCESS_COUNT] [THREAD_COUNT] [REPEAT_COUNT]
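e.g. (hypothetical values) mainWrapper 4 16 100 -> 4 processes, 16 threads each, 100 repeats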
| To compile:
1) create a dat file (say threadsuspension.dat) with contents:
PAL,Composite,palsuite\composite\threading\threadsuspension,wfmo=mainWrapper.c threadsuspension.c,<SUPPORTEXE>,<TESTLANGCPP>,<COMPILEONLY>
2) perl rrunmod.pl -r threadsuspension.dat
To execute:
mainWrapper [PROCESS_COUNT] [THREAD_COUNT] [REPEAT_COUNT]
| -1 |
dotnet/runtime | 65,932 | Update stale comments that reference GitHub issues | Fixes #65931
[Cleanup Issue-URLs in Code · Issue #63902 · dotnet/runtime](https://github.com/dotnet/runtime/issues/63902) identified stale comments that reference GitHub issues. This PR updates comments to reflect updated statuses.
_I'm marking this PR as a draft while working through that issue to see if other comments can also be quickly updated._
/cc @deeprobin | jeffhandley | "2022-02-27T20:14:23Z" | "2022-02-28T03:21:49Z" | 68fb7fc68cc1af800bee1d38af22b5027bf4ab4e | a58437f7e6794aba690e053827a5d436919c6bc3 | Update stale comments that reference GitHub issues. Fixes #65931
[Cleanup Issue-URLs in Code · Issue #63902 · dotnet/runtime](https://github.com/dotnet/runtime/issues/63902) identified stale comments that reference GitHub issues. This PR updates comments to reflect updated statuses.
_I'm marking this PR as a draft while working through that issue to see if other comments can also be quickly updated._
/cc @deeprobin | ./src/tests/JIT/HardwareIntrinsics/Arm/Shared/_ImmUnaryOpTestTemplate.template | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
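// Note: the tokens in curly braces (e.g. {TestName}, {Isa}, {Method}, {Imm}) are placeholders
// that GenerateTests.csx substitutes when emitting concrete test files such as the
// BitwiseSelect.Vector128.Int32.cs test shown earlier in this dump.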
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;
namespace JIT.HardwareIntrinsics.Arm
{
public static partial class Program
{
private static void {TestName}()
{
var test = new {TemplateName}UnaryOpTest__{TestName}();
if (test.IsSupported)
{
// Validates basic functionality works, using Unsafe.Read
test.RunBasicScenario_UnsafeRead();
if ({LoadIsa}.IsSupported)
{
// Validates basic functionality works, using Load
test.RunBasicScenario_Load();
}
// Validates calling via reflection works, using Unsafe.Read
test.RunReflectionScenario_UnsafeRead();
if ({LoadIsa}.IsSupported)
{
// Validates calling via reflection works, using Load
test.RunReflectionScenario_Load();
}
// Validates passing a static member works
test.RunClsVarScenario();
if ({LoadIsa}.IsSupported)
{
// Validates passing a static member works, using pinning and Load
test.RunClsVarScenario_Load();
}
// Validates passing a local works, using Unsafe.Read
test.RunLclVarScenario_UnsafeRead();
if ({LoadIsa}.IsSupported)
{
// Validates passing a local works, using Load
test.RunLclVarScenario_Load();
}
// Validates passing the field of a local class works
test.RunClassLclFldScenario();
if ({LoadIsa}.IsSupported)
{
// Validates passing the field of a local class works, using pinning and Load
test.RunClassLclFldScenario_Load();
}
// Validates passing an instance member of a class works
test.RunClassFldScenario();
if ({LoadIsa}.IsSupported)
{
// Validates passing an instance member of a class works, using pinning and Load
test.RunClassFldScenario_Load();
}
// Validates passing the field of a local struct works
test.RunStructLclFldScenario();
if ({LoadIsa}.IsSupported)
{
// Validates passing the field of a local struct works, using pinning and Load
test.RunStructLclFldScenario_Load();
}
// Validates passing an instance member of a struct works
test.RunStructFldScenario();
if ({LoadIsa}.IsSupported)
{
// Validates passing an instance member of a struct works, using pinning and Load
test.RunStructFldScenario_Load();
}
}
else
{
// Validates we throw on unsupported hardware
test.RunUnsupportedScenario();
}
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class {TemplateName}UnaryOpTest__{TestName}
{
private struct DataTable
{
private byte[] inArray;
private byte[] outArray;
private GCHandle inHandle;
private GCHandle outHandle;
private ulong alignment;
public DataTable({Op1BaseType}[] inArray, {RetBaseType}[] outArray, int alignment)
{
int sizeOfinArray = inArray.Length * Unsafe.SizeOf<{Op1BaseType}>();
int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<{RetBaseType}>();
if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray || (alignment * 2) < sizeOfoutArray)
{
throw new ArgumentException("Invalid value of alignment");
}
this.inArray = new byte[alignment * 2];
this.outArray = new byte[alignment * 2];
this.inHandle = GCHandle.Alloc(this.inArray, GCHandleType.Pinned);
this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned);
this.alignment = (ulong)alignment;
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArrayPtr), ref Unsafe.As<{Op1BaseType}, byte>(ref inArray[0]), (uint)sizeOfinArray);
}
public void* inArrayPtr => Align((byte*)(inHandle.AddrOfPinnedObject().ToPointer()), alignment);
public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment);
public void Dispose()
{
inHandle.Free();
outHandle.Free();
}
private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
{
return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
}
}
private struct TestStruct
{
public {Op1VectorType}<{Op1BaseType}> _fld;
public static TestStruct Create()
{
var testStruct = new TestStruct();
for (var i = 0; i < Op1ElementCount; i++) { _data[i] = {NextValueOp1}; }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<{Op1VectorType}<{Op1BaseType}>, byte>(ref testStruct._fld), ref Unsafe.As<{Op1BaseType}, byte>(ref _data[0]), (uint)Unsafe.SizeOf<{Op1VectorType}<{Op1BaseType}>>());
return testStruct;
}
public void RunStructFldScenario({TemplateName}UnaryOpTest__{TestName} testClass)
{
var result = {Isa}.{Method}(_fld, {Imm});
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld, testClass._dataTable.outArrayPtr);
}
public void RunStructFldScenario_Load({TemplateName}UnaryOpTest__{TestName} testClass)
{
fixed ({Op1VectorType}<{Op1BaseType}>* pFld = &_fld)
{
var result = {Isa}.{Method}(
{LoadIsa}.Load{Op1VectorType}(({Op1BaseType}*)(pFld)),
{Imm}
);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld, testClass._dataTable.outArrayPtr);
}
}
}
private static readonly int LargestVectorSize = {LargestVectorSize};
private static readonly int Op1ElementCount = Unsafe.SizeOf<{Op1VectorType}<{Op1BaseType}>>() / sizeof({Op1BaseType});
private static readonly int RetElementCount = Unsafe.SizeOf<{RetVectorType}<{RetBaseType}>>() / sizeof({RetBaseType});
private static readonly byte Imm = {Imm};
private static {Op1BaseType}[] _data = new {Op1BaseType}[Op1ElementCount];
private static {Op1VectorType}<{Op1BaseType}> _clsVar;
private {Op1VectorType}<{Op1BaseType}> _fld;
private DataTable _dataTable;
static {TemplateName}UnaryOpTest__{TestName}()
{
for (var i = 0; i < Op1ElementCount; i++) { _data[i] = {NextValueOp1}; }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<{Op1VectorType}<{Op1BaseType}>, byte>(ref _clsVar), ref Unsafe.As<{Op1BaseType}, byte>(ref _data[0]), (uint)Unsafe.SizeOf<{Op1VectorType}<{Op1BaseType}>>());
}
public {TemplateName}UnaryOpTest__{TestName}()
{
Succeeded = true;
for (var i = 0; i < Op1ElementCount; i++) { _data[i] = {NextValueOp1}; }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<{Op1VectorType}<{Op1BaseType}>, byte>(ref _fld), ref Unsafe.As<{Op1BaseType}, byte>(ref _data[0]), (uint)Unsafe.SizeOf<{Op1VectorType}<{Op1BaseType}>>());
for (var i = 0; i < Op1ElementCount; i++) { _data[i] = {NextValueOp1}; }
_dataTable = new DataTable(_data, new {RetBaseType}[RetElementCount], LargestVectorSize);
}
public bool IsSupported => {Isa}.IsSupported;
public bool Succeeded { get; set; }
public void RunBasicScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
var result = {Isa}.{Method}(
Unsafe.Read<{Op1VectorType}<{Op1BaseType}>>(_dataTable.inArrayPtr),
{Imm}
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr);
}
public void RunBasicScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load));
var result = {Isa}.{Method}(
{LoadIsa}.Load{Op1VectorType}(({Op1BaseType}*)(_dataTable.inArrayPtr)),
{Imm}
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
var result = typeof({Isa}).GetMethod(nameof({Isa}.{Method}), new Type[] { typeof({Op1VectorType}<{Op1BaseType}>), typeof(byte) })
.Invoke(null, new object[] {
Unsafe.Read<{Op1VectorType}<{Op1BaseType}>>(_dataTable.inArrayPtr),
(byte){Imm}
});
Unsafe.Write(_dataTable.outArrayPtr, ({RetVectorType}<{RetBaseType}>)(result));
ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load));
var result = typeof({Isa}).GetMethod(nameof({Isa}.{Method}), new Type[] { typeof({Op1VectorType}<{Op1BaseType}>), typeof(byte) })
.Invoke(null, new object[] {
{LoadIsa}.Load{Op1VectorType}(({Op1BaseType}*)(_dataTable.inArrayPtr)),
(byte){Imm}
});
Unsafe.Write(_dataTable.outArrayPtr, ({RetVectorType}<{RetBaseType}>)(result));
ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr);
}
public void RunClsVarScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
var result = {Isa}.{Method}(
_clsVar,
{Imm}
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar, _dataTable.outArrayPtr);
}
public void RunClsVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load));
fixed ({Op1VectorType}<{Op1BaseType}>* pClsVar = &_clsVar)
{
var result = {Isa}.{Method}(
{LoadIsa}.Load{Op1VectorType}(({Op1BaseType}*)(pClsVar)),
{Imm}
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar, _dataTable.outArrayPtr);
}
}
public void RunLclVarScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
var firstOp = Unsafe.Read<{Op1VectorType}<{Op1BaseType}>>(_dataTable.inArrayPtr);
var result = {Isa}.{Method}(firstOp, {Imm});
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(firstOp, _dataTable.outArrayPtr);
}
public void RunLclVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load));
var firstOp = {LoadIsa}.Load{Op1VectorType}(({Op1BaseType}*)(_dataTable.inArrayPtr));
var result = {Isa}.{Method}(firstOp, {Imm});
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(firstOp, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
var test = new {TemplateName}UnaryOpTest__{TestName}();
var result = {Isa}.{Method}(test._fld, {Imm});
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load));
var test = new {TemplateName}UnaryOpTest__{TestName}();
fixed ({Op1VectorType}<{Op1BaseType}>* pFld = &test._fld)
{
var result = {Isa}.{Method}(
{LoadIsa}.Load{Op1VectorType}(({Op1BaseType}*)(pFld)),
{Imm}
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld, _dataTable.outArrayPtr);
}
}
public void RunClassFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
var result = {Isa}.{Method}(_fld, {Imm});
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld, _dataTable.outArrayPtr);
}
public void RunClassFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load));
fixed ({Op1VectorType}<{Op1BaseType}>* pFld = &_fld)
{
var result = {Isa}.{Method}(
{LoadIsa}.Load{Op1VectorType}(({Op1BaseType}*)(pFld)),
{Imm}
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld, _dataTable.outArrayPtr);
}
}
public void RunStructLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
var test = TestStruct.Create();
var result = {Isa}.{Method}(test._fld, {Imm});
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld, _dataTable.outArrayPtr);
}
public void RunStructLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load));
var test = TestStruct.Create();
var result = {Isa}.{Method}(
{LoadIsa}.Load{Op1VectorType}(({Op1BaseType}*)(&test._fld)),
{Imm}
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld, _dataTable.outArrayPtr);
}
public void RunStructFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
var test = TestStruct.Create();
test.RunStructFldScenario(this);
}
public void RunStructFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load));
var test = TestStruct.Create();
test.RunStructFldScenario_Load(this);
}
public void RunUnsupportedScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario));
bool succeeded = false;
try
{
RunBasicScenario_UnsafeRead();
}
catch (PlatformNotSupportedException)
{
succeeded = true;
}
if (!succeeded)
{
Succeeded = false;
}
}
private void ValidateResult({Op1VectorType}<{Op1BaseType}> firstOp, void* result, [CallerMemberName] string method = "")
{
{Op1BaseType}[] inArray = new {Op1BaseType}[Op1ElementCount];
{RetBaseType}[] outArray = new {RetBaseType}[RetElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<{Op1BaseType}, byte>(ref inArray[0]), firstOp);
Unsafe.CopyBlockUnaligned(ref Unsafe.As<{RetBaseType}, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<{RetVectorType}<{RetBaseType}>>());
ValidateResult(inArray, outArray, method);
}
private void ValidateResult(void* firstOp, void* result, [CallerMemberName] string method = "")
{
{Op1BaseType}[] inArray = new {Op1BaseType}[Op1ElementCount];
{RetBaseType}[] outArray = new {RetBaseType}[RetElementCount];
Unsafe.CopyBlockUnaligned(ref Unsafe.As<{Op1BaseType}, byte>(ref inArray[0]), ref Unsafe.AsRef<byte>(firstOp), (uint)Unsafe.SizeOf<{Op1VectorType}<{Op1BaseType}>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<{RetBaseType}, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<{RetVectorType}<{RetBaseType}>>());
ValidateResult(inArray, outArray, method);
}
private void ValidateResult({Op1BaseType}[] firstOp, {RetBaseType}[] result, [CallerMemberName] string method = "")
{
bool succeeded = true;
{TemplateValidationLogic}
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"{nameof({Isa})}.{nameof({Isa}.{Method})}<{RetBaseType}>({Op1VectorType}<{Op1BaseType}>, {Imm}): {method} failed:");
TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})");
TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
 * script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;
namespace JIT.HardwareIntrinsics.Arm
{
public static partial class Program
{
private static void {TestName}()
{
var test = new {TemplateName}UnaryOpTest__{TestName}();
if (test.IsSupported)
{
// Validates basic functionality works, using Unsafe.Read
test.RunBasicScenario_UnsafeRead();
if ({LoadIsa}.IsSupported)
{
// Validates basic functionality works, using Load
test.RunBasicScenario_Load();
}
// Validates calling via reflection works, using Unsafe.Read
test.RunReflectionScenario_UnsafeRead();
if ({LoadIsa}.IsSupported)
{
// Validates calling via reflection works, using Load
test.RunReflectionScenario_Load();
}
// Validates passing a static member works
test.RunClsVarScenario();
if ({LoadIsa}.IsSupported)
{
// Validates passing a static member works, using pinning and Load
test.RunClsVarScenario_Load();
}
// Validates passing a local works, using Unsafe.Read
test.RunLclVarScenario_UnsafeRead();
if ({LoadIsa}.IsSupported)
{
// Validates passing a local works, using Load
test.RunLclVarScenario_Load();
}
// Validates passing the field of a local class works
test.RunClassLclFldScenario();
if ({LoadIsa}.IsSupported)
{
// Validates passing the field of a local class works, using pinning and Load
test.RunClassLclFldScenario_Load();
}
// Validates passing an instance member of a class works
test.RunClassFldScenario();
if ({LoadIsa}.IsSupported)
{
// Validates passing an instance member of a class works, using pinning and Load
test.RunClassFldScenario_Load();
}
// Validates passing the field of a local struct works
test.RunStructLclFldScenario();
if ({LoadIsa}.IsSupported)
{
// Validates passing the field of a local struct works, using pinning and Load
test.RunStructLclFldScenario_Load();
}
// Validates passing an instance member of a struct works
test.RunStructFldScenario();
if ({LoadIsa}.IsSupported)
{
// Validates passing an instance member of a struct works, using pinning and Load
test.RunStructFldScenario_Load();
}
}
else
{
// Validates we throw on unsupported hardware
test.RunUnsupportedScenario();
}
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class {TemplateName}UnaryOpTest__{TestName}
{
private struct DataTable
{
private byte[] inArray;
private byte[] outArray;
private GCHandle inHandle;
private GCHandle outHandle;
private ulong alignment;
public DataTable({Op1BaseType}[] inArray, {RetBaseType}[] outArray, int alignment)
{
int sizeOfinArray = inArray.Length * Unsafe.SizeOf<{Op1BaseType}>();
int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<{RetBaseType}>();
if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray || (alignment * 2) < sizeOfoutArray)
{
throw new ArgumentException("Invalid value of alignment");
}
this.inArray = new byte[alignment * 2];
this.outArray = new byte[alignment * 2];
this.inHandle = GCHandle.Alloc(this.inArray, GCHandleType.Pinned);
this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned);
this.alignment = (ulong)alignment;
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArrayPtr), ref Unsafe.As<{Op1BaseType}, byte>(ref inArray[0]), (uint)sizeOfinArray);
}
public void* inArrayPtr => Align((byte*)(inHandle.AddrOfPinnedObject().ToPointer()), alignment);
public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment);
public void Dispose()
{
inHandle.Free();
outHandle.Free();
}
private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
{
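                // Round the pointer up to the next multiple of expectedAlignment.
                // Assumes expectedAlignment is a power of two (the constructor only accepts 8 or 16).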
return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
}
}
private struct TestStruct
{
public {Op1VectorType}<{Op1BaseType}> _fld;
public static TestStruct Create()
{
var testStruct = new TestStruct();
for (var i = 0; i < Op1ElementCount; i++) { _data[i] = {NextValueOp1}; }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<{Op1VectorType}<{Op1BaseType}>, byte>(ref testStruct._fld), ref Unsafe.As<{Op1BaseType}, byte>(ref _data[0]), (uint)Unsafe.SizeOf<{Op1VectorType}<{Op1BaseType}>>());
return testStruct;
}
public void RunStructFldScenario({TemplateName}UnaryOpTest__{TestName} testClass)
{
var result = {Isa}.{Method}(_fld, {Imm});
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld, testClass._dataTable.outArrayPtr);
}
public void RunStructFldScenario_Load({TemplateName}UnaryOpTest__{TestName} testClass)
{
fixed ({Op1VectorType}<{Op1BaseType}>* pFld = &_fld)
{
var result = {Isa}.{Method}(
{LoadIsa}.Load{Op1VectorType}(({Op1BaseType}*)(pFld)),
{Imm}
);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld, testClass._dataTable.outArrayPtr);
}
}
}
private static readonly int LargestVectorSize = {LargestVectorSize};
private static readonly int Op1ElementCount = Unsafe.SizeOf<{Op1VectorType}<{Op1BaseType}>>() / sizeof({Op1BaseType});
private static readonly int RetElementCount = Unsafe.SizeOf<{RetVectorType}<{RetBaseType}>>() / sizeof({RetBaseType});
private static readonly byte Imm = {Imm};
private static {Op1BaseType}[] _data = new {Op1BaseType}[Op1ElementCount];
private static {Op1VectorType}<{Op1BaseType}> _clsVar;
private {Op1VectorType}<{Op1BaseType}> _fld;
private DataTable _dataTable;
static {TemplateName}UnaryOpTest__{TestName}()
{
for (var i = 0; i < Op1ElementCount; i++) { _data[i] = {NextValueOp1}; }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<{Op1VectorType}<{Op1BaseType}>, byte>(ref _clsVar), ref Unsafe.As<{Op1BaseType}, byte>(ref _data[0]), (uint)Unsafe.SizeOf<{Op1VectorType}<{Op1BaseType}>>());
}
public {TemplateName}UnaryOpTest__{TestName}()
{
Succeeded = true;
for (var i = 0; i < Op1ElementCount; i++) { _data[i] = {NextValueOp1}; }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<{Op1VectorType}<{Op1BaseType}>, byte>(ref _fld), ref Unsafe.As<{Op1BaseType}, byte>(ref _data[0]), (uint)Unsafe.SizeOf<{Op1VectorType}<{Op1BaseType}>>());
for (var i = 0; i < Op1ElementCount; i++) { _data[i] = {NextValueOp1}; }
_dataTable = new DataTable(_data, new {RetBaseType}[RetElementCount], LargestVectorSize);
}
public bool IsSupported => {Isa}.IsSupported;
public bool Succeeded { get; set; }
public void RunBasicScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
var result = {Isa}.{Method}(
Unsafe.Read<{Op1VectorType}<{Op1BaseType}>>(_dataTable.inArrayPtr),
{Imm}
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr);
}
public void RunBasicScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load));
var result = {Isa}.{Method}(
{LoadIsa}.Load{Op1VectorType}(({Op1BaseType}*)(_dataTable.inArrayPtr)),
{Imm}
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
var result = typeof({Isa}).GetMethod(nameof({Isa}.{Method}), new Type[] { typeof({Op1VectorType}<{Op1BaseType}>), typeof(byte) })
.Invoke(null, new object[] {
Unsafe.Read<{Op1VectorType}<{Op1BaseType}>>(_dataTable.inArrayPtr),
(byte){Imm}
});
Unsafe.Write(_dataTable.outArrayPtr, ({RetVectorType}<{RetBaseType}>)(result));
ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load));
var result = typeof({Isa}).GetMethod(nameof({Isa}.{Method}), new Type[] { typeof({Op1VectorType}<{Op1BaseType}>), typeof(byte) })
.Invoke(null, new object[] {
{LoadIsa}.Load{Op1VectorType}(({Op1BaseType}*)(_dataTable.inArrayPtr)),
(byte){Imm}
});
Unsafe.Write(_dataTable.outArrayPtr, ({RetVectorType}<{RetBaseType}>)(result));
ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr);
}
public void RunClsVarScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
var result = {Isa}.{Method}(
_clsVar,
{Imm}
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar, _dataTable.outArrayPtr);
}
public void RunClsVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load));
fixed ({Op1VectorType}<{Op1BaseType}>* pClsVar = &_clsVar)
{
var result = {Isa}.{Method}(
{LoadIsa}.Load{Op1VectorType}(({Op1BaseType}*)(pClsVar)),
{Imm}
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar, _dataTable.outArrayPtr);
}
}
public void RunLclVarScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
var firstOp = Unsafe.Read<{Op1VectorType}<{Op1BaseType}>>(_dataTable.inArrayPtr);
var result = {Isa}.{Method}(firstOp, {Imm});
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(firstOp, _dataTable.outArrayPtr);
}
public void RunLclVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load));
var firstOp = {LoadIsa}.Load{Op1VectorType}(({Op1BaseType}*)(_dataTable.inArrayPtr));
var result = {Isa}.{Method}(firstOp, {Imm});
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(firstOp, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
var test = new {TemplateName}UnaryOpTest__{TestName}();
var result = {Isa}.{Method}(test._fld, {Imm});
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load));
var test = new {TemplateName}UnaryOpTest__{TestName}();
fixed ({Op1VectorType}<{Op1BaseType}>* pFld = &test._fld)
{
var result = {Isa}.{Method}(
{LoadIsa}.Load{Op1VectorType}(({Op1BaseType}*)(pFld)),
{Imm}
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld, _dataTable.outArrayPtr);
}
}
public void RunClassFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
var result = {Isa}.{Method}(_fld, {Imm});
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld, _dataTable.outArrayPtr);
}
public void RunClassFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load));
fixed ({Op1VectorType}<{Op1BaseType}>* pFld = &_fld)
{
var result = {Isa}.{Method}(
{LoadIsa}.Load{Op1VectorType}(({Op1BaseType}*)(pFld)),
{Imm}
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld, _dataTable.outArrayPtr);
}
}
public void RunStructLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
var test = TestStruct.Create();
var result = {Isa}.{Method}(test._fld, {Imm});
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld, _dataTable.outArrayPtr);
}
public void RunStructLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load));
var test = TestStruct.Create();
var result = {Isa}.{Method}(
{LoadIsa}.Load{Op1VectorType}(({Op1BaseType}*)(&test._fld)),
{Imm}
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld, _dataTable.outArrayPtr);
}
public void RunStructFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
var test = TestStruct.Create();
test.RunStructFldScenario(this);
}
public void RunStructFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load));
var test = TestStruct.Create();
test.RunStructFldScenario_Load(this);
}
public void RunUnsupportedScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario));
bool succeeded = false;
try
{
RunBasicScenario_UnsafeRead();
}
catch (PlatformNotSupportedException)
{
succeeded = true;
}
if (!succeeded)
{
Succeeded = false;
}
}
private void ValidateResult({Op1VectorType}<{Op1BaseType}> firstOp, void* result, [CallerMemberName] string method = "")
{
{Op1BaseType}[] inArray = new {Op1BaseType}[Op1ElementCount];
{RetBaseType}[] outArray = new {RetBaseType}[RetElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<{Op1BaseType}, byte>(ref inArray[0]), firstOp);
Unsafe.CopyBlockUnaligned(ref Unsafe.As<{RetBaseType}, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<{RetVectorType}<{RetBaseType}>>());
ValidateResult(inArray, outArray, method);
}
private void ValidateResult(void* firstOp, void* result, [CallerMemberName] string method = "")
{
{Op1BaseType}[] inArray = new {Op1BaseType}[Op1ElementCount];
{RetBaseType}[] outArray = new {RetBaseType}[RetElementCount];
Unsafe.CopyBlockUnaligned(ref Unsafe.As<{Op1BaseType}, byte>(ref inArray[0]), ref Unsafe.AsRef<byte>(firstOp), (uint)Unsafe.SizeOf<{Op1VectorType}<{Op1BaseType}>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<{RetBaseType}, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<{RetVectorType}<{RetBaseType}>>());
ValidateResult(inArray, outArray, method);
}
private void ValidateResult({Op1BaseType}[] firstOp, {RetBaseType}[] result, [CallerMemberName] string method = "")
{
bool succeeded = true;
{TemplateValidationLogic}
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"{nameof({Isa})}.{nameof({Isa}.{Method})}<{RetBaseType}>({Op1VectorType}<{Op1BaseType}>, {Imm}): {method} failed:");
TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})");
TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
}
}
| -1 |
dotnet/runtime | 65,932 | Update stale comments that reference GitHub issues | Fixes #65931
[Cleanup Issue-URLs in Code · Issue #63902 · dotnet/runtime](https://github.com/dotnet/runtime/issues/63902) identified stale comments that reference GitHub issues. This PR updates comments to reflect updated statuses.
_I'm marking this PR as a draft while working through that issue to see if other comments can also be quickly updated._
/cc @deeprobin | jeffhandley | "2022-02-27T20:14:23Z" | "2022-02-28T03:21:49Z" | 68fb7fc68cc1af800bee1d38af22b5027bf4ab4e | a58437f7e6794aba690e053827a5d436919c6bc3 | Update stale comments that reference GitHub issues. Fixes #65931
[Cleanup Issue-URLs in Code · Issue #63902 · dotnet/runtime](https://github.com/dotnet/runtime/issues/63902) identified stale comments that reference GitHub issues. This PR updates comments to reflect updated statuses.
_I'm marking this PR as a draft while working through that issue to see if other comments can also be quickly updated._
/cc @deeprobin | ./src/coreclr/tools/Common/TypeSystem/IL/Stubs/StructMarshallingThunk.Sorting.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using Internal.TypeSystem;
namespace Internal.IL.Stubs
{
// Functionality related to deterministic ordering of types
partial class StructMarshallingThunk
{
protected override int ClassCode => 340834018;
protected override int CompareToImpl(MethodDesc other, TypeSystemComparer comparer)
{
var otherMethod = (StructMarshallingThunk)other;
int result = ThunkType - otherMethod.ThunkType;
if (result != 0)
return result;
return comparer.Compare(ManagedType, otherMethod.ManagedType);
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using Internal.TypeSystem;
namespace Internal.IL.Stubs
{
// Functionality related to deterministic ordering of types
partial class StructMarshallingThunk
{
protected override int ClassCode => 340834018;
protected override int CompareToImpl(MethodDesc other, TypeSystemComparer comparer)
{
var otherMethod = (StructMarshallingThunk)other;
int result = ThunkType - otherMethod.ThunkType;
if (result != 0)
return result;
return comparer.Compare(ManagedType, otherMethod.ManagedType);
}
}
}
| -1 |
dotnet/runtime | 65,932 | Update stale comments that reference GitHub issues | Fixes #65931
[Cleanup Issue-URLs in Code · Issue #63902 · dotnet/runtime](https://github.com/dotnet/runtime/issues/63902) identified stale comments that reference GitHub issues. This PR updates comments to reflect updated statuses.
_I'm marking this PR as a draft while working through that issue to see if other comments can also be quickly updated._
/cc @deeprobin | jeffhandley | "2022-02-27T20:14:23Z" | "2022-02-28T03:21:49Z" | 68fb7fc68cc1af800bee1d38af22b5027bf4ab4e | a58437f7e6794aba690e053827a5d436919c6bc3 | Update stale comments that reference GitHub issues. Fixes #65931
[Cleanup Issue-URLs in Code · Issue #63902 · dotnet/runtime](https://github.com/dotnet/runtime/issues/63902) identified stale comments that reference GitHub issues. This PR updates comments to reflect updated statuses.
_I'm marking this PR as a draft while working through that issue to see if other comments can also be quickly updated._
/cc @deeprobin | ./src/tests/JIT/Methodical/MDArray/DataTypes/sbyte.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
using System;
public struct VT
{
public sbyte[,] sbyte2darr;
public sbyte[, ,] sbyte3darr;
public sbyte[,] sbyte2darr_b;
public sbyte[, ,] sbyte3darr_b;
}
public class CL
{
public sbyte[,] sbyte2darr = { { 0, 1 }, { 0, 0 } };
public sbyte[, ,] sbyte3darr = { { { 0, 0 } }, { { 0, 1 } }, { { 0, 0 } } };
public sbyte[,] sbyte2darr_b = { { 0, 49 }, { 0, 0 } };
public sbyte[, ,] sbyte3darr_b = { { { 0, 0 } }, { { 0, 49 } }, { { 0, 0 } } };
}
public class sbyteMDArrTest
{
static sbyte[,] sbyte2darr = { { 0, 1 }, { 0, 0 } };
static sbyte[, ,] sbyte3darr = { { { 0, 0 } }, { { 0, 1 } }, { { 0, 0 } } };
static sbyte[,] sbyte2darr_b = { { 0, 49 }, { 0, 0 } };
static sbyte[, ,] sbyte3darr_b = { { { 0, 0 } }, { { 0, 49 } }, { { 0, 0 } } };
static sbyte[][,] ja1 = new sbyte[2][,];
static sbyte[][, ,] ja2 = new sbyte[2][, ,];
static sbyte[][,] ja1_b = new sbyte[2][,];
static sbyte[][, ,] ja2_b = new sbyte[2][, ,];
public static int Main()
{
bool pass = true;
VT vt1;
vt1.sbyte2darr = new sbyte[,] { { 0, 1 }, { 0, 0 } };
vt1.sbyte3darr = new sbyte[,,] { { { 0, 0 } }, { { 0, 1 } }, { { 0, 0 } } };
vt1.sbyte2darr_b = new sbyte[,] { { 0, 49 }, { 0, 0 } };
vt1.sbyte3darr_b = new sbyte[,,] { { { 0, 0 } }, { { 0, 49 } }, { { 0, 0 } } };
CL cl1 = new CL();
ja1[0] = new sbyte[,] { { 0, 1 }, { 0, 0 } };
ja2[1] = new sbyte[,,] { { { 0, 0 } }, { { 0, 1 } }, { { 0, 0 } } };
ja1_b[0] = new sbyte[,] { { 0, 49 }, { 0, 0 } };
ja2_b[1] = new sbyte[,,] { { { 0, 0 } }, { { 0, 49 } }, { { 0, 0 } } };
sbyte result = 1;
// 2D
if (result != sbyte2darr[0, 1])
{
Console.WriteLine("ERROR:");
Console.WriteLine("result is: {0}", result);
Console.WriteLine("2darr[0, 1] is: {0}", sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (result != vt1.sbyte2darr[0, 1])
{
Console.WriteLine("ERROR:");
Console.WriteLine("result is: {0}", result);
Console.WriteLine("vt1.sbyte2darr[0, 1] is: {0}", vt1.sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (result != cl1.sbyte2darr[0, 1])
{
Console.WriteLine("ERROR:");
Console.WriteLine("result is: {0}", result);
Console.WriteLine("cl1.sbyte2darr[0, 1] is: {0}", cl1.sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (result != ja1[0][0, 1])
{
Console.WriteLine("ERROR:");
Console.WriteLine("result is: {0}", result);
Console.WriteLine("ja1[0][0, 1] is: {0}", ja1[0][0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
// 3D
if (result != sbyte3darr[1, 0, 1])
{
Console.WriteLine("ERROR:");
Console.WriteLine("result is: {0}", result);
Console.WriteLine("sbyte3darr[1,0,1] is: {0}", sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (result != vt1.sbyte3darr[1, 0, 1])
{
Console.WriteLine("ERROR:");
Console.WriteLine("result is: {0}", result);
Console.WriteLine("vt1.sbyte3darr[1,0,1] is: {0}", vt1.sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (result != cl1.sbyte3darr[1, 0, 1])
{
Console.WriteLine("ERROR:");
Console.WriteLine("result is: {0}", result);
Console.WriteLine("cl1.sbyte3darr[1,0,1] is: {0}", cl1.sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (result != ja2[1][1, 0, 1])
{
Console.WriteLine("ERROR:");
Console.WriteLine("result is: {0}", result);
Console.WriteLine("ja2[1][1,0,1] is: {0}", ja2[1][1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
//SByteToBool tests
bool Bool_result = true;
// 2D
if (Bool_result != Convert.ToBoolean(sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Bool_result is: {0}", Bool_result);
Console.WriteLine("2darr[0, 1] is: {0}", sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Bool_result != Convert.ToBoolean(vt1.sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Bool_result is: {0}", Bool_result);
Console.WriteLine("vt1.sbyte2darr[0, 1] is: {0}", vt1.sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Bool_result != Convert.ToBoolean(cl1.sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Bool_result is: {0}", Bool_result);
Console.WriteLine("cl1.sbyte2darr[0, 1] is: {0}", cl1.sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Bool_result != Convert.ToBoolean(ja1[0][0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Bool_result is: {0}", Bool_result);
Console.WriteLine("ja1[0][0, 1] is: {0}", ja1[0][0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
// 3D
if (Bool_result != Convert.ToBoolean(sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Bool_result is: {0}", Bool_result);
Console.WriteLine("sbyte3darr[1,0,1] is: {0}", sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Bool_result != Convert.ToBoolean(vt1.sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Bool_result is: {0}", Bool_result);
Console.WriteLine("vt1.sbyte3darr[1,0,1] is: {0}", vt1.sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Bool_result != Convert.ToBoolean(cl1.sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Bool_result is: {0}", Bool_result);
Console.WriteLine("cl1.sbyte3darr[1,0,1] is: {0}", cl1.sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Bool_result != Convert.ToBoolean(ja2[1][1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Bool_result is: {0}", Bool_result);
Console.WriteLine("ja2[1][1,0,1] is: {0}", ja2[1][1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
//SByteToByte tests
byte Byte_result = 1;
// 2D
if (Byte_result != Convert.ToSByte(sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Byte_result is: {0}", Byte_result);
Console.WriteLine("2darr[0, 1] is: {0}", sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Byte_result != Convert.ToSByte(vt1.sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Byte_result is: {0}", Byte_result);
Console.WriteLine("vt1.sbyte2darr[0, 1] is: {0}", vt1.sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Byte_result != Convert.ToSByte(cl1.sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Byte_result is: {0}", Byte_result);
Console.WriteLine("cl1.sbyte2darr[0, 1] is: {0}", cl1.sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Byte_result != Convert.ToSByte(ja1[0][0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Byte_result is: {0}", Byte_result);
Console.WriteLine("ja1[0][0, 1] is: {0}", ja1[0][0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
// 3D
if (Byte_result != Convert.ToSByte(sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Byte_result is: {0}", Byte_result);
Console.WriteLine("sbyte3darr[1,0,1] is: {0}", sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Byte_result != Convert.ToSByte(vt1.sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Byte_result is: {0}", Byte_result);
Console.WriteLine("vt1.sbyte3darr[1,0,1] is: {0}", vt1.sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Byte_result != Convert.ToSByte(cl1.sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Byte_result is: {0}", Byte_result);
Console.WriteLine("cl1.sbyte3darr[1,0,1] is: {0}", cl1.sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Byte_result != Convert.ToSByte(ja2[1][1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Byte_result is: {0}", Byte_result);
Console.WriteLine("ja2[1][1,0,1] is: {0}", ja2[1][1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
//SByteToChar tests
char Char_result = '1';
// 2D
if (Char_result != Convert.ToChar(sbyte2darr_b[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Char_result is: {0}", Char_result);
Console.WriteLine("2darr[0, 1] is: {0}", sbyte2darr_b[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Char_result != Convert.ToChar(vt1.sbyte2darr_b[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Char_result is: {0}", Char_result);
Console.WriteLine("vt1.sbyte2darr_b[0, 1] is: {0}", vt1.sbyte2darr_b[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Char_result != Convert.ToChar(cl1.sbyte2darr_b[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Char_result is: {0}", Char_result);
Console.WriteLine("cl1.sbyte2darr_b[0, 1] is: {0}", cl1.sbyte2darr_b[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Char_result != Convert.ToChar(ja1_b[0][0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Char_result is: {0}", Char_result);
Console.WriteLine("ja1_b[0][0, 1] is: {0}", ja1_b[0][0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
// 3D
if (Char_result != Convert.ToChar(sbyte3darr_b[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Char_result is: {0}", Char_result);
Console.WriteLine("sbyte3darr_b[1,0,1] is: {0}", sbyte3darr_b[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Char_result != Convert.ToChar(vt1.sbyte3darr_b[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Char_result is: {0}", Char_result);
Console.WriteLine("vt1.sbyte3darr_b[1,0,1] is: {0}", vt1.sbyte3darr_b[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Char_result != Convert.ToChar(cl1.sbyte3darr_b[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Char_result is: {0}", Char_result);
Console.WriteLine("cl1.sbyte3darr_b[1,0,1] is: {0}", cl1.sbyte3darr_b[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Char_result != Convert.ToChar(ja2_b[1][1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Char_result is: {0}", Char_result);
Console.WriteLine("ja2_b[1][1,0,1] is: {0}", ja2_b[1][1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
//SByteToDecimal tests
decimal Decimal_result = 1;
// 2D
if (Decimal_result != Convert.ToDecimal(sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Decimal_result is: {0}", Decimal_result);
Console.WriteLine("2darr[0, 1] is: {0}", sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Decimal_result != Convert.ToDecimal(vt1.sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Decimal_result is: {0}", Decimal_result);
Console.WriteLine("vt1.sbyte2darr[0, 1] is: {0}", vt1.sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Decimal_result != Convert.ToDecimal(cl1.sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Decimal_result is: {0}", Decimal_result);
Console.WriteLine("cl1.sbyte2darr[0, 1] is: {0}", cl1.sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Decimal_result != Convert.ToDecimal(ja1[0][0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Decimal_result is: {0}", Decimal_result);
Console.WriteLine("ja1[0][0, 1] is: {0}", ja1[0][0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
// 3D
if (Decimal_result != Convert.ToDecimal(sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Decimal_result is: {0}", Decimal_result);
Console.WriteLine("sbyte3darr[1,0,1] is: {0}", sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Decimal_result != Convert.ToDecimal(vt1.sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Decimal_result is: {0}", Decimal_result);
Console.WriteLine("vt1.sbyte3darr[1,0,1] is: {0}", vt1.sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Decimal_result != Convert.ToDecimal(cl1.sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Decimal_result is: {0}", Decimal_result);
Console.WriteLine("cl1.sbyte3darr[1,0,1] is: {0}", cl1.sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Decimal_result != Convert.ToDecimal(ja2[1][1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Decimal_result is: {0}", Decimal_result);
Console.WriteLine("ja2[1][1,0,1] is: {0}", ja2[1][1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
//SByteToDouble tests
double Double_result = 1;
// 2D
if (Double_result != Convert.ToDouble(sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Double_result is: {0}", Double_result);
Console.WriteLine("2darr[0, 1] is: {0}", sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Double_result != Convert.ToDouble(vt1.sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Double_result is: {0}", Double_result);
Console.WriteLine("vt1.sbyte2darr[0, 1] is: {0}", vt1.sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Double_result != Convert.ToDouble(cl1.sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Double_result is: {0}", Double_result);
Console.WriteLine("cl1.sbyte2darr[0, 1] is: {0}", cl1.sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Double_result != Convert.ToDouble(ja1[0][0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Double_result is: {0}", Double_result);
Console.WriteLine("ja1[0][0, 1] is: {0}", ja1[0][0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
// 3D
if (Double_result != Convert.ToDouble(sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Double_result is: {0}", Double_result);
Console.WriteLine("sbyte3darr[1,0,1] is: {0}", sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Double_result != Convert.ToDouble(vt1.sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Double_result is: {0}", Double_result);
Console.WriteLine("vt1.sbyte3darr[1,0,1] is: {0}", vt1.sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Double_result != Convert.ToDouble(cl1.sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Double_result is: {0}", Double_result);
Console.WriteLine("cl1.sbyte3darr[1,0,1] is: {0}", cl1.sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Double_result != Convert.ToDouble(ja2[1][1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Double_result is: {0}", Double_result);
Console.WriteLine("ja2[1][1,0,1] is: {0}", ja2[1][1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
//SByteToSingle tests
float Single_result = 1;
// 2D
if (Single_result != Convert.ToSingle(sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Single_result is: {0}", Single_result);
Console.WriteLine("2darr[0, 1] is: {0}", sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Single_result != Convert.ToSingle(vt1.sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Single_result is: {0}", Single_result);
Console.WriteLine("vt1.sbyte2darr[0, 1] is: {0}", vt1.sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Single_result != Convert.ToSingle(cl1.sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Single_result is: {0}", Single_result);
Console.WriteLine("cl1.sbyte2darr[0, 1] is: {0}", cl1.sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Single_result != Convert.ToSingle(ja1[0][0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Single_result is: {0}", Single_result);
Console.WriteLine("ja1[0][0, 1] is: {0}", ja1[0][0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
// 3D
if (Single_result != Convert.ToSingle(sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Single_result is: {0}", Single_result);
Console.WriteLine("sbyte3darr[1,0,1] is: {0}", sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Single_result != Convert.ToSingle(vt1.sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Single_result is: {0}", Single_result);
Console.WriteLine("vt1.sbyte3darr[1,0,1] is: {0}", vt1.sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Single_result != Convert.ToSingle(cl1.sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Single_result is: {0}", Single_result);
Console.WriteLine("cl1.sbyte3darr[1,0,1] is: {0}", cl1.sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Single_result != Convert.ToSingle(ja2[1][1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Single_result is: {0}", Single_result);
Console.WriteLine("ja2[1][1,0,1] is: {0}", ja2[1][1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
        //SByteToInt32 tests
int Int32_result = 1;
// 2D
if (Int32_result != Convert.ToInt32(sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Int32_result is: {0}", Int32_result);
Console.WriteLine("2darr[0, 1] is: {0}", sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Int32_result != Convert.ToInt32(vt1.sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Int32_result is: {0}", Int32_result);
Console.WriteLine("vt1.sbyte2darr[0, 1] is: {0}", vt1.sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Int32_result != Convert.ToInt32(cl1.sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Int32_result is: {0}", Int32_result);
Console.WriteLine("cl1.sbyte2darr[0, 1] is: {0}", cl1.sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Int32_result != Convert.ToInt32(ja1[0][0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Int32_result is: {0}", Int32_result);
Console.WriteLine("ja1[0][0, 1] is: {0}", ja1[0][0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
// 3D
if (Int32_result != Convert.ToInt32(sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Int32_result is: {0}", Int32_result);
Console.WriteLine("sbyte3darr[1,0,1] is: {0}", sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Int32_result != Convert.ToInt32(vt1.sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Int32_result is: {0}", Int32_result);
Console.WriteLine("vt1.sbyte3darr[1,0,1] is: {0}", vt1.sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Int32_result != Convert.ToInt32(cl1.sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Int32_result is: {0}", Int32_result);
Console.WriteLine("cl1.sbyte3darr[1,0,1] is: {0}", cl1.sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Int32_result != Convert.ToInt32(ja2[1][1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Int32_result is: {0}", Int32_result);
Console.WriteLine("ja2[1][1,0,1] is: {0}", ja2[1][1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
//SByteToInt64 tests
long Int64_result = 1;
// 2D
if (Int64_result != Convert.ToInt64(sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Int64_result is: {0}", Int64_result);
Console.WriteLine("2darr[0, 1] is: {0}", sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Int64_result != Convert.ToInt64(vt1.sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Int64_result is: {0}", Int64_result);
Console.WriteLine("vt1.sbyte2darr[0, 1] is: {0}", vt1.sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Int64_result != Convert.ToInt64(cl1.sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Int64_result is: {0}", Int64_result);
Console.WriteLine("cl1.sbyte2darr[0, 1] is: {0}", cl1.sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Int64_result != Convert.ToInt64(ja1[0][0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Int64_result is: {0}", Int64_result);
Console.WriteLine("ja1[0][0, 1] is: {0}", ja1[0][0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
// 3D
if (Int64_result != Convert.ToInt64(sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Int64_result is: {0}", Int64_result);
Console.WriteLine("sbyte3darr[1,0,1] is: {0}", sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Int64_result != Convert.ToInt64(vt1.sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Int64_result is: {0}", Int64_result);
Console.WriteLine("vt1.sbyte3darr[1,0,1] is: {0}", vt1.sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Int64_result != Convert.ToInt64(cl1.sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Int64_result is: {0}", Int64_result);
Console.WriteLine("cl1.sbyte3darr[1,0,1] is: {0}", cl1.sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Int64_result != Convert.ToInt64(ja2[1][1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Int64_result is: {0}", Int64_result);
Console.WriteLine("ja2[1][1,0,1] is: {0}", ja2[1][1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
//SByteToInt16 tests
short Int16_result = 1;
// 2D
if (Int16_result != Convert.ToInt16(sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Int16_result is: {0}", Int16_result);
Console.WriteLine("2darr[0, 1] is: {0}", sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Int16_result != Convert.ToInt16(vt1.sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Int16_result is: {0}", Int16_result);
Console.WriteLine("vt1.sbyte2darr[0, 1] is: {0}", vt1.sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Int16_result != Convert.ToInt16(cl1.sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Int16_result is: {0}", Int16_result);
Console.WriteLine("cl1.sbyte2darr[0, 1] is: {0}", cl1.sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Int16_result != Convert.ToInt16(ja1[0][0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Int16_result is: {0}", Int16_result);
Console.WriteLine("ja1[0][0, 1] is: {0}", ja1[0][0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
// 3D
if (Int16_result != Convert.ToInt16(sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Int16_result is: {0}", Int16_result);
Console.WriteLine("sbyte3darr[1,0,1] is: {0}", sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Int16_result != Convert.ToInt16(vt1.sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Int16_result is: {0}", Int16_result);
Console.WriteLine("vt1.sbyte3darr[1,0,1] is: {0}", vt1.sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Int16_result != Convert.ToInt16(cl1.sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Int16_result is: {0}", Int16_result);
Console.WriteLine("cl1.sbyte3darr[1,0,1] is: {0}", cl1.sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Int16_result != Convert.ToInt16(ja2[1][1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Int16_result is: {0}", Int16_result);
Console.WriteLine("ja2[1][1,0,1] is: {0}", ja2[1][1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
//SByteToUInt32 tests
uint UInt32_result = 1;
// 2D
if (UInt32_result != Convert.ToUInt32(sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("UInt32_result is: {0}", UInt32_result);
Console.WriteLine("2darr[0, 1] is: {0}", sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (UInt32_result != Convert.ToUInt32(vt1.sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("UInt32_result is: {0}", UInt32_result);
Console.WriteLine("vt1.sbyte2darr[0, 1] is: {0}", vt1.sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (UInt32_result != Convert.ToUInt32(cl1.sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("UInt32_result is: {0}", UInt32_result);
Console.WriteLine("cl1.sbyte2darr[0, 1] is: {0}", cl1.sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (UInt32_result != Convert.ToUInt32(ja1[0][0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("UInt32_result is: {0}", UInt32_result);
Console.WriteLine("ja1[0][0, 1] is: {0}", ja1[0][0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
// 3D
if (UInt32_result != Convert.ToUInt32(sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("UInt32_result is: {0}", UInt32_result);
Console.WriteLine("sbyte3darr[1,0,1] is: {0}", sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (UInt32_result != Convert.ToUInt32(vt1.sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("UInt32_result is: {0}", UInt32_result);
Console.WriteLine("vt1.sbyte3darr[1,0,1] is: {0}", vt1.sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (UInt32_result != Convert.ToUInt32(cl1.sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("UInt32_result is: {0}", UInt32_result);
Console.WriteLine("cl1.sbyte3darr[1,0,1] is: {0}", cl1.sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (UInt32_result != Convert.ToUInt32(ja2[1][1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("UInt32_result is: {0}", UInt32_result);
Console.WriteLine("ja2[1][1,0,1] is: {0}", ja2[1][1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
//SByteToUInt64 tests
ulong UInt64_result = 1;
// 2D
if (UInt64_result != Convert.ToUInt64(sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("UInt64_result is: {0}", UInt64_result);
Console.WriteLine("2darr[0, 1] is: {0}", sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (UInt64_result != Convert.ToUInt64(vt1.sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("UInt64_result is: {0}", UInt64_result);
Console.WriteLine("vt1.sbyte2darr[0, 1] is: {0}", vt1.sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (UInt64_result != Convert.ToUInt64(cl1.sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("UInt64_result is: {0}", UInt64_result);
Console.WriteLine("cl1.sbyte2darr[0, 1] is: {0}", cl1.sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (UInt64_result != Convert.ToUInt64(ja1[0][0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("UInt64_result is: {0}", UInt64_result);
Console.WriteLine("ja1[0][0, 1] is: {0}", ja1[0][0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
// 3D
if (UInt64_result != Convert.ToUInt64(sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("UInt64_result is: {0}", UInt64_result);
Console.WriteLine("sbyte3darr[1,0,1] is: {0}", sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (UInt64_result != Convert.ToUInt64(vt1.sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("UInt64_result is: {0}", UInt64_result);
Console.WriteLine("vt1.sbyte3darr[1,0,1] is: {0}", vt1.sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (UInt64_result != Convert.ToUInt64(cl1.sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("UInt64_result is: {0}", UInt64_result);
Console.WriteLine("cl1.sbyte3darr[1,0,1] is: {0}", cl1.sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (UInt64_result != Convert.ToUInt64(ja2[1][1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("UInt64_result is: {0}", UInt64_result);
Console.WriteLine("ja2[1][1,0,1] is: {0}", ja2[1][1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
//SByteToUInt16 tests
ushort UInt16_result = 1;
// 2D
if (UInt16_result != Convert.ToUInt16(sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("UInt16_result is: {0}", UInt16_result);
Console.WriteLine("2darr[0, 1] is: {0}", sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (UInt16_result != Convert.ToUInt16(vt1.sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("UInt16_result is: {0}", UInt16_result);
Console.WriteLine("vt1.sbyte2darr[0, 1] is: {0}", vt1.sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (UInt16_result != Convert.ToUInt16(cl1.sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("UInt16_result is: {0}", UInt16_result);
Console.WriteLine("cl1.sbyte2darr[0, 1] is: {0}", cl1.sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (UInt16_result != Convert.ToUInt16(ja1[0][0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("UInt16_result is: {0}", UInt16_result);
Console.WriteLine("ja1[0][0, 1] is: {0}", ja1[0][0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
// 3D
if (UInt16_result != Convert.ToUInt16(sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("UInt16_result is: {0}", UInt16_result);
Console.WriteLine("sbyte3darr[1,0,1] is: {0}", sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (UInt16_result != Convert.ToUInt16(vt1.sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("UInt16_result is: {0}", UInt16_result);
Console.WriteLine("vt1.sbyte3darr[1,0,1] is: {0}", vt1.sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (UInt16_result != Convert.ToUInt16(cl1.sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("UInt16_result is: {0}", UInt16_result);
Console.WriteLine("cl1.sbyte3darr[1,0,1] is: {0}", cl1.sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (UInt16_result != Convert.ToUInt16(ja2[1][1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("UInt16_result is: {0}", UInt16_result);
Console.WriteLine("ja2[1][1,0,1] is: {0}", ja2[1][1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (!pass)
{
Console.WriteLine("FAILED");
return 1;
}
else
{
Console.WriteLine("PASSED");
return 100;
}
}
};
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
using System;
public struct VT
{
public sbyte[,] sbyte2darr;
public sbyte[, ,] sbyte3darr;
public sbyte[,] sbyte2darr_b;
public sbyte[, ,] sbyte3darr_b;
}
public class CL
{
public sbyte[,] sbyte2darr = { { 0, 1 }, { 0, 0 } };
public sbyte[, ,] sbyte3darr = { { { 0, 0 } }, { { 0, 1 } }, { { 0, 0 } } };
public sbyte[,] sbyte2darr_b = { { 0, 49 }, { 0, 0 } };
public sbyte[, ,] sbyte3darr_b = { { { 0, 0 } }, { { 0, 49 } }, { { 0, 0 } } };
}
public class sbyteMDArrTest
{
static sbyte[,] sbyte2darr = { { 0, 1 }, { 0, 0 } };
static sbyte[, ,] sbyte3darr = { { { 0, 0 } }, { { 0, 1 } }, { { 0, 0 } } };
static sbyte[,] sbyte2darr_b = { { 0, 49 }, { 0, 0 } };
static sbyte[, ,] sbyte3darr_b = { { { 0, 0 } }, { { 0, 49 } }, { { 0, 0 } } };
static sbyte[][,] ja1 = new sbyte[2][,];
static sbyte[][, ,] ja2 = new sbyte[2][, ,];
static sbyte[][,] ja1_b = new sbyte[2][,];
static sbyte[][, ,] ja2_b = new sbyte[2][, ,];
public static int Main()
{
bool pass = true;
VT vt1;
vt1.sbyte2darr = new sbyte[,] { { 0, 1 }, { 0, 0 } };
vt1.sbyte3darr = new sbyte[,,] { { { 0, 0 } }, { { 0, 1 } }, { { 0, 0 } } };
vt1.sbyte2darr_b = new sbyte[,] { { 0, 49 }, { 0, 0 } };
vt1.sbyte3darr_b = new sbyte[,,] { { { 0, 0 } }, { { 0, 49 } }, { { 0, 0 } } };
CL cl1 = new CL();
ja1[0] = new sbyte[,] { { 0, 1 }, { 0, 0 } };
ja2[1] = new sbyte[,,] { { { 0, 0 } }, { { 0, 1 } }, { { 0, 0 } } };
ja1_b[0] = new sbyte[,] { { 0, 49 }, { 0, 0 } };
ja2_b[1] = new sbyte[,,] { { { 0, 0 } }, { { 0, 49 } }, { { 0, 0 } } };
sbyte result = 1;
// 2D
if (result != sbyte2darr[0, 1])
{
Console.WriteLine("ERROR:");
Console.WriteLine("result is: {0}", result);
Console.WriteLine("2darr[0, 1] is: {0}", sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (result != vt1.sbyte2darr[0, 1])
{
Console.WriteLine("ERROR:");
Console.WriteLine("result is: {0}", result);
Console.WriteLine("vt1.sbyte2darr[0, 1] is: {0}", vt1.sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (result != cl1.sbyte2darr[0, 1])
{
Console.WriteLine("ERROR:");
Console.WriteLine("result is: {0}", result);
Console.WriteLine("cl1.sbyte2darr[0, 1] is: {0}", cl1.sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (result != ja1[0][0, 1])
{
Console.WriteLine("ERROR:");
Console.WriteLine("result is: {0}", result);
Console.WriteLine("ja1[0][0, 1] is: {0}", ja1[0][0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
// 3D
if (result != sbyte3darr[1, 0, 1])
{
Console.WriteLine("ERROR:");
Console.WriteLine("result is: {0}", result);
Console.WriteLine("sbyte3darr[1,0,1] is: {0}", sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (result != vt1.sbyte3darr[1, 0, 1])
{
Console.WriteLine("ERROR:");
Console.WriteLine("result is: {0}", result);
Console.WriteLine("vt1.sbyte3darr[1,0,1] is: {0}", vt1.sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (result != cl1.sbyte3darr[1, 0, 1])
{
Console.WriteLine("ERROR:");
Console.WriteLine("result is: {0}", result);
Console.WriteLine("cl1.sbyte3darr[1,0,1] is: {0}", cl1.sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (result != ja2[1][1, 0, 1])
{
Console.WriteLine("ERROR:");
Console.WriteLine("result is: {0}", result);
Console.WriteLine("ja2[1][1,0,1] is: {0}", ja2[1][1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
//SByteToBool tests
bool Bool_result = true;
// 2D
if (Bool_result != Convert.ToBoolean(sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Bool_result is: {0}", Bool_result);
Console.WriteLine("2darr[0, 1] is: {0}", sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Bool_result != Convert.ToBoolean(vt1.sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Bool_result is: {0}", Bool_result);
Console.WriteLine("vt1.sbyte2darr[0, 1] is: {0}", vt1.sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Bool_result != Convert.ToBoolean(cl1.sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Bool_result is: {0}", Bool_result);
Console.WriteLine("cl1.sbyte2darr[0, 1] is: {0}", cl1.sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Bool_result != Convert.ToBoolean(ja1[0][0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Bool_result is: {0}", Bool_result);
Console.WriteLine("ja1[0][0, 1] is: {0}", ja1[0][0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
// 3D
if (Bool_result != Convert.ToBoolean(sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Bool_result is: {0}", Bool_result);
Console.WriteLine("sbyte3darr[1,0,1] is: {0}", sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Bool_result != Convert.ToBoolean(vt1.sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Bool_result is: {0}", Bool_result);
Console.WriteLine("vt1.sbyte3darr[1,0,1] is: {0}", vt1.sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Bool_result != Convert.ToBoolean(cl1.sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Bool_result is: {0}", Bool_result);
Console.WriteLine("cl1.sbyte3darr[1,0,1] is: {0}", cl1.sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Bool_result != Convert.ToBoolean(ja2[1][1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Bool_result is: {0}", Bool_result);
Console.WriteLine("ja2[1][1,0,1] is: {0}", ja2[1][1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
//SByteToByte tests
byte Byte_result = 1;
// 2D
if (Byte_result != Convert.ToSByte(sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Byte_result is: {0}", Byte_result);
Console.WriteLine("2darr[0, 1] is: {0}", sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Byte_result != Convert.ToSByte(vt1.sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Byte_result is: {0}", Byte_result);
Console.WriteLine("vt1.sbyte2darr[0, 1] is: {0}", vt1.sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Byte_result != Convert.ToSByte(cl1.sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Byte_result is: {0}", Byte_result);
Console.WriteLine("cl1.sbyte2darr[0, 1] is: {0}", cl1.sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Byte_result != Convert.ToSByte(ja1[0][0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Byte_result is: {0}", Byte_result);
Console.WriteLine("ja1[0][0, 1] is: {0}", ja1[0][0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
// 3D
if (Byte_result != Convert.ToSByte(sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Byte_result is: {0}", Byte_result);
Console.WriteLine("sbyte3darr[1,0,1] is: {0}", sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Byte_result != Convert.ToSByte(vt1.sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Byte_result is: {0}", Byte_result);
Console.WriteLine("vt1.sbyte3darr[1,0,1] is: {0}", vt1.sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Byte_result != Convert.ToSByte(cl1.sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Byte_result is: {0}", Byte_result);
Console.WriteLine("cl1.sbyte3darr[1,0,1] is: {0}", cl1.sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Byte_result != Convert.ToSByte(ja2[1][1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Byte_result is: {0}", Byte_result);
Console.WriteLine("ja2[1][1,0,1] is: {0}", ja2[1][1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
//SByteToChar tests
char Char_result = '1';
// 2D
if (Char_result != Convert.ToChar(sbyte2darr_b[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Char_result is: {0}", Char_result);
Console.WriteLine("2darr[0, 1] is: {0}", sbyte2darr_b[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Char_result != Convert.ToChar(vt1.sbyte2darr_b[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Char_result is: {0}", Char_result);
Console.WriteLine("vt1.sbyte2darr_b[0, 1] is: {0}", vt1.sbyte2darr_b[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Char_result != Convert.ToChar(cl1.sbyte2darr_b[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Char_result is: {0}", Char_result);
Console.WriteLine("cl1.sbyte2darr_b[0, 1] is: {0}", cl1.sbyte2darr_b[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Char_result != Convert.ToChar(ja1_b[0][0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Char_result is: {0}", Char_result);
Console.WriteLine("ja1_b[0][0, 1] is: {0}", ja1_b[0][0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
// 3D
if (Char_result != Convert.ToChar(sbyte3darr_b[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Char_result is: {0}", Char_result);
Console.WriteLine("sbyte3darr_b[1,0,1] is: {0}", sbyte3darr_b[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Char_result != Convert.ToChar(vt1.sbyte3darr_b[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Char_result is: {0}", Char_result);
Console.WriteLine("vt1.sbyte3darr_b[1,0,1] is: {0}", vt1.sbyte3darr_b[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Char_result != Convert.ToChar(cl1.sbyte3darr_b[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Char_result is: {0}", Char_result);
Console.WriteLine("cl1.sbyte3darr_b[1,0,1] is: {0}", cl1.sbyte3darr_b[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Char_result != Convert.ToChar(ja2_b[1][1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Char_result is: {0}", Char_result);
Console.WriteLine("ja2_b[1][1,0,1] is: {0}", ja2_b[1][1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
//SByteToDecimal tests
decimal Decimal_result = 1;
// 2D
if (Decimal_result != Convert.ToDecimal(sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Decimal_result is: {0}", Decimal_result);
Console.WriteLine("2darr[0, 1] is: {0}", sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Decimal_result != Convert.ToDecimal(vt1.sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Decimal_result is: {0}", Decimal_result);
Console.WriteLine("vt1.sbyte2darr[0, 1] is: {0}", vt1.sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Decimal_result != Convert.ToDecimal(cl1.sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Decimal_result is: {0}", Decimal_result);
Console.WriteLine("cl1.sbyte2darr[0, 1] is: {0}", cl1.sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Decimal_result != Convert.ToDecimal(ja1[0][0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Decimal_result is: {0}", Decimal_result);
Console.WriteLine("ja1[0][0, 1] is: {0}", ja1[0][0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
// 3D
if (Decimal_result != Convert.ToDecimal(sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Decimal_result is: {0}", Decimal_result);
Console.WriteLine("sbyte3darr[1,0,1] is: {0}", sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Decimal_result != Convert.ToDecimal(vt1.sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Decimal_result is: {0}", Decimal_result);
Console.WriteLine("vt1.sbyte3darr[1,0,1] is: {0}", vt1.sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Decimal_result != Convert.ToDecimal(cl1.sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Decimal_result is: {0}", Decimal_result);
Console.WriteLine("cl1.sbyte3darr[1,0,1] is: {0}", cl1.sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Decimal_result != Convert.ToDecimal(ja2[1][1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Decimal_result is: {0}", Decimal_result);
Console.WriteLine("ja2[1][1,0,1] is: {0}", ja2[1][1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
//SByteToDouble tests
double Double_result = 1;
// 2D
if (Double_result != Convert.ToDouble(sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Double_result is: {0}", Double_result);
Console.WriteLine("2darr[0, 1] is: {0}", sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Double_result != Convert.ToDouble(vt1.sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Double_result is: {0}", Double_result);
Console.WriteLine("vt1.sbyte2darr[0, 1] is: {0}", vt1.sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Double_result != Convert.ToDouble(cl1.sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Double_result is: {0}", Double_result);
Console.WriteLine("cl1.sbyte2darr[0, 1] is: {0}", cl1.sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Double_result != Convert.ToDouble(ja1[0][0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Double_result is: {0}", Double_result);
Console.WriteLine("ja1[0][0, 1] is: {0}", ja1[0][0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
// 3D
if (Double_result != Convert.ToDouble(sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Double_result is: {0}", Double_result);
Console.WriteLine("sbyte3darr[1,0,1] is: {0}", sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Double_result != Convert.ToDouble(vt1.sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Double_result is: {0}", Double_result);
Console.WriteLine("vt1.sbyte3darr[1,0,1] is: {0}", vt1.sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Double_result != Convert.ToDouble(cl1.sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Double_result is: {0}", Double_result);
Console.WriteLine("cl1.sbyte3darr[1,0,1] is: {0}", cl1.sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Double_result != Convert.ToDouble(ja2[1][1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Double_result is: {0}", Double_result);
Console.WriteLine("ja2[1][1,0,1] is: {0}", ja2[1][1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
//SByteToSingle tests
float Single_result = 1;
// 2D
if (Single_result != Convert.ToSingle(sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Single_result is: {0}", Single_result);
Console.WriteLine("2darr[0, 1] is: {0}", sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Single_result != Convert.ToSingle(vt1.sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Single_result is: {0}", Single_result);
Console.WriteLine("vt1.sbyte2darr[0, 1] is: {0}", vt1.sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Single_result != Convert.ToSingle(cl1.sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Single_result is: {0}", Single_result);
Console.WriteLine("cl1.sbyte2darr[0, 1] is: {0}", cl1.sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Single_result != Convert.ToSingle(ja1[0][0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Single_result is: {0}", Single_result);
Console.WriteLine("ja1[0][0, 1] is: {0}", ja1[0][0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
// 3D
if (Single_result != Convert.ToSingle(sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Single_result is: {0}", Single_result);
Console.WriteLine("sbyte3darr[1,0,1] is: {0}", sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Single_result != Convert.ToSingle(vt1.sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Single_result is: {0}", Single_result);
Console.WriteLine("vt1.sbyte3darr[1,0,1] is: {0}", vt1.sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Single_result != Convert.ToSingle(cl1.sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Single_result is: {0}", Single_result);
Console.WriteLine("cl1.sbyte3darr[1,0,1] is: {0}", cl1.sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Single_result != Convert.ToSingle(ja2[1][1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Single_result is: {0}", Single_result);
Console.WriteLine("ja2[1][1,0,1] is: {0}", ja2[1][1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
//SbyteToInt32 tests
int Int32_result = 1;
// 2D
if (Int32_result != Convert.ToInt32(sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Int32_result is: {0}", Int32_result);
Console.WriteLine("2darr[0, 1] is: {0}", sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Int32_result != Convert.ToInt32(vt1.sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Int32_result is: {0}", Int32_result);
Console.WriteLine("vt1.sbyte2darr[0, 1] is: {0}", vt1.sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Int32_result != Convert.ToInt32(cl1.sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Int32_result is: {0}", Int32_result);
Console.WriteLine("cl1.sbyte2darr[0, 1] is: {0}", cl1.sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Int32_result != Convert.ToInt32(ja1[0][0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Int32_result is: {0}", Int32_result);
Console.WriteLine("ja1[0][0, 1] is: {0}", ja1[0][0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
// 3D
if (Int32_result != Convert.ToInt32(sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Int32_result is: {0}", Int32_result);
Console.WriteLine("sbyte3darr[1,0,1] is: {0}", sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Int32_result != Convert.ToInt32(vt1.sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Int32_result is: {0}", Int32_result);
Console.WriteLine("vt1.sbyte3darr[1,0,1] is: {0}", vt1.sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Int32_result != Convert.ToInt32(cl1.sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Int32_result is: {0}", Int32_result);
Console.WriteLine("cl1.sbyte3darr[1,0,1] is: {0}", cl1.sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Int32_result != Convert.ToInt32(ja2[1][1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Int32_result is: {0}", Int32_result);
Console.WriteLine("ja2[1][1,0,1] is: {0}", ja2[1][1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
//SByteToInt64 tests
long Int64_result = 1;
// 2D
if (Int64_result != Convert.ToInt64(sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Int64_result is: {0}", Int64_result);
Console.WriteLine("2darr[0, 1] is: {0}", sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Int64_result != Convert.ToInt64(vt1.sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Int64_result is: {0}", Int64_result);
Console.WriteLine("vt1.sbyte2darr[0, 1] is: {0}", vt1.sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Int64_result != Convert.ToInt64(cl1.sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Int64_result is: {0}", Int64_result);
Console.WriteLine("cl1.sbyte2darr[0, 1] is: {0}", cl1.sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Int64_result != Convert.ToInt64(ja1[0][0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Int64_result is: {0}", Int64_result);
Console.WriteLine("ja1[0][0, 1] is: {0}", ja1[0][0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
// 3D
if (Int64_result != Convert.ToInt64(sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Int64_result is: {0}", Int64_result);
Console.WriteLine("sbyte3darr[1,0,1] is: {0}", sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Int64_result != Convert.ToInt64(vt1.sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Int64_result is: {0}", Int64_result);
Console.WriteLine("vt1.sbyte3darr[1,0,1] is: {0}", vt1.sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Int64_result != Convert.ToInt64(cl1.sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Int64_result is: {0}", Int64_result);
Console.WriteLine("cl1.sbyte3darr[1,0,1] is: {0}", cl1.sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Int64_result != Convert.ToInt64(ja2[1][1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Int64_result is: {0}", Int64_result);
Console.WriteLine("ja2[1][1,0,1] is: {0}", ja2[1][1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
//SByteToInt16 tests
short Int16_result = 1;
// 2D
if (Int16_result != Convert.ToInt16(sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Int16_result is: {0}", Int16_result);
Console.WriteLine("2darr[0, 1] is: {0}", sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Int16_result != Convert.ToInt16(vt1.sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Int16_result is: {0}", Int16_result);
Console.WriteLine("vt1.sbyte2darr[0, 1] is: {0}", vt1.sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Int16_result != Convert.ToInt16(cl1.sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Int16_result is: {0}", Int16_result);
Console.WriteLine("cl1.sbyte2darr[0, 1] is: {0}", cl1.sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Int16_result != Convert.ToInt16(ja1[0][0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Int16_result is: {0}", Int16_result);
Console.WriteLine("ja1[0][0, 1] is: {0}", ja1[0][0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
// 3D
if (Int16_result != Convert.ToInt16(sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Int16_result is: {0}", Int16_result);
Console.WriteLine("sbyte3darr[1,0,1] is: {0}", sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Int16_result != Convert.ToInt16(vt1.sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Int16_result is: {0}", Int16_result);
Console.WriteLine("vt1.sbyte3darr[1,0,1] is: {0}", vt1.sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Int16_result != Convert.ToInt16(cl1.sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Int16_result is: {0}", Int16_result);
Console.WriteLine("cl1.sbyte3darr[1,0,1] is: {0}", cl1.sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (Int16_result != Convert.ToInt16(ja2[1][1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("Int16_result is: {0}", Int16_result);
Console.WriteLine("ja2[1][1,0,1] is: {0}", ja2[1][1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
//SByteToUInt32 tests
uint UInt32_result = 1;
// 2D
if (UInt32_result != Convert.ToUInt32(sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("UInt32_result is: {0}", UInt32_result);
Console.WriteLine("2darr[0, 1] is: {0}", sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (UInt32_result != Convert.ToUInt32(vt1.sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("UInt32_result is: {0}", UInt32_result);
Console.WriteLine("vt1.sbyte2darr[0, 1] is: {0}", vt1.sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (UInt32_result != Convert.ToUInt32(cl1.sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("UInt32_result is: {0}", UInt32_result);
Console.WriteLine("cl1.sbyte2darr[0, 1] is: {0}", cl1.sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (UInt32_result != Convert.ToUInt32(ja1[0][0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("UInt32_result is: {0}", UInt32_result);
Console.WriteLine("ja1[0][0, 1] is: {0}", ja1[0][0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
// 3D
if (UInt32_result != Convert.ToUInt32(sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("UInt32_result is: {0}", UInt32_result);
Console.WriteLine("sbyte3darr[1,0,1] is: {0}", sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (UInt32_result != Convert.ToUInt32(vt1.sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("UInt32_result is: {0}", UInt32_result);
Console.WriteLine("vt1.sbyte3darr[1,0,1] is: {0}", vt1.sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (UInt32_result != Convert.ToUInt32(cl1.sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("UInt32_result is: {0}", UInt32_result);
Console.WriteLine("cl1.sbyte3darr[1,0,1] is: {0}", cl1.sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (UInt32_result != Convert.ToUInt32(ja2[1][1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("UInt32_result is: {0}", UInt32_result);
Console.WriteLine("ja2[1][1,0,1] is: {0}", ja2[1][1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
//SByteToUInt64 tests
ulong UInt64_result = 1;
// 2D
if (UInt64_result != Convert.ToUInt64(sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("UInt64_result is: {0}", UInt64_result);
Console.WriteLine("2darr[0, 1] is: {0}", sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (UInt64_result != Convert.ToUInt64(vt1.sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("UInt64_result is: {0}", UInt64_result);
Console.WriteLine("vt1.sbyte2darr[0, 1] is: {0}", vt1.sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (UInt64_result != Convert.ToUInt64(cl1.sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("UInt64_result is: {0}", UInt64_result);
Console.WriteLine("cl1.sbyte2darr[0, 1] is: {0}", cl1.sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (UInt64_result != Convert.ToUInt64(ja1[0][0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("UInt64_result is: {0}", UInt64_result);
Console.WriteLine("ja1[0][0, 1] is: {0}", ja1[0][0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
// 3D
if (UInt64_result != Convert.ToUInt64(sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("UInt64_result is: {0}", UInt64_result);
Console.WriteLine("sbyte3darr[1,0,1] is: {0}", sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (UInt64_result != Convert.ToUInt64(vt1.sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("UInt64_result is: {0}", UInt64_result);
Console.WriteLine("vt1.sbyte3darr[1,0,1] is: {0}", vt1.sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (UInt64_result != Convert.ToUInt64(cl1.sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("UInt64_result is: {0}", UInt64_result);
Console.WriteLine("cl1.sbyte3darr[1,0,1] is: {0}", cl1.sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (UInt64_result != Convert.ToUInt64(ja2[1][1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("UInt64_result is: {0}", UInt64_result);
Console.WriteLine("ja2[1][1,0,1] is: {0}", ja2[1][1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
//SByteToUInt16 tests
ushort UInt16_result = 1;
// 2D
if (UInt16_result != Convert.ToUInt16(sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("UInt16_result is: {0}", UInt16_result);
Console.WriteLine("2darr[0, 1] is: {0}", sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (UInt16_result != Convert.ToUInt16(vt1.sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("UInt16_result is: {0}", UInt16_result);
Console.WriteLine("vt1.sbyte2darr[0, 1] is: {0}", vt1.sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (UInt16_result != Convert.ToUInt16(cl1.sbyte2darr[0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("UInt16_result is: {0}", UInt16_result);
Console.WriteLine("cl1.sbyte2darr[0, 1] is: {0}", cl1.sbyte2darr[0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (UInt16_result != Convert.ToUInt16(ja1[0][0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("UInt16_result is: {0}", UInt16_result);
Console.WriteLine("ja1[0][0, 1] is: {0}", ja1[0][0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
// 3D
if (UInt16_result != Convert.ToUInt16(sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("UInt16_result is: {0}", UInt16_result);
Console.WriteLine("sbyte3darr[1,0,1] is: {0}", sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (UInt16_result != Convert.ToUInt16(vt1.sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("UInt16_result is: {0}", UInt16_result);
Console.WriteLine("vt1.sbyte3darr[1,0,1] is: {0}", vt1.sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (UInt16_result != Convert.ToUInt16(cl1.sbyte3darr[1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("UInt16_result is: {0}", UInt16_result);
Console.WriteLine("cl1.sbyte3darr[1,0,1] is: {0}", cl1.sbyte3darr[1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (UInt16_result != Convert.ToUInt16(ja2[1][1, 0, 1]))
{
Console.WriteLine("ERROR:");
Console.WriteLine("UInt16_result is: {0}", UInt16_result);
Console.WriteLine("ja2[1][1,0,1] is: {0}", ja2[1][1, 0, 1]);
Console.WriteLine("and they are NOT equal !");
Console.WriteLine();
pass = false;
}
if (!pass)
{
Console.WriteLine("FAILED");
return 1;
}
else
{
Console.WriteLine("PASSED");
return 100;
}
}
};
| -1 |
dotnet/runtime | 65,932 | Update stale comments that reference GitHub issues | Fixes #65931
[Cleanup Issue-URLs in Code · Issue #63902 · dotnet/runtime](https://github.com/dotnet/runtime/issues/63902) identified stale comments that reference GitHub issues. This PR updates comments to reflect updated statuses.
_I'm marking this PR as a draft while working through that issue to see if other comments can also be quickly updated._
/cc @deeprobin | jeffhandley | "2022-02-27T20:14:23Z" | "2022-02-28T03:21:49Z" | 68fb7fc68cc1af800bee1d38af22b5027bf4ab4e | a58437f7e6794aba690e053827a5d436919c6bc3 | Update stale comments that reference GitHub issues. Fixes #65931
[Cleanup Issue-URLs in Code · Issue #63902 · dotnet/runtime](https://github.com/dotnet/runtime/issues/63902) identified stale comments that reference GitHub issues. This PR updates comments to reflect updated statuses.
_I'm marking this PR as a draft while working through that issue to see if other comments can also be quickly updated._
/cc @deeprobin | ./src/libraries/System.Reflection.Metadata/tests/Resources/TestResources.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Collections.Immutable;
using System.IO;
using System.Linq;
using System.Reflection.PortableExecutable;
namespace System.Reflection.Metadata.Tests
{
internal static class Interop
{
public static readonly byte[] IndexerWithByRefParam = ResourceHelper.GetResource("Interop.IndexerWithByRefParam.dll");
public static readonly byte[] OtherAccessors = ResourceHelper.GetResource("Interop.OtherAccessors.dll");
public static readonly byte[] Interop_Mock01 = ResourceHelper.GetResource("Interop.Interop.Mock01.dll");
public static readonly byte[] Interop_Mock01_Impl = ResourceHelper.GetResource("Interop.Interop.Mock01.Impl.dll");
}
internal static class Misc
{
public static readonly byte[] CPPClassLibrary2 = ResourceHelper.GetResource("Misc.CPPClassLibrary2.obj");
public static readonly byte[] EmptyType = ResourceHelper.GetResource("Misc.EmptyType.dll");
public static readonly byte[] Members = ResourceHelper.GetResource("Misc.Members.dll");
public static readonly byte[] Deterministic = ResourceHelper.GetResource("Misc.Deterministic.dll");
public static readonly byte[] Debug = ResourceHelper.GetResource("Misc.Debug.dll");
public static readonly byte[] KeyPair = ResourceHelper.GetResource("Misc.KeyPair.snk");
public static readonly byte[] Signed = ResourceHelper.GetResource("Misc.Signed.exe");
public static readonly byte[] Satellite = ResourceHelper.GetResource("Misc.SatelliteAssembly.resources.dll");
public static readonly byte[] KeyPair_PublicKey = new byte[]
{
0x00, 0x24, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x94, 0x00, 0x00, 0x00, 0x06, 0x02, 0x00, 0x00,
0x00, 0x24, 0x00, 0x00, 0x52, 0x53, 0x41, 0x31, 0x00, 0x04, 0x00, 0x00, 0x01, 0x00, 0x01, 0x00,
0x33, 0x61, 0x19, 0xca, 0x32, 0xc4, 0x2b, 0xc8, 0x1e, 0x80, 0x48, 0xc1, 0xa9, 0xb2, 0x75, 0xa8,
0xdf, 0x83, 0x1b, 0xb1, 0xeb, 0x4c, 0xf4, 0xdf, 0xdf, 0x99, 0xec, 0x35, 0x15, 0x35, 0x80, 0x0e,
0x26, 0x85, 0x15, 0x73, 0x19, 0xba, 0xdc, 0xff, 0xb7, 0x0c, 0x96, 0x3e, 0xa0, 0x9b, 0x0a, 0x62,
0x01, 0x17, 0x4b, 0x45, 0xa0, 0x76, 0x0a, 0xa8, 0xdb, 0x08, 0xbe, 0x16, 0x56, 0xa3, 0x20, 0x53,
0xef, 0xf2, 0x12, 0x25, 0x85, 0xe7, 0x40, 0x74, 0x8e, 0x0a, 0xb8, 0x3e, 0xd7, 0xbf, 0xad, 0x13,
0x1a, 0xa9, 0x81, 0x22, 0x86, 0xc9, 0x5f, 0xa5, 0x27, 0xde, 0x70, 0x40, 0x8b, 0xd0, 0xf4, 0x6a,
0xfb, 0x48, 0x23, 0x8a, 0x27, 0x00, 0xe1, 0x80, 0xad, 0xd4, 0x08, 0xd4, 0x43, 0xf0, 0xcd, 0xd8,
0x57, 0x1d, 0x5b, 0xa1, 0x5f, 0x96, 0x72, 0x58, 0xd7, 0x4a, 0xcc, 0xa7, 0x82, 0x00, 0x11, 0xcf
};
}
internal static class NetModule
{
public static readonly byte[] ModuleCS01 = ResourceHelper.GetResource("NetModule.ModuleCS01.mod");
public static readonly byte[] ModuleVB01 = ResourceHelper.GetResource("NetModule.ModuleVB01.mod");
public static readonly byte[] AppCS = ResourceHelper.GetResource("NetModule.AppCS.exe");
}
internal static class Namespace
{
public static readonly byte[] NamespaceTests = ResourceHelper.GetResource("Namespace.NamespaceTests.dll");
}
internal static class WinRT
{
public static readonly byte[] Lib = ResourceHelper.GetResource("WinRT.Lib.winmd");
}
internal static class PortablePdbs
{
public static readonly byte[] DocumentsDll = ResourceHelper.GetResource("PortablePdbs.Documents.dll");
public static readonly byte[] DocumentsPdb = ResourceHelper.GetResource("PortablePdbs.Documents.pdb");
public static readonly byte[] DocumentsEmbeddedDll = ResourceHelper.GetResource("PortablePdbs.Documents.Embedded.dll");
}
internal static class SynthesizedPeImages
{
private static Lazy<ImmutableArray<byte>> _image1 = new Lazy<ImmutableArray<byte>>(GenerateImage);
public static ImmutableArray<byte> Image1 => _image1.Value;
private sealed class TestPEBuilder : PEBuilder
{
private readonly PEDirectoriesBuilder _dirBuilder = new PEDirectoriesBuilder();
public TestPEBuilder()
: base(new PEHeaderBuilder(sectionAlignment: 512, fileAlignment: 512), deterministicIdProvider: _ => new BlobContentId())
{
}
protected override ImmutableArray<Section> CreateSections()
{
return ImmutableArray.Create(
new Section(".s1", 0),
new Section(".s2", 0),
new Section(".s3", 0));
}
protected override BlobBuilder SerializeSection(string name, SectionLocation location)
{
if (name == ".s2")
{
_dirBuilder.CopyrightTable = new DirectoryEntry(location.RelativeVirtualAddress + 5, 10);
}
var builder = new BlobBuilder();
builder.WriteBytes((byte)name[name.Length - 1], 10);
return builder;
}
protected internal override PEDirectoriesBuilder GetDirectories()
{
return _dirBuilder;
}
}
private static ImmutableArray<byte> GenerateImage()
{
var peBuilder = new TestPEBuilder();
BlobBuilder peImageBuilder = new BlobBuilder();
var contentId = peBuilder.Serialize(peImageBuilder);
var peImage = peImageBuilder.ToImmutableArray();
AssertEx.Equal(new byte[]
{
// headers:
0x4D, 0x5A, 0x90, 0x00, 0x03, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x00, 0x00,
0xB8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00,
0x0E, 0x1F, 0xBA, 0x0E, 0x00, 0xB4, 0x09, 0xCD, 0x21, 0xB8, 0x01, 0x4C, 0xCD, 0x21, 0x54, 0x68,
0x69, 0x73, 0x20, 0x70, 0x72, 0x6F, 0x67, 0x72, 0x61, 0x6D, 0x20, 0x63, 0x61, 0x6E, 0x6E, 0x6F,
0x74, 0x20, 0x62, 0x65, 0x20, 0x72, 0x75, 0x6E, 0x20, 0x69, 0x6E, 0x20, 0x44, 0x4F, 0x53, 0x20,
0x6D, 0x6F, 0x64, 0x65, 0x2E, 0x0D, 0x0D, 0x0A, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x50, 0x45, 0x00, 0x00, 0x4C, 0x01, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0xE0, 0x00, 0x00, 0x20, 0x0B, 0x01, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x08, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x40, 0x85,
0x00, 0x00, 0x10, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x10, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x05, 0x04, 0x00, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2E, 0x73, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00,
0x0A, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x2E, 0x73, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00,
0x00, 0x02, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2E, 0x73, 0x33, 0x00, 0x00, 0x00, 0x00, 0x00,
0x0A, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
}.
// .s1
Concat(Pad(512, new byte[] { 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31 })).
// .s2
Concat(Pad(512, new byte[] { 0x32, 0x32, 0x32, 0x32, 0x32, 0x32, 0x32, 0x32, 0x32, 0x32 })).
// .s3
Concat(Pad(512, new byte[] { 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33 })), peImage);
return peImage;
}
private static byte[] Pad(int length, byte[] bytes)
{
var result = new byte[length];
Array.Copy(bytes, result, bytes.Length);
return result;
}
}
internal static class ResourceHelper
{
public static byte[] GetResource(string name)
{
string fullName = "System.Reflection.Metadata.Tests.Resources." + name;
using (var stream = typeof(ResourceHelper).GetTypeInfo().Assembly.GetManifestResourceStream(fullName))
{
var bytes = new byte[stream.Length];
using (var memoryStream = new MemoryStream(bytes))
{
stream.CopyTo(memoryStream);
}
return bytes;
}
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Collections.Immutable;
using System.IO;
using System.Linq;
using System.Reflection.PortableExecutable;
namespace System.Reflection.Metadata.Tests
{
internal static class Interop
{
public static readonly byte[] IndexerWithByRefParam = ResourceHelper.GetResource("Interop.IndexerWithByRefParam.dll");
public static readonly byte[] OtherAccessors = ResourceHelper.GetResource("Interop.OtherAccessors.dll");
public static readonly byte[] Interop_Mock01 = ResourceHelper.GetResource("Interop.Interop.Mock01.dll");
public static readonly byte[] Interop_Mock01_Impl = ResourceHelper.GetResource("Interop.Interop.Mock01.Impl.dll");
}
internal static class Misc
{
public static readonly byte[] CPPClassLibrary2 = ResourceHelper.GetResource("Misc.CPPClassLibrary2.obj");
public static readonly byte[] EmptyType = ResourceHelper.GetResource("Misc.EmptyType.dll");
public static readonly byte[] Members = ResourceHelper.GetResource("Misc.Members.dll");
public static readonly byte[] Deterministic = ResourceHelper.GetResource("Misc.Deterministic.dll");
public static readonly byte[] Debug = ResourceHelper.GetResource("Misc.Debug.dll");
public static readonly byte[] KeyPair = ResourceHelper.GetResource("Misc.KeyPair.snk");
public static readonly byte[] Signed = ResourceHelper.GetResource("Misc.Signed.exe");
public static readonly byte[] Satellite = ResourceHelper.GetResource("Misc.SatelliteAssembly.resources.dll");
public static readonly byte[] KeyPair_PublicKey = new byte[]
{
0x00, 0x24, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x94, 0x00, 0x00, 0x00, 0x06, 0x02, 0x00, 0x00,
0x00, 0x24, 0x00, 0x00, 0x52, 0x53, 0x41, 0x31, 0x00, 0x04, 0x00, 0x00, 0x01, 0x00, 0x01, 0x00,
0x33, 0x61, 0x19, 0xca, 0x32, 0xc4, 0x2b, 0xc8, 0x1e, 0x80, 0x48, 0xc1, 0xa9, 0xb2, 0x75, 0xa8,
0xdf, 0x83, 0x1b, 0xb1, 0xeb, 0x4c, 0xf4, 0xdf, 0xdf, 0x99, 0xec, 0x35, 0x15, 0x35, 0x80, 0x0e,
0x26, 0x85, 0x15, 0x73, 0x19, 0xba, 0xdc, 0xff, 0xb7, 0x0c, 0x96, 0x3e, 0xa0, 0x9b, 0x0a, 0x62,
0x01, 0x17, 0x4b, 0x45, 0xa0, 0x76, 0x0a, 0xa8, 0xdb, 0x08, 0xbe, 0x16, 0x56, 0xa3, 0x20, 0x53,
0xef, 0xf2, 0x12, 0x25, 0x85, 0xe7, 0x40, 0x74, 0x8e, 0x0a, 0xb8, 0x3e, 0xd7, 0xbf, 0xad, 0x13,
0x1a, 0xa9, 0x81, 0x22, 0x86, 0xc9, 0x5f, 0xa5, 0x27, 0xde, 0x70, 0x40, 0x8b, 0xd0, 0xf4, 0x6a,
0xfb, 0x48, 0x23, 0x8a, 0x27, 0x00, 0xe1, 0x80, 0xad, 0xd4, 0x08, 0xd4, 0x43, 0xf0, 0xcd, 0xd8,
0x57, 0x1d, 0x5b, 0xa1, 0x5f, 0x96, 0x72, 0x58, 0xd7, 0x4a, 0xcc, 0xa7, 0x82, 0x00, 0x11, 0xcf
};
}
internal static class NetModule
{
public static readonly byte[] ModuleCS01 = ResourceHelper.GetResource("NetModule.ModuleCS01.mod");
public static readonly byte[] ModuleVB01 = ResourceHelper.GetResource("NetModule.ModuleVB01.mod");
public static readonly byte[] AppCS = ResourceHelper.GetResource("NetModule.AppCS.exe");
}
internal static class Namespace
{
public static readonly byte[] NamespaceTests = ResourceHelper.GetResource("Namespace.NamespaceTests.dll");
}
internal static class WinRT
{
public static readonly byte[] Lib = ResourceHelper.GetResource("WinRT.Lib.winmd");
}
internal static class PortablePdbs
{
public static readonly byte[] DocumentsDll = ResourceHelper.GetResource("PortablePdbs.Documents.dll");
public static readonly byte[] DocumentsPdb = ResourceHelper.GetResource("PortablePdbs.Documents.pdb");
public static readonly byte[] DocumentsEmbeddedDll = ResourceHelper.GetResource("PortablePdbs.Documents.Embedded.dll");
}
internal static class SynthesizedPeImages
{
private static Lazy<ImmutableArray<byte>> _image1 = new Lazy<ImmutableArray<byte>>(GenerateImage);
public static ImmutableArray<byte> Image1 => _image1.Value;
private sealed class TestPEBuilder : PEBuilder
{
private readonly PEDirectoriesBuilder _dirBuilder = new PEDirectoriesBuilder();
public TestPEBuilder()
: base(new PEHeaderBuilder(sectionAlignment: 512, fileAlignment: 512), deterministicIdProvider: _ => new BlobContentId())
{
}
protected override ImmutableArray<Section> CreateSections()
{
return ImmutableArray.Create(
new Section(".s1", 0),
new Section(".s2", 0),
new Section(".s3", 0));
}
protected override BlobBuilder SerializeSection(string name, SectionLocation location)
{
if (name == ".s2")
{
_dirBuilder.CopyrightTable = new DirectoryEntry(location.RelativeVirtualAddress + 5, 10);
}
var builder = new BlobBuilder();
builder.WriteBytes((byte)name[name.Length - 1], 10);
return builder;
}
protected internal override PEDirectoriesBuilder GetDirectories()
{
return _dirBuilder;
}
}
private static ImmutableArray<byte> GenerateImage()
{
var peBuilder = new TestPEBuilder();
BlobBuilder peImageBuilder = new BlobBuilder();
var contentId = peBuilder.Serialize(peImageBuilder);
var peImage = peImageBuilder.ToImmutableArray();
AssertEx.Equal(new byte[]
{
// headers:
0x4D, 0x5A, 0x90, 0x00, 0x03, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x00, 0x00,
0xB8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00,
0x0E, 0x1F, 0xBA, 0x0E, 0x00, 0xB4, 0x09, 0xCD, 0x21, 0xB8, 0x01, 0x4C, 0xCD, 0x21, 0x54, 0x68,
0x69, 0x73, 0x20, 0x70, 0x72, 0x6F, 0x67, 0x72, 0x61, 0x6D, 0x20, 0x63, 0x61, 0x6E, 0x6E, 0x6F,
0x74, 0x20, 0x62, 0x65, 0x20, 0x72, 0x75, 0x6E, 0x20, 0x69, 0x6E, 0x20, 0x44, 0x4F, 0x53, 0x20,
0x6D, 0x6F, 0x64, 0x65, 0x2E, 0x0D, 0x0D, 0x0A, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x50, 0x45, 0x00, 0x00, 0x4C, 0x01, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0xE0, 0x00, 0x00, 0x20, 0x0B, 0x01, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x08, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x40, 0x85,
0x00, 0x00, 0x10, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x10, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x05, 0x04, 0x00, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2E, 0x73, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00,
0x0A, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x2E, 0x73, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00,
0x00, 0x02, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2E, 0x73, 0x33, 0x00, 0x00, 0x00, 0x00, 0x00,
0x0A, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
}.
// .s1
Concat(Pad(512, new byte[] { 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31 })).
// .s2
Concat(Pad(512, new byte[] { 0x32, 0x32, 0x32, 0x32, 0x32, 0x32, 0x32, 0x32, 0x32, 0x32 })).
// .s3
Concat(Pad(512, new byte[] { 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33 })), peImage);
return peImage;
}
private static byte[] Pad(int length, byte[] bytes)
{
var result = new byte[length];
Array.Copy(bytes, result, bytes.Length);
return result;
}
}
internal static class ResourceHelper
{
public static byte[] GetResource(string name)
{
string fullName = "System.Reflection.Metadata.Tests.Resources." + name;
using (var stream = typeof(ResourceHelper).GetTypeInfo().Assembly.GetManifestResourceStream(fullName))
{
var bytes = new byte[stream.Length];
using (var memoryStream = new MemoryStream(bytes))
{
stream.CopyTo(memoryStream);
}
return bytes;
}
}
}
}
| -1 |
dotnet/runtime | 65,926 | FastMod for EEHashTable (Faster virtual generics) | Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR`, but that is a more complicated change and I think this one is worth having too anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
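The speedup comes from replacing the hash-bucket `%` (a hardware integer division) with a multiply-and-shift reduction that uses a multiplier precomputed from the table size. Below is a minimal, standalone C# sketch of that fastmod idea — illustrative only: the names and the self-check loop are not the EEHashTable code; the formula mirrors the `HashHelpers.FastMod` approach already used for `Dictionary<,>` bucket selection and holds for divisors below 2^31:
```csharp
using System;
// Precomputed once per table size; lookups then avoid a hardware division.
static ulong GetFastModMultiplier(uint divisor) => ulong.MaxValue / divisor + 1;
// Computes value % divisor via multiply/shift; requires divisor <= 2^31 - 1.
static uint FastMod(uint value, uint divisor, ulong multiplier) =>
    (uint)(((((multiplier * value) >> 32) + 1) * divisor) >> 32);
uint bucketCount = 97;                                  // stand-in for a hash table size
ulong multiplier = GetFastModMultiplier(bucketCount);
for (uint hash = 0; hash < 1_000_000; hash++)
{
    if (FastMod(hash, bucketCount, multiplier) != hash % bucketCount)
        throw new Exception($"fastmod mismatch at {hash}");
}
Console.WriteLine("fastmod matched % for all tested hashes");
```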
| EgorBo | "2022-02-27T11:34:01Z" | "2022-03-01T19:43:37Z" | a5a7836f8193414fe64232d5c386b6402eb70e19 | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | FastMod for EEHashTable (Faster virtual generics). Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR` but that is a more complicated change and I think this one worth having too anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| ./src/coreclr/vm/eehash.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
//
//emp
// File: eehash.h
//
// Provides hash table functionality needed in the EE - intended to be replaced later with better
// algorithms, but which have the same interface.
//
// The requirements are:
//
// 1. Any number of threads can be reading the hash table while another thread is writing, without error.
// 2. Only one thread can write at a time.
// 3. When calling ReplaceValue(), a reader will get the old value, or the new value, but not something
// in between.
// 4. DeleteValue() is an unsafe operation - no other threads can be in the hash table when this happens.
//
#ifndef _EE_HASH_H
#define _EE_HASH_H
#include "exceptmacros.h"
#include "syncclean.hpp"
#include "util.hpp"
class AllocMemTracker;
class ClassLoader;
struct LockOwner;
class NameHandle;
class SigTypeContext;
// The "blob" you get to store in the hash table
typedef PTR_VOID HashDatum;
// The heap that you want the allocation to be done in
typedef void* AllocationHeap;
// One of these is present for each element in the table.
// Update the SIZEOF_EEHASH_ENTRY macro below if you change this
// struct
typedef struct EEHashEntry EEHashEntry_t;
typedef DPTR(EEHashEntry_t) PTR_EEHashEntry_t;
struct EEHashEntry
{
PTR_EEHashEntry_t pNext;
DWORD dwHashValue;
HashDatum Data;
BYTE Key[1]; // The key is stored inline
};
// The key[1] is a place holder for the key
// SIZEOF_EEHASH_ENTRY is the size of struct up to (and not including) the key
#define SIZEOF_EEHASH_ENTRY (offsetof(EEHashEntry,Key[0]))
// Struct to hold a client's iteration state
struct EEHashTableIteration;
class GCHeap;
// Generic hash table.
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
class EEHashTableBase
{
public:
BOOL Init(DWORD dwNumBuckets, LockOwner *pLock, AllocationHeap pHeap = 0,BOOL CheckThreadSafety = TRUE);
void InsertValue(KeyType pKey, HashDatum Data, BOOL bDeepCopyKey = bDefaultCopyIsDeep);
void InsertKeyAsValue(KeyType pKey, BOOL bDeepCopyKey = bDefaultCopyIsDeep);
BOOL DeleteValue(KeyType pKey);
BOOL ReplaceValue(KeyType pKey, HashDatum Data);
BOOL ReplaceKey(KeyType pOldKey, KeyType pNewKey);
void ClearHashTable();
void EmptyHashTable();
BOOL IsEmpty();
void Destroy();
// Reader functions. Please place any functions that can be called from the
// reader threads here.
BOOL GetValue(KeyType pKey, HashDatum *pData);
BOOL GetValue(KeyType pKey, HashDatum *pData, DWORD hashValue);
// A fast inlinable flavor of GetValue that can return false instead of the actual item
    // if there is a race with updating of the hashtable. Callers of GetValueSpeculative
// should fall back to the slow GetValue if GetValueSpeculative returns false.
// Assumes that we are in cooperative mode already. For performance-sensitive codepaths.
BOOL GetValueSpeculative(KeyType pKey, HashDatum *pData);
BOOL GetValueSpeculative(KeyType pKey, HashDatum *pData, DWORD hashValue);
DWORD GetHash(KeyType Key);
DWORD GetCount();
// Walk through all the entries in the hash table, in meaningless order, without any
// synchronization.
//
// IterateStart()
// while (IterateNext())
// IterateGetKey();
//
// This is guaranteed to be DeleteValue-friendly if you advance the iterator before
    // deleting, i.e. if used in the following pattern:
//
// IterateStart();
// BOOL keepGoing = IterateNext();
// while(keepGoing)
// {
// key = IterateGetKey();
// keepGoing = IterateNext();
// ...
// DeleteValue(key);
// ..
// }
void IterateStart(EEHashTableIteration *pIter);
BOOL IterateNext(EEHashTableIteration *pIter);
KeyType IterateGetKey(EEHashTableIteration *pIter);
HashDatum IterateGetValue(EEHashTableIteration *pIter);
#ifdef _DEBUG
void SuppressSyncCheck()
{
LIMITED_METHOD_CONTRACT;
m_CheckThreadSafety=FALSE;
}
#endif
protected:
BOOL GrowHashTable();
EEHashEntry_t * FindItem(KeyType pKey);
EEHashEntry_t * FindItem(KeyType pKey, DWORD hashValue);
// A fast inlinable flavor of FindItem that can return null instead of the actual item
    // if there is a race with updating of the hashtable. Callers of FindItemSpeculative
// should fall back to the slow FindItem if FindItemSpeculative returns null.
// Assumes that we are in cooperative mode already. For performance-sensitive codepaths.
EEHashEntry_t * FindItemSpeculative(KeyType pKey, DWORD hashValue);
// Double buffer to fix the race condition of growhashtable (the update
// of m_pBuckets and m_dwNumBuckets has to be atomic, so we double buffer
// the structure and access it through a pointer, which can be updated
    // atomically). The union is in order to not change the SOS macros.
struct BucketTable
{
DPTR(PTR_EEHashEntry_t) m_pBuckets; // Pointer to first entry for each bucket
DWORD m_dwNumBuckets;
} m_BucketTable[2];
typedef DPTR(BucketTable) PTR_BucketTable;
// In a function we MUST only read this value ONCE, as the writer thread can change
    // the value asynchronously. We make this member volatile so the compiler won't do copy propagation
// optimizations that can make this read happen more than once. Note that we only need
// this property for the readers. As they are the ones that can have
// this variable changed (note also that if the variable was enregistered we wouldn't
// have any problem)
// BE VERY CAREFUL WITH WHAT YOU DO WITH THIS VARIABLE AS USING IT BADLY CAN CAUSE
    // RACE CONDITIONS
VolatilePtr<BucketTable, PTR_BucketTable> m_pVolatileBucketTable;
DWORD m_dwNumEntries;
AllocationHeap m_Heap;
Volatile<LONG> m_bGrowing;
#ifdef _DEBUG
LPVOID m_lockData;
FnLockOwner m_pfnLockOwner;
EEThreadId m_writerThreadId;
BOOL m_CheckThreadSafety;
#endif
#ifdef _DEBUG_IMPL
// A thread must own a lock for a hash if it is a writer.
BOOL OwnLock();
#endif // _DEBUG_IMPL
};
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
class EEHashTable : public EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>
{
public:
EEHashTable()
{
LIMITED_METHOD_CONTRACT;
this->m_BucketTable[0].m_pBuckets = NULL;
this->m_BucketTable[0].m_dwNumBuckets = 0;
this->m_BucketTable[1].m_pBuckets = NULL;
this->m_BucketTable[1].m_dwNumBuckets = 0;
#ifndef DACCESS_COMPILE
this->m_pVolatileBucketTable = NULL;
#endif
this->m_dwNumEntries = 0;
this->m_bGrowing = 0;
#ifdef _DEBUG
this->m_lockData = NULL;
this->m_pfnLockOwner = NULL;
#endif
}
~EEHashTable()
{
WRAPPER_NO_CONTRACT;
this->Destroy();
}
};
/* to be used as static variable - no constructor/destructor, assumes zero
initialized memory */
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
class EEHashTableStatic : public EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>
{
};
class EEIntHashTableHelper
{
public:
static EEHashEntry_t *AllocateEntry(int iKey, BOOL bDeepCopy, AllocationHeap pHeap = 0)
{
CONTRACTL
{
WRAPPER(THROWS);
WRAPPER(GC_NOTRIGGER);
INJECT_FAULT(return NULL;);
}
CONTRACTL_END
        _ASSERTE(!bDeepCopy && "Deep copy is not supported by the EEIntHashTableHelper");
EEHashEntry_t *pEntry = (EEHashEntry_t *) new (nothrow) BYTE[SIZEOF_EEHASH_ENTRY + sizeof(int)];
if (!pEntry)
return NULL;
*((int*) pEntry->Key) = iKey;
return pEntry;
}
static void DeleteEntry(EEHashEntry_t *pEntry, AllocationHeap pHeap = 0)
{
LIMITED_METHOD_CONTRACT;
// Delete the entry.
delete [] (BYTE*) pEntry;
}
static BOOL CompareKeys(EEHashEntry_t *pEntry, int iKey)
{
LIMITED_METHOD_CONTRACT;
return *((int*)pEntry->Key) == iKey;
}
static DWORD Hash(int iKey)
{
LIMITED_METHOD_CONTRACT;
return (DWORD)iKey;
}
static int GetKey(EEHashEntry_t *pEntry)
{
LIMITED_METHOD_CONTRACT;
return *((int*) pEntry->Key);
}
};
typedef EEHashTable<int, EEIntHashTableHelper, FALSE> EEIntHashTable;
typedef struct PtrPlusInt
{
void* pValue;
int iValue;
} *PPtrPlusInt;
class EEPtrPlusIntHashTableHelper
{
public:
static EEHashEntry_t *AllocateEntry(PtrPlusInt ppiKey, BOOL bDeepCopy, AllocationHeap pHeap = 0)
{
CONTRACTL
{
WRAPPER(THROWS);
WRAPPER(GC_NOTRIGGER);
INJECT_FAULT(return NULL;);
}
CONTRACTL_END
_ASSERTE(!bDeepCopy && "Deep copy is not supported by the EEPtrPlusIntHashTableHelper");
EEHashEntry_t *pEntry = (EEHashEntry_t *) new (nothrow) BYTE[SIZEOF_EEHASH_ENTRY + sizeof(PtrPlusInt)];
if (!pEntry)
return NULL;
*((PPtrPlusInt) pEntry->Key) = ppiKey;
return pEntry;
}
static void DeleteEntry(EEHashEntry_t *pEntry, AllocationHeap pHeap = 0)
{
LIMITED_METHOD_CONTRACT;
// Delete the entry.
delete [] (BYTE*) pEntry;
}
static BOOL CompareKeys(EEHashEntry_t *pEntry, PtrPlusInt ppiKey)
{
LIMITED_METHOD_CONTRACT;
return (((PPtrPlusInt)pEntry->Key)->pValue == ppiKey.pValue) &&
(((PPtrPlusInt)pEntry->Key)->iValue == ppiKey.iValue);
}
static DWORD Hash(PtrPlusInt ppiKey)
{
LIMITED_METHOD_CONTRACT;
return (DWORD)ppiKey.iValue ^
#ifdef TARGET_X86
(DWORD)(size_t) ppiKey.pValue;
#else
// <TODO> IA64: Is this a good hashing mechanism on IA64?</TODO>
(DWORD)(((size_t) ppiKey.pValue) >> 3);
#endif
}
static PtrPlusInt GetKey(EEHashEntry_t *pEntry)
{
LIMITED_METHOD_CONTRACT;
return *((PPtrPlusInt) pEntry->Key);
}
};
typedef EEHashTable<PtrPlusInt, EEPtrPlusIntHashTableHelper, FALSE> EEPtrPlusIntHashTable;
// UTF8 string hash table. The UTF8 strings are NULL terminated.
class EEUtf8HashTableHelper
{
public:
static EEHashEntry_t * AllocateEntry(LPCUTF8 pKey, BOOL bDeepCopy, AllocationHeap Heap);
static void DeleteEntry(EEHashEntry_t *pEntry, AllocationHeap Heap);
static BOOL CompareKeys(EEHashEntry_t *pEntry, LPCUTF8 pKey);
static DWORD Hash(LPCUTF8 pKey);
static LPCUTF8 GetKey(EEHashEntry_t *pEntry);
};
typedef EEHashTable<LPCUTF8, EEUtf8HashTableHelper, TRUE> EEUtf8StringHashTable;
typedef DPTR(EEUtf8StringHashTable) PTR_EEUtf8StringHashTable;
// Unicode String hash table - the keys are UNICODE strings which may
// contain embedded nulls. An EEStringData struct is used for the key
// which contains the length of the item. Note that this string is
// not necessarily null terminated and should never be treated as such.
const DWORD ONLY_LOW_CHARS_MASK = 0x80000000;
class EEStringData
{
private:
LPCWSTR szString; // The string data.
DWORD cch; // Characters in the string.
#ifdef _DEBUG
BOOL bDebugOnlyLowChars; // Does the string contain only characters less than 0x80?
DWORD dwDebugCch;
#endif // _DEBUG
public:
    // explicitly initialize cch to 0 because SetCharCount uses cch
EEStringData() : cch(0)
{
LIMITED_METHOD_CONTRACT;
SetStringBuffer(NULL);
SetCharCount(0);
SetIsOnlyLowChars(FALSE);
};
EEStringData(DWORD cchString, LPCWSTR str) : cch(0)
{
LIMITED_METHOD_CONTRACT;
SetStringBuffer(str);
SetCharCount(cchString);
SetIsOnlyLowChars(FALSE);
};
EEStringData(DWORD cchString, LPCWSTR str, BOOL onlyLow) : cch(0)
{
LIMITED_METHOD_CONTRACT;
SetStringBuffer(str);
SetCharCount(cchString);
SetIsOnlyLowChars(onlyLow);
};
inline ULONG GetCharCount() const
{
LIMITED_METHOD_CONTRACT;
_ASSERTE ((cch & ~ONLY_LOW_CHARS_MASK) == dwDebugCch);
return (cch & ~ONLY_LOW_CHARS_MASK);
}
inline void SetCharCount(ULONG _cch)
{
LIMITED_METHOD_CONTRACT;
#ifdef _DEBUG
dwDebugCch = _cch;
#endif // _DEBUG
cch = ((DWORD)_cch) | (cch & ONLY_LOW_CHARS_MASK);
}
inline LPCWSTR GetStringBuffer() const
{
LIMITED_METHOD_CONTRACT;
return (szString);
}
inline void SetStringBuffer(LPCWSTR _szString)
{
LIMITED_METHOD_CONTRACT;
szString = _szString;
}
inline BOOL GetIsOnlyLowChars() const
{
LIMITED_METHOD_CONTRACT;
_ASSERTE(bDebugOnlyLowChars == ((cch & ONLY_LOW_CHARS_MASK) ? TRUE : FALSE));
return ((cch & ONLY_LOW_CHARS_MASK) ? TRUE : FALSE);
}
inline void SetIsOnlyLowChars(BOOL bIsOnlyLowChars)
{
LIMITED_METHOD_CONTRACT;
#ifdef _DEBUG
bDebugOnlyLowChars = bIsOnlyLowChars;
#endif // _DEBUG
bIsOnlyLowChars ? (cch |= ONLY_LOW_CHARS_MASK) : (cch &= ~ONLY_LOW_CHARS_MASK);
}
};
class EEUnicodeHashTableHelper
{
public:
static EEHashEntry_t * AllocateEntry(EEStringData *pKey, BOOL bDeepCopy, AllocationHeap Heap);
static void DeleteEntry(EEHashEntry_t *pEntry, AllocationHeap Heap);
static BOOL CompareKeys(EEHashEntry_t *pEntry, EEStringData *pKey);
static DWORD Hash(EEStringData *pKey);
static EEStringData * GetKey(EEHashEntry_t *pEntry);
static void ReplaceKey(EEHashEntry_t *pEntry, EEStringData *pNewKey);
};
typedef EEHashTable<EEStringData *, EEUnicodeHashTableHelper, TRUE> EEUnicodeStringHashTable;
class EEUnicodeStringLiteralHashTableHelper
{
public:
static EEHashEntry_t * AllocateEntry(EEStringData *pKey, BOOL bDeepCopy, AllocationHeap Heap);
static void DeleteEntry(EEHashEntry_t *pEntry, AllocationHeap Heap);
static BOOL CompareKeys(EEHashEntry_t *pEntry, EEStringData *pKey);
static DWORD Hash(EEStringData *pKey);
static void ReplaceKey(EEHashEntry_t *pEntry, EEStringData *pNewKey);
};
typedef EEHashTable<EEStringData *, EEUnicodeStringLiteralHashTableHelper, TRUE> EEUnicodeStringLiteralHashTable;
// Generic pointer hash table helper.
template <class KeyPointerType>
class EEPtrHashTableHelper
{
public:
static EEHashEntry_t *AllocateEntry(KeyPointerType pKey, BOOL bDeepCopy, AllocationHeap Heap)
{
CONTRACTL
{
WRAPPER(THROWS);
WRAPPER(GC_NOTRIGGER);
INJECT_FAULT(return FALSE;);
}
CONTRACTL_END
_ASSERTE(!bDeepCopy && "Deep copy is not supported by the EEPtrHashTableHelper");
_ASSERTE(sizeof(KeyPointerType) == sizeof(void *) && "KeyPointerType must be a pointer type");
EEHashEntry_t *pEntry = (EEHashEntry_t *) new (nothrow) BYTE[SIZEOF_EEHASH_ENTRY + sizeof(KeyPointerType)];
if (!pEntry)
return NULL;
*((KeyPointerType*)pEntry->Key) = pKey;
return pEntry;
}
static void DeleteEntry(EEHashEntry_t *pEntry, AllocationHeap Heap)
{
LIMITED_METHOD_CONTRACT;
// Delete the entry.
delete [] (BYTE*) pEntry;
}
static BOOL CompareKeys(EEHashEntry_t *pEntry, KeyPointerType pKey)
{
LIMITED_METHOD_CONTRACT;
SUPPORTS_DAC;
KeyPointerType pEntryKey = *((KeyPointerType*)pEntry->Key);
return pEntryKey == pKey;
}
static DWORD Hash(KeyPointerType pKey)
{
LIMITED_METHOD_CONTRACT;
SUPPORTS_DAC;
#ifdef TARGET_X86
return (DWORD)(size_t) dac_cast<TADDR>(pKey);
#else
// <TODO> IA64: Is this a good hashing mechanism on IA64?</TODO>
return (DWORD)(((size_t) dac_cast<TADDR>(pKey)) >> 3);
#endif
}
static KeyPointerType GetKey(EEHashEntry_t *pEntry)
{
LIMITED_METHOD_CONTRACT;
return *((KeyPointerType*)pEntry->Key);
}
};
typedef EEHashTable<PTR_VOID, EEPtrHashTableHelper<PTR_VOID>, FALSE> EEPtrHashTable;
typedef DPTR(EEPtrHashTable) PTR_EEPtrHashTable;
// Define a hash of generic instantiations (represented by a SigTypeContext).
class EEInstantiationHashTableHelper
{
public:
static EEHashEntry_t *AllocateEntry(const SigTypeContext *pKey, BOOL bDeepCopy, AllocationHeap pHeap = 0);
static void DeleteEntry(EEHashEntry_t *pEntry, AllocationHeap pHeap = 0);
static BOOL CompareKeys(EEHashEntry_t *pEntry, const SigTypeContext *pKey);
static DWORD Hash(const SigTypeContext *pKey);
static const SigTypeContext *GetKey(EEHashEntry_t *pEntry);
};
typedef EEHashTable<const SigTypeContext*, EEInstantiationHashTableHelper, FALSE> EEInstantiationHashTable;
// ComComponentInfo hashtable.
struct ClassFactoryInfo
{
GUID m_clsid;
PCWSTR m_strServerName;
};
class EEClassFactoryInfoHashTableHelper
{
public:
static EEHashEntry_t *AllocateEntry(ClassFactoryInfo *pKey, BOOL bDeepCopy, AllocationHeap Heap);
static void DeleteEntry(EEHashEntry_t *pEntry, AllocationHeap Heap);
static BOOL CompareKeys(EEHashEntry_t *pEntry, ClassFactoryInfo *pKey);
static DWORD Hash(ClassFactoryInfo *pKey);
static ClassFactoryInfo *GetKey(EEHashEntry_t *pEntry);
};
typedef EEHashTable<ClassFactoryInfo *, EEClassFactoryInfoHashTableHelper, TRUE> EEClassFactoryInfoHashTable;
// Struct to hold a client's iteration state
struct EEHashTableIteration
{
DWORD m_dwBucket;
EEHashEntry_t *m_pEntry;
#ifdef _DEBUG
void *m_pTable;
#endif
};
#endif /* _EE_HASH_H */
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
//
//emp
// File: eehash.h
//
// Provides hash table functionality needed in the EE - intended to be replaced later with better
// algorithms, but which have the same interface.
//
// The requirements are:
//
// 1. Any number of threads can be reading the hash table while another thread is writing, without error.
// 2. Only one thread can write at a time.
// 3. When calling ReplaceValue(), a reader will get the old value, or the new value, but not something
// in between.
// 4. DeleteValue() is an unsafe operation - no other threads can be in the hash table when this happens.
//
#ifndef _EE_HASH_H
#define _EE_HASH_H
#include "exceptmacros.h"
#include "syncclean.hpp"
#include "util.hpp"
class AllocMemTracker;
class ClassLoader;
struct LockOwner;
class NameHandle;
class SigTypeContext;
// The "blob" you get to store in the hash table
typedef PTR_VOID HashDatum;
// The heap that you want the allocation to be done in
typedef void* AllocationHeap;
// One of these is present for each element in the table.
// Update the SIZEOF_EEHASH_ENTRY macro below if you change this
// struct
typedef struct EEHashEntry EEHashEntry_t;
typedef DPTR(EEHashEntry_t) PTR_EEHashEntry_t;
struct EEHashEntry
{
PTR_EEHashEntry_t pNext;
DWORD dwHashValue;
HashDatum Data;
BYTE Key[1]; // The key is stored inline
};
// The key[1] is a place holder for the key
// SIZEOF_EEHASH_ENTRY is the size of struct up to (and not including) the key
#define SIZEOF_EEHASH_ENTRY (offsetof(EEHashEntry,Key[0]))
// Struct to hold a client's iteration state
struct EEHashTableIteration;
class GCHeap;
// Generic hash table.
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
class EEHashTableBase
{
public:
BOOL Init(DWORD dwNumBuckets, LockOwner *pLock, AllocationHeap pHeap = 0,BOOL CheckThreadSafety = TRUE);
void InsertValue(KeyType pKey, HashDatum Data, BOOL bDeepCopyKey = bDefaultCopyIsDeep);
void InsertKeyAsValue(KeyType pKey, BOOL bDeepCopyKey = bDefaultCopyIsDeep);
BOOL DeleteValue(KeyType pKey);
BOOL ReplaceValue(KeyType pKey, HashDatum Data);
BOOL ReplaceKey(KeyType pOldKey, KeyType pNewKey);
void ClearHashTable();
void EmptyHashTable();
BOOL IsEmpty();
void Destroy();
// Reader functions. Please place any functions that can be called from the
// reader threads here.
BOOL GetValue(KeyType pKey, HashDatum *pData);
BOOL GetValue(KeyType pKey, HashDatum *pData, DWORD hashValue);
// A fast inlinable flavor of GetValue that can return false instead of the actual item
    // if there is a race with updating of the hashtable. Callers of GetValueSpeculative
// should fall back to the slow GetValue if GetValueSpeculative returns false.
// Assumes that we are in cooperative mode already. For performance-sensitive codepaths.
BOOL GetValueSpeculative(KeyType pKey, HashDatum *pData);
BOOL GetValueSpeculative(KeyType pKey, HashDatum *pData, DWORD hashValue);
DWORD GetHash(KeyType Key);
DWORD GetCount();
// Walk through all the entries in the hash table, in meaningless order, without any
// synchronization.
//
// IterateStart()
// while (IterateNext())
// IterateGetKey();
//
// This is guaranteed to be DeleteValue-friendly if you advance the iterator before
    // deleting, i.e. if used in the following pattern:
//
// IterateStart();
// BOOL keepGoing = IterateNext();
// while(keepGoing)
// {
// key = IterateGetKey();
// keepGoing = IterateNext();
// ...
// DeleteValue(key);
// ..
// }
void IterateStart(EEHashTableIteration *pIter);
BOOL IterateNext(EEHashTableIteration *pIter);
KeyType IterateGetKey(EEHashTableIteration *pIter);
HashDatum IterateGetValue(EEHashTableIteration *pIter);
#ifdef _DEBUG
void SuppressSyncCheck()
{
LIMITED_METHOD_CONTRACT;
m_CheckThreadSafety=FALSE;
}
#endif
protected:
BOOL GrowHashTable();
EEHashEntry_t * FindItem(KeyType pKey);
EEHashEntry_t * FindItem(KeyType pKey, DWORD hashValue);
// A fast inlinable flavor of FindItem that can return null instead of the actual item
    // if there is a race with updating of the hashtable. Callers of FindItemSpeculative
// should fall back to the slow FindItem if FindItemSpeculative returns null.
// Assumes that we are in cooperative mode already. For performance-sensitive codepaths.
EEHashEntry_t * FindItemSpeculative(KeyType pKey, DWORD hashValue);
// Double buffer to fix the race condition of growhashtable (the update
// of m_pBuckets and m_dwNumBuckets has to be atomic, so we double buffer
// the structure and access it through a pointer, which can be updated
    // atomically). The union is in order to not change the SOS macros.
struct BucketTable
{
DPTR(PTR_EEHashEntry_t) m_pBuckets; // Pointer to first entry for each bucket
DWORD m_dwNumBuckets;
#ifdef TARGET_64BIT
UINT64 m_dwNumBucketsMul; // "Fast Mod" multiplier for "X % m_dwNumBuckets"
#endif
} m_BucketTable[2];
typedef DPTR(BucketTable) PTR_BucketTable;
// In a function we MUST only read this value ONCE, as the writer thread can change
    // the value asynchronously. We make this member volatile so the compiler won't do copy propagation
// optimizations that can make this read happen more than once. Note that we only need
// this property for the readers. As they are the ones that can have
// this variable changed (note also that if the variable was enregistered we wouldn't
// have any problem)
// BE VERY CAREFUL WITH WHAT YOU DO WITH THIS VARIABLE AS USING IT BADLY CAN CAUSE
    // RACE CONDITIONS
VolatilePtr<BucketTable, PTR_BucketTable> m_pVolatileBucketTable;
DWORD m_dwNumEntries;
AllocationHeap m_Heap;
Volatile<LONG> m_bGrowing;
#ifdef _DEBUG
LPVOID m_lockData;
FnLockOwner m_pfnLockOwner;
EEThreadId m_writerThreadId;
BOOL m_CheckThreadSafety;
#endif
#ifdef _DEBUG_IMPL
// A thread must own a lock for a hash if it is a writer.
BOOL OwnLock();
#endif // _DEBUG_IMPL
};
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
class EEHashTable : public EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>
{
public:
EEHashTable()
{
LIMITED_METHOD_CONTRACT;
this->m_BucketTable[0].m_pBuckets = NULL;
this->m_BucketTable[0].m_dwNumBuckets = 0;
this->m_BucketTable[1].m_pBuckets = NULL;
this->m_BucketTable[1].m_dwNumBuckets = 0;
#ifdef TARGET_64BIT
this->m_BucketTable[0].m_dwNumBucketsMul = 0;
this->m_BucketTable[1].m_dwNumBucketsMul = 0;
#endif
#ifndef DACCESS_COMPILE
this->m_pVolatileBucketTable = NULL;
#endif
this->m_dwNumEntries = 0;
this->m_bGrowing = 0;
#ifdef _DEBUG
this->m_lockData = NULL;
this->m_pfnLockOwner = NULL;
#endif
}
~EEHashTable()
{
WRAPPER_NO_CONTRACT;
this->Destroy();
}
};
/* to be used as static variable - no constructor/destructor, assumes zero
initialized memory */
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
class EEHashTableStatic : public EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>
{
};
class EEIntHashTableHelper
{
public:
static EEHashEntry_t *AllocateEntry(int iKey, BOOL bDeepCopy, AllocationHeap pHeap = 0)
{
CONTRACTL
{
WRAPPER(THROWS);
WRAPPER(GC_NOTRIGGER);
INJECT_FAULT(return NULL;);
}
CONTRACTL_END
        _ASSERTE(!bDeepCopy && "Deep copy is not supported by the EEIntHashTableHelper");
EEHashEntry_t *pEntry = (EEHashEntry_t *) new (nothrow) BYTE[SIZEOF_EEHASH_ENTRY + sizeof(int)];
if (!pEntry)
return NULL;
*((int*) pEntry->Key) = iKey;
return pEntry;
}
static void DeleteEntry(EEHashEntry_t *pEntry, AllocationHeap pHeap = 0)
{
LIMITED_METHOD_CONTRACT;
// Delete the entry.
delete [] (BYTE*) pEntry;
}
static BOOL CompareKeys(EEHashEntry_t *pEntry, int iKey)
{
LIMITED_METHOD_CONTRACT;
return *((int*)pEntry->Key) == iKey;
}
static DWORD Hash(int iKey)
{
LIMITED_METHOD_CONTRACT;
return (DWORD)iKey;
}
static int GetKey(EEHashEntry_t *pEntry)
{
LIMITED_METHOD_CONTRACT;
return *((int*) pEntry->Key);
}
};
typedef EEHashTable<int, EEIntHashTableHelper, FALSE> EEIntHashTable;
typedef struct PtrPlusInt
{
void* pValue;
int iValue;
} *PPtrPlusInt;
class EEPtrPlusIntHashTableHelper
{
public:
static EEHashEntry_t *AllocateEntry(PtrPlusInt ppiKey, BOOL bDeepCopy, AllocationHeap pHeap = 0)
{
CONTRACTL
{
WRAPPER(THROWS);
WRAPPER(GC_NOTRIGGER);
INJECT_FAULT(return NULL;);
}
CONTRACTL_END
_ASSERTE(!bDeepCopy && "Deep copy is not supported by the EEPtrPlusIntHashTableHelper");
EEHashEntry_t *pEntry = (EEHashEntry_t *) new (nothrow) BYTE[SIZEOF_EEHASH_ENTRY + sizeof(PtrPlusInt)];
if (!pEntry)
return NULL;
*((PPtrPlusInt) pEntry->Key) = ppiKey;
return pEntry;
}
static void DeleteEntry(EEHashEntry_t *pEntry, AllocationHeap pHeap = 0)
{
LIMITED_METHOD_CONTRACT;
// Delete the entry.
delete [] (BYTE*) pEntry;
}
static BOOL CompareKeys(EEHashEntry_t *pEntry, PtrPlusInt ppiKey)
{
LIMITED_METHOD_CONTRACT;
return (((PPtrPlusInt)pEntry->Key)->pValue == ppiKey.pValue) &&
(((PPtrPlusInt)pEntry->Key)->iValue == ppiKey.iValue);
}
static DWORD Hash(PtrPlusInt ppiKey)
{
LIMITED_METHOD_CONTRACT;
return (DWORD)ppiKey.iValue ^
#ifdef TARGET_X86
(DWORD)(size_t) ppiKey.pValue;
#else
// <TODO> IA64: Is this a good hashing mechanism on IA64?</TODO>
(DWORD)(((size_t) ppiKey.pValue) >> 3);
#endif
}
static PtrPlusInt GetKey(EEHashEntry_t *pEntry)
{
LIMITED_METHOD_CONTRACT;
return *((PPtrPlusInt) pEntry->Key);
}
};
typedef EEHashTable<PtrPlusInt, EEPtrPlusIntHashTableHelper, FALSE> EEPtrPlusIntHashTable;
// UTF8 string hash table. The UTF8 strings are NULL terminated.
class EEUtf8HashTableHelper
{
public:
static EEHashEntry_t * AllocateEntry(LPCUTF8 pKey, BOOL bDeepCopy, AllocationHeap Heap);
static void DeleteEntry(EEHashEntry_t *pEntry, AllocationHeap Heap);
static BOOL CompareKeys(EEHashEntry_t *pEntry, LPCUTF8 pKey);
static DWORD Hash(LPCUTF8 pKey);
static LPCUTF8 GetKey(EEHashEntry_t *pEntry);
};
typedef EEHashTable<LPCUTF8, EEUtf8HashTableHelper, TRUE> EEUtf8StringHashTable;
typedef DPTR(EEUtf8StringHashTable) PTR_EEUtf8StringHashTable;
// Unicode String hash table - the keys are UNICODE strings which may
// contain embedded nulls. An EEStringData struct is used for the key
// which contains the length of the item. Note that this string is
// not necessarily null terminated and should never be treated as such.
const DWORD ONLY_LOW_CHARS_MASK = 0x80000000;
class EEStringData
{
private:
LPCWSTR szString; // The string data.
DWORD cch; // Characters in the string.
#ifdef _DEBUG
BOOL bDebugOnlyLowChars; // Does the string contain only characters less than 0x80?
DWORD dwDebugCch;
#endif // _DEBUG
public:
    // explicitly initialize cch to 0 because SetCharCount uses cch
EEStringData() : cch(0)
{
LIMITED_METHOD_CONTRACT;
SetStringBuffer(NULL);
SetCharCount(0);
SetIsOnlyLowChars(FALSE);
};
EEStringData(DWORD cchString, LPCWSTR str) : cch(0)
{
LIMITED_METHOD_CONTRACT;
SetStringBuffer(str);
SetCharCount(cchString);
SetIsOnlyLowChars(FALSE);
};
EEStringData(DWORD cchString, LPCWSTR str, BOOL onlyLow) : cch(0)
{
LIMITED_METHOD_CONTRACT;
SetStringBuffer(str);
SetCharCount(cchString);
SetIsOnlyLowChars(onlyLow);
};
inline ULONG GetCharCount() const
{
LIMITED_METHOD_CONTRACT;
_ASSERTE ((cch & ~ONLY_LOW_CHARS_MASK) == dwDebugCch);
return (cch & ~ONLY_LOW_CHARS_MASK);
}
inline void SetCharCount(ULONG _cch)
{
LIMITED_METHOD_CONTRACT;
#ifdef _DEBUG
dwDebugCch = _cch;
#endif // _DEBUG
cch = ((DWORD)_cch) | (cch & ONLY_LOW_CHARS_MASK);
}
inline LPCWSTR GetStringBuffer() const
{
LIMITED_METHOD_CONTRACT;
return (szString);
}
inline void SetStringBuffer(LPCWSTR _szString)
{
LIMITED_METHOD_CONTRACT;
szString = _szString;
}
inline BOOL GetIsOnlyLowChars() const
{
LIMITED_METHOD_CONTRACT;
_ASSERTE(bDebugOnlyLowChars == ((cch & ONLY_LOW_CHARS_MASK) ? TRUE : FALSE));
return ((cch & ONLY_LOW_CHARS_MASK) ? TRUE : FALSE);
}
inline void SetIsOnlyLowChars(BOOL bIsOnlyLowChars)
{
LIMITED_METHOD_CONTRACT;
#ifdef _DEBUG
bDebugOnlyLowChars = bIsOnlyLowChars;
#endif // _DEBUG
bIsOnlyLowChars ? (cch |= ONLY_LOW_CHARS_MASK) : (cch &= ~ONLY_LOW_CHARS_MASK);
}
};
class EEUnicodeHashTableHelper
{
public:
static EEHashEntry_t * AllocateEntry(EEStringData *pKey, BOOL bDeepCopy, AllocationHeap Heap);
static void DeleteEntry(EEHashEntry_t *pEntry, AllocationHeap Heap);
static BOOL CompareKeys(EEHashEntry_t *pEntry, EEStringData *pKey);
static DWORD Hash(EEStringData *pKey);
static EEStringData * GetKey(EEHashEntry_t *pEntry);
static void ReplaceKey(EEHashEntry_t *pEntry, EEStringData *pNewKey);
};
typedef EEHashTable<EEStringData *, EEUnicodeHashTableHelper, TRUE> EEUnicodeStringHashTable;
class EEUnicodeStringLiteralHashTableHelper
{
public:
static EEHashEntry_t * AllocateEntry(EEStringData *pKey, BOOL bDeepCopy, AllocationHeap Heap);
static void DeleteEntry(EEHashEntry_t *pEntry, AllocationHeap Heap);
static BOOL CompareKeys(EEHashEntry_t *pEntry, EEStringData *pKey);
static DWORD Hash(EEStringData *pKey);
static void ReplaceKey(EEHashEntry_t *pEntry, EEStringData *pNewKey);
};
typedef EEHashTable<EEStringData *, EEUnicodeStringLiteralHashTableHelper, TRUE> EEUnicodeStringLiteralHashTable;
// Generic pointer hash table helper.
template <class KeyPointerType>
class EEPtrHashTableHelper
{
public:
static EEHashEntry_t *AllocateEntry(KeyPointerType pKey, BOOL bDeepCopy, AllocationHeap Heap)
{
CONTRACTL
{
WRAPPER(THROWS);
WRAPPER(GC_NOTRIGGER);
INJECT_FAULT(return FALSE;);
}
CONTRACTL_END
_ASSERTE(!bDeepCopy && "Deep copy is not supported by the EEPtrHashTableHelper");
_ASSERTE(sizeof(KeyPointerType) == sizeof(void *) && "KeyPointerType must be a pointer type");
EEHashEntry_t *pEntry = (EEHashEntry_t *) new (nothrow) BYTE[SIZEOF_EEHASH_ENTRY + sizeof(KeyPointerType)];
if (!pEntry)
return NULL;
*((KeyPointerType*)pEntry->Key) = pKey;
return pEntry;
}
static void DeleteEntry(EEHashEntry_t *pEntry, AllocationHeap Heap)
{
LIMITED_METHOD_CONTRACT;
// Delete the entry.
delete [] (BYTE*) pEntry;
}
static BOOL CompareKeys(EEHashEntry_t *pEntry, KeyPointerType pKey)
{
LIMITED_METHOD_CONTRACT;
SUPPORTS_DAC;
KeyPointerType pEntryKey = *((KeyPointerType*)pEntry->Key);
return pEntryKey == pKey;
}
static DWORD Hash(KeyPointerType pKey)
{
LIMITED_METHOD_CONTRACT;
SUPPORTS_DAC;
#ifdef TARGET_X86
return (DWORD)(size_t) dac_cast<TADDR>(pKey);
#else
// <TODO> IA64: Is this a good hashing mechanism on IA64?</TODO>
return (DWORD)(((size_t) dac_cast<TADDR>(pKey)) >> 3);
#endif
}
static KeyPointerType GetKey(EEHashEntry_t *pEntry)
{
LIMITED_METHOD_CONTRACT;
return *((KeyPointerType*)pEntry->Key);
}
};
typedef EEHashTable<PTR_VOID, EEPtrHashTableHelper<PTR_VOID>, FALSE> EEPtrHashTable;
typedef DPTR(EEPtrHashTable) PTR_EEPtrHashTable;
// Define a hash of generic instantiations (represented by a SigTypeContext).
class EEInstantiationHashTableHelper
{
public:
static EEHashEntry_t *AllocateEntry(const SigTypeContext *pKey, BOOL bDeepCopy, AllocationHeap pHeap = 0);
static void DeleteEntry(EEHashEntry_t *pEntry, AllocationHeap pHeap = 0);
static BOOL CompareKeys(EEHashEntry_t *pEntry, const SigTypeContext *pKey);
static DWORD Hash(const SigTypeContext *pKey);
static const SigTypeContext *GetKey(EEHashEntry_t *pEntry);
};
typedef EEHashTable<const SigTypeContext*, EEInstantiationHashTableHelper, FALSE> EEInstantiationHashTable;
// ComComponentInfo hashtable.
struct ClassFactoryInfo
{
GUID m_clsid;
PCWSTR m_strServerName;
};
class EEClassFactoryInfoHashTableHelper
{
public:
static EEHashEntry_t *AllocateEntry(ClassFactoryInfo *pKey, BOOL bDeepCopy, AllocationHeap Heap);
static void DeleteEntry(EEHashEntry_t *pEntry, AllocationHeap Heap);
static BOOL CompareKeys(EEHashEntry_t *pEntry, ClassFactoryInfo *pKey);
static DWORD Hash(ClassFactoryInfo *pKey);
static ClassFactoryInfo *GetKey(EEHashEntry_t *pEntry);
};
typedef EEHashTable<ClassFactoryInfo *, EEClassFactoryInfoHashTableHelper, TRUE> EEClassFactoryInfoHashTable;
// Struct to hold a client's iteration state
struct EEHashTableIteration
{
DWORD m_dwBucket;
EEHashEntry_t *m_pEntry;
#ifdef _DEBUG
void *m_pTable;
#endif
};
#endif /* _EE_HASH_H */
| 1 |
dotnet/runtime | 65,926 | FastMod for EEHashTable (Faster virtual generics) | Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR` but that is a more complicated change and I think this one worth having too anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| EgorBo | "2022-02-27T11:34:01Z" | "2022-03-01T19:43:37Z" | a5a7836f8193414fe64232d5c386b6402eb70e19 | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | FastMod for EEHashTable (Faster virtual generics). Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR` but that is a more complicated change and I think this one worth having too anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| ./src/coreclr/vm/eehash.inl | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
#ifndef _EE_HASH_INL
#define _EE_HASH_INL
#ifdef _DEBUG_IMPL
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
BOOL EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::OwnLock()
{
WRAPPER_NO_CONTRACT;
if (m_CheckThreadSafety == FALSE)
return TRUE;
if (m_pfnLockOwner == NULL) {
return m_writerThreadId.IsCurrentThread();
}
else {
BOOL ret = m_pfnLockOwner(m_lockData);
if (!ret) {
if (Debug_IsLockedViaThreadSuspension()) {
ret = TRUE;
}
}
return ret;
}
}
#endif // _DEBUG_IMPL
#ifndef DACCESS_COMPILE
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
void EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::Destroy()
{
CONTRACTL
{
WRAPPER(THROWS);
WRAPPER(GC_NOTRIGGER);
FORBID_FAULT;
}
CONTRACTL_END
if (m_pVolatileBucketTable && m_pVolatileBucketTable->m_pBuckets != NULL)
{
DWORD i;
for (i = 0; i < m_pVolatileBucketTable->m_dwNumBuckets; i++)
{
EEHashEntry_t *pEntry, *pNext;
for (pEntry = m_pVolatileBucketTable->m_pBuckets[i]; pEntry != NULL; pEntry = pNext)
{
pNext = pEntry->pNext;
Helper::DeleteEntry(pEntry, m_Heap);
}
}
delete[] (m_pVolatileBucketTable->m_pBuckets-1);
m_pVolatileBucketTable = NULL;
}
}
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
void EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::ClearHashTable()
{
CONTRACTL
{
WRAPPER(THROWS);
WRAPPER(GC_NOTRIGGER);
FORBID_FAULT;
}
CONTRACTL_END
//_ASSERTE (OwnLock());
    // Transition to COOP mode. This is needed because EEHashTable is lock free and it can be read
    // from multiple threads without taking locks. On rehash, you want to get rid of the old copy
    // of the table. You can only get rid of it once nobody is using it. That's a problem because
// there is no lock to tell when the last reader stopped using the old copy of the table.
// The solution to this problem is to access the table in cooperative mode, and to get rid of
// the old copy of the table when we are suspended for GC. When we are suspended for GC,
// we know that nobody is using the old copy of the table anymore.
// BROKEN: This is called sometimes from the CorMap hash before the EE is started up
GCX_COOP_NO_THREAD_BROKEN();
if (m_pVolatileBucketTable->m_pBuckets != NULL)
{
DWORD i;
for (i = 0; i < m_pVolatileBucketTable->m_dwNumBuckets; i++)
{
EEHashEntry_t *pEntry, *pNext;
for (pEntry = m_pVolatileBucketTable->m_pBuckets[i]; pEntry != NULL; pEntry = pNext)
{
pNext = pEntry->pNext;
Helper::DeleteEntry(pEntry, m_Heap);
}
}
delete[] (m_pVolatileBucketTable->m_pBuckets-1);
m_pVolatileBucketTable->m_pBuckets = NULL;
}
m_pVolatileBucketTable->m_dwNumBuckets = 0;
m_dwNumEntries = 0;
}
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
void EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::EmptyHashTable()
{
CONTRACTL
{
WRAPPER(THROWS);
WRAPPER(GC_NOTRIGGER);
FORBID_FAULT;
}
CONTRACTL_END
_ASSERTE (OwnLock());
    // Transition to COOP mode. This is needed because EEHashTable is lock free and it can be read
    // from multiple threads without taking locks. On rehash, you want to get rid of the old copy
    // of the table. You can only get rid of it once nobody is using it. That's a problem because
// there is no lock to tell when the last reader stopped using the old copy of the table.
// The solution to this problem is to access the table in cooperative mode, and to get rid of
// the old copy of the table when we are suspended for GC. When we are suspended for GC,
// we know that nobody is using the old copy of the table anymore.
// BROKEN: This is called sometimes from the CorMap hash before the EE is started up
GCX_COOP_NO_THREAD_BROKEN();
if (m_pVolatileBucketTable->m_pBuckets != NULL)
{
DWORD i;
for (i = 0; i < m_pVolatileBucketTable->m_dwNumBuckets; i++)
{
EEHashEntry_t *pEntry, *pNext;
for (pEntry = m_pVolatileBucketTable->m_pBuckets[i]; pEntry != NULL; pEntry = pNext)
{
pNext = pEntry->pNext;
Helper::DeleteEntry(pEntry, m_Heap);
}
m_pVolatileBucketTable->m_pBuckets[i] = NULL;
}
}
m_dwNumEntries = 0;
}
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
BOOL EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::Init(DWORD dwNumBuckets, LockOwner *pLock, AllocationHeap pHeap, BOOL CheckThreadSafety)
{
CONTRACTL
{
WRAPPER(NOTHROW);
WRAPPER(GC_NOTRIGGER);
INJECT_FAULT(return FALSE;);
#ifndef DACCESS_COMPILE
PRECONDITION(m_pVolatileBucketTable.Load() == NULL && "EEHashTable::Init() called twice.");
#endif
}
CONTRACTL_END
m_pVolatileBucketTable = &m_BucketTable[0];
DWORD dwNumBucketsPlusOne;
// Prefast overflow sanity check the addition
if (!ClrSafeInt<DWORD>::addition(dwNumBuckets, 1, dwNumBucketsPlusOne))
return FALSE;
S_SIZE_T safeSize(sizeof(EEHashEntry_t *));
safeSize *= dwNumBucketsPlusOne;
if (safeSize.IsOverflow())
ThrowHR(COR_E_OVERFLOW);
SIZE_T cbAlloc = safeSize.Value();
m_pVolatileBucketTable->m_pBuckets = (EEHashEntry_t **) new (nothrow) BYTE[cbAlloc];
if (m_pVolatileBucketTable->m_pBuckets == NULL)
return FALSE;
memset(m_pVolatileBucketTable->m_pBuckets, 0, cbAlloc);
// The first slot links to the next list.
m_pVolatileBucketTable->m_pBuckets++;
m_pVolatileBucketTable->m_dwNumBuckets = dwNumBuckets;
m_Heap = pHeap;
#ifdef _DEBUG
if (pLock == NULL) {
m_lockData = NULL;
m_pfnLockOwner = NULL;
}
else {
m_lockData = pLock->lock;
m_pfnLockOwner = pLock->lockOwnerFunc;
}
if (m_pfnLockOwner == NULL) {
m_writerThreadId.SetToCurrentThread();
}
m_CheckThreadSafety = CheckThreadSafety;
#endif
return TRUE;
}
// Does not handle duplicates!
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
void EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::InsertValue(KeyType pKey, HashDatum Data, BOOL bDeepCopyKey)
{
CONTRACTL
{
WRAPPER(THROWS);
WRAPPER(GC_NOTRIGGER);
INJECT_FAULT(COMPlusThrowOM(););
}
CONTRACTL_END
_ASSERTE (OwnLock());
    // Transition to COOP mode. This is needed because EEHashTable is lock free and it can be read
    // from multiple threads without taking locks. On rehash, you want to get rid of the old copy
    // of the table. You can only get rid of it once nobody is using it. That's a problem because
// there is no lock to tell when the last reader stopped using the old copy of the table.
// The solution to this problem is to access the table in cooperative mode, and to get rid of
// the old copy of the table when we are suspended for GC. When we are suspended for GC,
// we know that nobody is using the old copy of the table anymore.
// BROKEN: This is called sometimes from the CorMap hash before the EE is started up
GCX_COOP_NO_THREAD_BROKEN();
_ASSERTE(m_pVolatileBucketTable->m_dwNumBuckets != 0);
if (m_dwNumEntries > m_pVolatileBucketTable->m_dwNumBuckets*2)
{
if (!GrowHashTable()) COMPlusThrowOM();
}
DWORD dwHash = (DWORD)Helper::Hash(pKey);
DWORD dwBucket = dwHash % m_pVolatileBucketTable->m_dwNumBuckets;
EEHashEntry_t * pNewEntry;
pNewEntry = Helper::AllocateEntry(pKey, bDeepCopyKey, m_Heap);
if (!pNewEntry)
{
COMPlusThrowOM();
}
// Fill in the information for the new entry.
pNewEntry->pNext = m_pVolatileBucketTable->m_pBuckets[dwBucket];
pNewEntry->Data = Data;
pNewEntry->dwHashValue = dwHash;
// Insert at head of bucket
// need volatile write to avoid write reordering problem in IA
VolatileStore(&m_pVolatileBucketTable->m_pBuckets[dwBucket], pNewEntry);;
m_dwNumEntries++;
}
// Similar to the above, except that the HashDatum is a pointer to key.
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
void EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::InsertKeyAsValue(KeyType pKey, BOOL bDeepCopyKey)
{
CONTRACTL
{
WRAPPER(THROWS);
WRAPPER(GC_NOTRIGGER);
INJECT_FAULT(COMPlusThrowOM(););
}
CONTRACTL_END
_ASSERTE (OwnLock());
    // Transition to COOP mode. This is needed because EEHashTable is lock free and it can be read
    // from multiple threads without taking locks. On rehash, you want to get rid of the old copy
    // of the table. You can only get rid of it once nobody is using it. That's a problem because
// there is no lock to tell when the last reader stopped using the old copy of the table.
// The solution to this problem is to access the table in cooperative mode, and to get rid of
// the old copy of the table when we are suspended for GC. When we are suspended for GC,
// we know that nobody is using the old copy of the table anymore.
// BROKEN: This is called sometimes from the CorMap hash before the EE is started up
GCX_COOP_NO_THREAD_BROKEN();
_ASSERTE(m_pVolatileBucketTable->m_dwNumBuckets != 0);
if (m_dwNumEntries > m_pVolatileBucketTable->m_dwNumBuckets*2)
{
if (!GrowHashTable()) COMPlusThrowOM();
}
DWORD dwHash = Helper::Hash(pKey);
DWORD dwBucket = dwHash % m_pVolatileBucketTable->m_dwNumBuckets;
EEHashEntry_t * pNewEntry;
pNewEntry = Helper::AllocateEntry(pKey, bDeepCopyKey, m_Heap);
if (!pNewEntry)
{
COMPlusThrowOM();
}
// Fill in the information for the new entry.
pNewEntry->pNext = m_pVolatileBucketTable->m_pBuckets[dwBucket];
pNewEntry->dwHashValue = dwHash;
pNewEntry->Data = *((LPUTF8 *)pNewEntry->Key);
// Insert at head of bucket
// need volatile write to avoid write reordering problem in IA
VolatileStore(&m_pVolatileBucketTable->m_pBuckets[dwBucket], pNewEntry);
m_dwNumEntries++;
}
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
BOOL EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::DeleteValue(KeyType pKey)
{
CONTRACTL
{
WRAPPER(THROWS);
WRAPPER(GC_NOTRIGGER);
FORBID_FAULT;
}
CONTRACTL_END
_ASSERTE (OwnLock());
Thread *pThread = GetThreadNULLOk();
GCX_MAYBE_COOP_NO_THREAD_BROKEN(pThread ? !(pThread->m_StateNC & Thread::TSNC_UnsafeSkipEnterCooperative) : FALSE);
_ASSERTE(m_pVolatileBucketTable->m_dwNumBuckets != 0);
DWORD dwHash = Helper::Hash(pKey);
DWORD dwBucket = dwHash % m_pVolatileBucketTable->m_dwNumBuckets;
EEHashEntry_t * pSearch;
EEHashEntry_t **ppPrev = &m_pVolatileBucketTable->m_pBuckets[dwBucket];
for (pSearch = m_pVolatileBucketTable->m_pBuckets[dwBucket]; pSearch; pSearch = pSearch->pNext)
{
if (pSearch->dwHashValue == dwHash && Helper::CompareKeys(pSearch, pKey))
{
*ppPrev = pSearch->pNext;
Helper::DeleteEntry(pSearch, m_Heap);
// Do we ever want to shrink?
m_dwNumEntries--;
return TRUE;
}
ppPrev = &pSearch->pNext;
}
return FALSE;
}
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
BOOL EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::ReplaceValue(KeyType pKey, HashDatum Data)
{
CONTRACTL
{
WRAPPER(THROWS);
WRAPPER(GC_NOTRIGGER);
FORBID_FAULT;
}
CONTRACTL_END
_ASSERTE (OwnLock());
EEHashEntry_t *pItem = FindItem(pKey);
if (pItem != NULL)
{
// Required to be atomic
pItem->Data = Data;
return TRUE;
}
else
{
return FALSE;
}
}
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
BOOL EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::ReplaceKey(KeyType pOldKey, KeyType pNewKey)
{
CONTRACTL
{
WRAPPER(THROWS);
WRAPPER(GC_NOTRIGGER);
FORBID_FAULT;
}
CONTRACTL_END
_ASSERTE (OwnLock());
EEHashEntry_t *pItem = FindItem(pOldKey);
if (pItem != NULL)
{
Helper::ReplaceKey (pItem, pNewKey);
return TRUE;
}
else
{
return FALSE;
}
}
#endif // !DACCESS_COMPILE
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
DWORD EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::GetHash(KeyType pKey)
{
WRAPPER_NO_CONTRACT;
return Helper::Hash(pKey);
}
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
BOOL EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::GetValue(KeyType pKey, HashDatum *pData)
{
CONTRACTL
{
WRAPPER(THROWS);
WRAPPER(GC_NOTRIGGER);
FORBID_FAULT;
SUPPORTS_DAC;
}
CONTRACTL_END
EEHashEntry_t *pItem = FindItem(pKey);
if (pItem != NULL)
{
*pData = pItem->Data;
return TRUE;
}
else
{
return FALSE;
}
}
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
BOOL EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::GetValue(KeyType pKey, HashDatum *pData, DWORD hashValue)
{
CONTRACTL
{
WRAPPER(THROWS);
WRAPPER(GC_NOTRIGGER);
FORBID_FAULT;
}
CONTRACTL_END
EEHashEntry_t *pItem = FindItem(pKey, hashValue);
if (pItem != NULL)
{
*pData = pItem->Data;
return TRUE;
}
else
{
return FALSE;
}
}
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
FORCEINLINE BOOL EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::GetValueSpeculative(KeyType pKey, HashDatum *pData)
{
CONTRACTL
{
WRAPPER(THROWS);
WRAPPER(GC_NOTRIGGER);
#ifdef MODE_COOPERATIVE // This header file sees contract.h, not eecontract.h - what a kludge!
MODE_COOPERATIVE;
#endif
}
CONTRACTL_END
EEHashEntry_t *pItem = FindItemSpeculative(pKey, Helper::Hash(pKey));
if (pItem != NULL)
{
*pData = pItem->Data;
return TRUE;
}
else
{
return FALSE;
}
}
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
FORCEINLINE BOOL EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::GetValueSpeculative(KeyType pKey, HashDatum *pData, DWORD hashValue)
{
CONTRACTL
{
WRAPPER(THROWS);
WRAPPER(GC_NOTRIGGER);
#ifdef MODE_COOPERATIVE // This header file sees contract.h, not eecontract.h - what a kludge!
MODE_COOPERATIVE;
#endif
}
CONTRACTL_END
EEHashEntry_t *pItem = FindItemSpeculative(pKey, hashValue);
if (pItem != NULL)
{
*pData = pItem->Data;
return TRUE;
}
else
{
return FALSE;
}
}
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
EEHashEntry_t *EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::FindItem(KeyType pKey)
{
CONTRACTL
{
WRAPPER(THROWS);
WRAPPER(GC_NOTRIGGER);
FORBID_FAULT;
SUPPORTS_DAC;
}
CONTRACTL_END
return FindItem(pKey, Helper::Hash(pKey));
}
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
EEHashEntry_t *EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::FindItem(KeyType pKey, DWORD dwHash)
{
CONTRACTL
{
WRAPPER(THROWS);
WRAPPER(GC_NOTRIGGER);
FORBID_FAULT;
SUPPORTS_DAC;
}
CONTRACTL_END
    // Transition to COOP mode. This is needed because EEHashTable is lock free and it can be read
    // from multiple threads without taking locks. On rehash, you want to get rid of the old copy
    // of the table. You can only get rid of it once nobody is using it. That's a problem because
// there is no lock to tell when the last reader stopped using the old copy of the table.
// The solution to this problem is to access the table in cooperative mode, and to get rid of
// the old copy of the table when we are suspended for GC. When we are suspended for GC,
// we know that nobody is using the old copy of the table anymore.
//
#ifndef DACCESS_COMPILE
GCX_COOP_NO_THREAD_BROKEN();
#endif
    // Atomic transaction. At no other point in this method, or in ANY of its callees, may you read
    // from m_pVolatileBucketTable!!!!!!! A race condition would occur.
DWORD dwOldNumBuckets;
#ifndef DACCESS_COMPILE
DWORD nTry = 0;
DWORD dwSwitchCount = 0;
#endif
do
{
BucketTable* pBucketTable=(BucketTable*)(PTR_BucketTable)m_pVolatileBucketTable.Load();
dwOldNumBuckets = pBucketTable->m_dwNumBuckets;
_ASSERTE(pBucketTable->m_dwNumBuckets != 0);
DWORD dwBucket = dwHash % pBucketTable->m_dwNumBuckets;
EEHashEntry_t * pSearch;
for (pSearch = pBucketTable->m_pBuckets[dwBucket]; pSearch; pSearch = pSearch->pNext)
{
if (pSearch->dwHashValue == dwHash && Helper::CompareKeys(pSearch, pKey))
return pSearch;
}
        // There is a race in EEHashTable: when we grow the hash table, we will nuke out
        // the old bucket table. Readers might be looking up in the old table and can
        // fail to find an existing entry. The workaround is to retry the search process
        // if the table was grown during the search.
#ifndef DACCESS_COMPILE
nTry ++;
if (nTry == 20) {
__SwitchToThread(0, ++dwSwitchCount);
nTry = 0;
}
#endif // #ifndef DACCESS_COMPILE
}
while ( m_bGrowing || dwOldNumBuckets != m_pVolatileBucketTable->m_dwNumBuckets);
return NULL;
}
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
FORCEINLINE EEHashEntry_t *EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::FindItemSpeculative(KeyType pKey, DWORD dwHash)
{
CONTRACTL
{
WRAPPER(THROWS);
WRAPPER(GC_NOTRIGGER);
#ifdef MODE_COOPERATIVE // This header file sees contract.h, not eecontract.h - what a kludge!
MODE_COOPERATIVE;
#endif
}
CONTRACTL_END
    // Atomic transaction. At no other point in this method, or in ANY of its callees, may you read
    // from m_pVolatileBucketTable!!!!!!! A race condition would occur.
DWORD dwOldNumBuckets;
BucketTable* pBucketTable=m_pVolatileBucketTable;
dwOldNumBuckets = pBucketTable->m_dwNumBuckets;
_ASSERTE(pBucketTable->m_dwNumBuckets != 0);
DWORD dwBucket = dwHash % pBucketTable->m_dwNumBuckets;
EEHashEntry_t * pSearch;
for (pSearch = pBucketTable->m_pBuckets[dwBucket]; pSearch; pSearch = pSearch->pNext)
{
if (pSearch->dwHashValue == dwHash && Helper::CompareKeys(pSearch, pKey))
return pSearch;
}
return NULL;
}
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
BOOL EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::IsEmpty()
{
LIMITED_METHOD_CONTRACT;
return m_dwNumEntries == 0;
}
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
DWORD EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::GetCount()
{
LIMITED_METHOD_CONTRACT;
return m_dwNumEntries;
}
#ifndef DACCESS_COMPILE
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
BOOL EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::GrowHashTable()
{
CONTRACTL
{
WRAPPER(THROWS);
WRAPPER(GC_NOTRIGGER);
INJECT_FAULT(return FALSE;);
}
CONTRACTL_END
#if defined(_DEBUG)
Thread * pThread = GetThreadNULLOk();
_ASSERTE(!g_fEEStarted || (pThread == NULL) || (pThread->PreemptiveGCDisabled()));
#endif
// Make the new bucket table 4 times bigger
//
DWORD dwNewNumBuckets;
DWORD dwNewNumBucketsPlusOne;
{
S_UINT32 safeSize(m_pVolatileBucketTable->m_dwNumBuckets);
safeSize *= 4;
if (safeSize.IsOverflow())
return FALSE;
dwNewNumBuckets = safeSize.Value();
safeSize += 1; // Allocate one extra
if (safeSize.IsOverflow())
return FALSE;
dwNewNumBucketsPlusOne = safeSize.Value();
}
// On resizes, we still have an array of old pointers we need to worry about.
// We can't free these old pointers, for we may hit a race condition where we're
// resizing and reading from the array at the same time. We need to keep track of these
// old arrays of pointers, so we're going to use the last item in the array to "link"
// to previous arrays, so that they may be freed at the end.
//
SIZE_T cbAlloc;
{
S_SIZE_T safeSize(sizeof(EEHashEntry_t *));
safeSize *= dwNewNumBucketsPlusOne;
if (safeSize.IsOverflow())
return FALSE;
cbAlloc = safeSize.Value();
}
EEHashEntry_t **pNewBuckets = (EEHashEntry_t **) new (nothrow) BYTE[cbAlloc];
if (pNewBuckets == NULL)
return FALSE;
memset(pNewBuckets, 0, cbAlloc);
// The first slot is linked to next list.
pNewBuckets++;
// Run through the old table and transfer all the entries
// Be sure not to mess with the integrity of the old table while
// we are doing this, as there can be concurrent readers! Note that
// it is OK if the concurrent reader misses out on a match, though -
// they will have to acquire the lock on a miss & try again.
FastInterlockExchange( (LONG *) &m_bGrowing, 1);
for (DWORD i = 0; i < m_pVolatileBucketTable->m_dwNumBuckets; i++)
{
EEHashEntry_t * pEntry = m_pVolatileBucketTable->m_pBuckets[i];
// Try to lock out readers from scanning this bucket. This is
// obviously a race which may fail. However, note that it's OK
// if somebody is already in the list - it's OK if we mess
// with the bucket groups, as long as we don't destroy
// anything. The lookup function will still do appropriate
// comparison even if it wanders aimlessly amongst entries
// while we are rearranging things. If a lookup finds a match
// under those circumstances, great. If not, they will have
// to acquire the lock & try again anyway.
m_pVolatileBucketTable->m_pBuckets[i] = NULL;
while (pEntry != NULL)
{
DWORD dwNewBucket = pEntry->dwHashValue % dwNewNumBuckets;
EEHashEntry_t * pNextEntry = pEntry->pNext;
pEntry->pNext = pNewBuckets[dwNewBucket];
pNewBuckets[dwNewBucket] = pEntry;
pEntry = pNextEntry;
}
}
// Finally, store the new number of buckets and the new bucket table
BucketTable* pNewBucketTable = (m_pVolatileBucketTable == &m_BucketTable[0]) ?
&m_BucketTable[1]:
&m_BucketTable[0];
pNewBucketTable->m_pBuckets = pNewBuckets;
pNewBucketTable->m_dwNumBuckets = dwNewNumBuckets;
// Add old table to the to free list. Note that the SyncClean thing will only
// delete the buckets at a safe point
//
SyncClean::AddEEHashTable (m_pVolatileBucketTable->m_pBuckets);
// Note that SyncClean::AddEEHashTable performs at least one Interlocked operation,
// so we do not need to use an Interlocked operation to write m_pVolatileBucketTable.
// Swap the double buffer; this assignment is an atomic operation.
//
m_pVolatileBucketTable = pNewBucketTable;
FastInterlockExchange( (LONG *) &m_bGrowing, 0);
return TRUE;
}
#endif // DACCESS_COMPILE
// Walk through all the entries in the hash table, in meaningless order, without any
// synchronization.
//
// IterateStart()
// while (IterateNext())
// GetKey();
//
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
void EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::
IterateStart(EEHashTableIteration *pIter)
{
CONTRACTL
{
WRAPPER(THROWS);
WRAPPER(GC_NOTRIGGER);
FORBID_FAULT;
}
CONTRACTL_END
_ASSERTE_IMPL(OwnLock());
pIter->m_dwBucket = -1;
pIter->m_pEntry = NULL;
#ifdef _DEBUG
pIter->m_pTable = this;
#endif
}
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
BOOL EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::
IterateNext(EEHashTableIteration *pIter)
{
CONTRACTL
{
WRAPPER(THROWS);
WRAPPER(GC_NOTRIGGER);
FORBID_FAULT;
}
CONTRACTL_END
_ASSERTE_IMPL(OwnLock());
Thread *pThread = GetThreadNULLOk();
GCX_MAYBE_COOP_NO_THREAD_BROKEN(pThread ? !(pThread->m_StateNC & Thread::TSNC_UnsafeSkipEnterCooperative) : FALSE);
_ASSERTE(pIter->m_pTable == (void *) this);
// If we haven't started iterating yet, or if we are at the end of a particular
// chain, advance to the next chain.
while (pIter->m_pEntry == NULL || pIter->m_pEntry->pNext == NULL)
{
if (++pIter->m_dwBucket >= m_pVolatileBucketTable->m_dwNumBuckets)
{
// advanced beyond the end of the table.
_ASSERTE(pIter->m_dwBucket == m_pVolatileBucketTable->m_dwNumBuckets); // client keeps asking?
return FALSE;
}
pIter->m_pEntry = m_pVolatileBucketTable->m_pBuckets[pIter->m_dwBucket];
// If this bucket has no chain, keep advancing. Otherwise we are done
if (pIter->m_pEntry)
return TRUE;
}
// We are within a chain. Advance to the next entry
pIter->m_pEntry = pIter->m_pEntry->pNext;
_ASSERTE(pIter->m_pEntry);
return TRUE;
}
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
KeyType EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::
IterateGetKey(EEHashTableIteration *pIter)
{
CONTRACTL
{
WRAPPER(THROWS);
WRAPPER(GC_NOTRIGGER);
FORBID_FAULT;
}
CONTRACTL_END
_ASSERTE(pIter->m_pTable == (void *) this);
_ASSERTE(pIter->m_dwBucket < m_pVolatileBucketTable->m_dwNumBuckets && pIter->m_pEntry);
return Helper::GetKey(pIter->m_pEntry);
}
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
HashDatum EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::
IterateGetValue(EEHashTableIteration *pIter)
{
LIMITED_METHOD_CONTRACT;
_ASSERTE(pIter->m_pTable == (void *) this);
_ASSERTE(pIter->m_dwBucket < m_pVolatileBucketTable->m_dwNumBuckets && pIter->m_pEntry);
return pIter->m_pEntry->Data;
}
#endif /* _EE_HASH_INL */
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
#ifndef _EE_HASH_INL
#define _EE_HASH_INL
#ifdef _DEBUG_IMPL
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
BOOL EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::OwnLock()
{
WRAPPER_NO_CONTRACT;
if (m_CheckThreadSafety == FALSE)
return TRUE;
if (m_pfnLockOwner == NULL) {
return m_writerThreadId.IsCurrentThread();
}
else {
BOOL ret = m_pfnLockOwner(m_lockData);
if (!ret) {
if (Debug_IsLockedViaThreadSuspension()) {
ret = TRUE;
}
}
return ret;
}
}
#endif // _DEBUG_IMPL
#ifndef DACCESS_COMPILE
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
void EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::Destroy()
{
CONTRACTL
{
WRAPPER(THROWS);
WRAPPER(GC_NOTRIGGER);
FORBID_FAULT;
}
CONTRACTL_END
if (m_pVolatileBucketTable && m_pVolatileBucketTable->m_pBuckets != NULL)
{
DWORD i;
for (i = 0; i < m_pVolatileBucketTable->m_dwNumBuckets; i++)
{
EEHashEntry_t *pEntry, *pNext;
for (pEntry = m_pVolatileBucketTable->m_pBuckets[i]; pEntry != NULL; pEntry = pNext)
{
pNext = pEntry->pNext;
Helper::DeleteEntry(pEntry, m_Heap);
}
}
delete[] (m_pVolatileBucketTable->m_pBuckets-1);
m_pVolatileBucketTable = NULL;
}
}
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
void EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::ClearHashTable()
{
CONTRACTL
{
WRAPPER(THROWS);
WRAPPER(GC_NOTRIGGER);
FORBID_FAULT;
}
CONTRACTL_END
//_ASSERTE (OwnLock());
// Transition to COOP mode. This is needed because EEHashTable is lock free and it can be read
// from multiple threads without taking locks. On rehash, you want to get rid of the old copy
// of table. You can only get rid of it once nobody is using it. That's a problem because
// there is no lock to tell when the last reader stopped using the old copy of the table.
// The solution to this problem is to access the table in cooperative mode, and to get rid of
// the old copy of the table when we are suspended for GC. When we are suspended for GC,
// we know that nobody is using the old copy of the table anymore.
// BROKEN: This is called sometimes from the CorMap hash before the EE is started up
GCX_COOP_NO_THREAD_BROKEN();
if (m_pVolatileBucketTable->m_pBuckets != NULL)
{
DWORD i;
for (i = 0; i < m_pVolatileBucketTable->m_dwNumBuckets; i++)
{
EEHashEntry_t *pEntry, *pNext;
for (pEntry = m_pVolatileBucketTable->m_pBuckets[i]; pEntry != NULL; pEntry = pNext)
{
pNext = pEntry->pNext;
Helper::DeleteEntry(pEntry, m_Heap);
}
}
delete[] (m_pVolatileBucketTable->m_pBuckets-1);
m_pVolatileBucketTable->m_pBuckets = NULL;
}
m_pVolatileBucketTable->m_dwNumBuckets = 0;
#ifdef TARGET_64BIT
m_pVolatileBucketTable->m_dwNumBucketsMul = 0;
#endif
m_dwNumEntries = 0;
}
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
void EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::EmptyHashTable()
{
CONTRACTL
{
WRAPPER(THROWS);
WRAPPER(GC_NOTRIGGER);
FORBID_FAULT;
}
CONTRACTL_END
_ASSERTE (OwnLock());
// Transition to COOP mode. This is needed because EEHashTable is lock free and it can be read
// from multiple threads without taking locks. On rehash, you want to get rid of the old copy
// of table. You can only get rid of it once nobody is using it. That's a problem because
// there is no lock to tell when the last reader stopped using the old copy of the table.
// The solution to this problem is to access the table in cooperative mode, and to get rid of
// the old copy of the table when we are suspended for GC. When we are suspended for GC,
// we know that nobody is using the old copy of the table anymore.
// BROKEN: This is called sometimes from the CorMap hash before the EE is started up
GCX_COOP_NO_THREAD_BROKEN();
if (m_pVolatileBucketTable->m_pBuckets != NULL)
{
DWORD i;
for (i = 0; i < m_pVolatileBucketTable->m_dwNumBuckets; i++)
{
EEHashEntry_t *pEntry, *pNext;
for (pEntry = m_pVolatileBucketTable->m_pBuckets[i]; pEntry != NULL; pEntry = pNext)
{
pNext = pEntry->pNext;
Helper::DeleteEntry(pEntry, m_Heap);
}
m_pVolatileBucketTable->m_pBuckets[i] = NULL;
}
}
m_dwNumEntries = 0;
}
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
BOOL EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::Init(DWORD dwNumBuckets, LockOwner *pLock, AllocationHeap pHeap, BOOL CheckThreadSafety)
{
CONTRACTL
{
WRAPPER(NOTHROW);
WRAPPER(GC_NOTRIGGER);
INJECT_FAULT(return FALSE;);
#ifndef DACCESS_COMPILE
PRECONDITION(m_pVolatileBucketTable.Load() == NULL && "EEHashTable::Init() called twice.");
#endif
}
CONTRACTL_END
m_pVolatileBucketTable = &m_BucketTable[0];
DWORD dwNumBucketsPlusOne;
// Prefast overflow sanity check the addition
if (!ClrSafeInt<DWORD>::addition(dwNumBuckets, 1, dwNumBucketsPlusOne))
return FALSE;
S_SIZE_T safeSize(sizeof(EEHashEntry_t *));
safeSize *= dwNumBucketsPlusOne;
if (safeSize.IsOverflow())
ThrowHR(COR_E_OVERFLOW);
SIZE_T cbAlloc = safeSize.Value();
m_pVolatileBucketTable->m_pBuckets = (EEHashEntry_t **) new (nothrow) BYTE[cbAlloc];
if (m_pVolatileBucketTable->m_pBuckets == NULL)
return FALSE;
memset(m_pVolatileBucketTable->m_pBuckets, 0, cbAlloc);
// The first slot links to the next list.
m_pVolatileBucketTable->m_pBuckets++;
m_pVolatileBucketTable->m_dwNumBuckets = dwNumBuckets;
#ifdef TARGET_64BIT
m_pVolatileBucketTable->m_dwNumBucketsMul = GetFastModMultiplier(dwNumBuckets);
#endif
m_Heap = pHeap;
#ifdef _DEBUG
if (pLock == NULL) {
m_lockData = NULL;
m_pfnLockOwner = NULL;
}
else {
m_lockData = pLock->lock;
m_pfnLockOwner = pLock->lockOwnerFunc;
}
if (m_pfnLockOwner == NULL) {
m_writerThreadId.SetToCurrentThread();
}
m_CheckThreadSafety = CheckThreadSafety;
#endif
return TRUE;
}
// Does not handle duplicates!
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
void EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::InsertValue(KeyType pKey, HashDatum Data, BOOL bDeepCopyKey)
{
CONTRACTL
{
WRAPPER(THROWS);
WRAPPER(GC_NOTRIGGER);
INJECT_FAULT(COMPlusThrowOM(););
}
CONTRACTL_END
_ASSERTE (OwnLock());
// Transition to COOP mode. This is needed because EEHashTable is lock free and it can be read
// from multiple threads without taking locks. On rehash, you want to get rid of the old copy
// of table. You can only get rid of it once nobody is using it. That's a problem because
// there is no lock to tell when the last reader stopped using the old copy of the table.
// The solution to this problem is to access the table in cooperative mode, and to get rid of
// the old copy of the table when we are suspended for GC. When we are suspended for GC,
// we know that nobody is using the old copy of the table anymore.
// BROKEN: This is called sometimes from the CorMap hash before the EE is started up
GCX_COOP_NO_THREAD_BROKEN();
_ASSERTE(m_pVolatileBucketTable->m_dwNumBuckets != 0);
if (m_dwNumEntries > m_pVolatileBucketTable->m_dwNumBuckets*2)
{
if (!GrowHashTable()) COMPlusThrowOM();
}
DWORD dwHash = (DWORD)Helper::Hash(pKey);
DWORD dwBucket = dwHash % m_pVolatileBucketTable->m_dwNumBuckets;
EEHashEntry_t * pNewEntry;
pNewEntry = Helper::AllocateEntry(pKey, bDeepCopyKey, m_Heap);
if (!pNewEntry)
{
COMPlusThrowOM();
}
// Fill in the information for the new entry.
pNewEntry->pNext = m_pVolatileBucketTable->m_pBuckets[dwBucket];
pNewEntry->Data = Data;
pNewEntry->dwHashValue = dwHash;
// Insert at head of bucket
// need volatile write to avoid write reordering problem in IA
VolatileStore(&m_pVolatileBucketTable->m_pBuckets[dwBucket], pNewEntry);
m_dwNumEntries++;
}
// Similar to the above, except that the HashDatum is a pointer to key.
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
void EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::InsertKeyAsValue(KeyType pKey, BOOL bDeepCopyKey)
{
CONTRACTL
{
WRAPPER(THROWS);
WRAPPER(GC_NOTRIGGER);
INJECT_FAULT(COMPlusThrowOM(););
}
CONTRACTL_END
_ASSERTE (OwnLock());
// Transition to COOP mode. This is needed because EEHashTable is lock free and it can be read
// from multiple threads without taking locks. On rehash, you want to get rid of the old copy
// of table. You can only get rid of it once nobody is using it. That's a problem because
// there is no lock to tell when the last reader stopped using the old copy of the table.
// The solution to this problem is to access the table in cooperative mode, and to get rid of
// the old copy of the table when we are suspended for GC. When we are suspended for GC,
// we know that nobody is using the old copy of the table anymore.
// BROKEN: This is called sometimes from the CorMap hash before the EE is started up
GCX_COOP_NO_THREAD_BROKEN();
_ASSERTE(m_pVolatileBucketTable->m_dwNumBuckets != 0);
if (m_dwNumEntries > m_pVolatileBucketTable->m_dwNumBuckets*2)
{
if (!GrowHashTable()) COMPlusThrowOM();
}
DWORD dwHash = Helper::Hash(pKey);
DWORD dwBucket = dwHash % m_pVolatileBucketTable->m_dwNumBuckets;
EEHashEntry_t * pNewEntry;
pNewEntry = Helper::AllocateEntry(pKey, bDeepCopyKey, m_Heap);
if (!pNewEntry)
{
COMPlusThrowOM();
}
// Fill in the information for the new entry.
pNewEntry->pNext = m_pVolatileBucketTable->m_pBuckets[dwBucket];
pNewEntry->dwHashValue = dwHash;
pNewEntry->Data = *((LPUTF8 *)pNewEntry->Key);
// Insert at head of bucket
// need volatile write to avoid write reordering problem in IA
VolatileStore(&m_pVolatileBucketTable->m_pBuckets[dwBucket], pNewEntry);
m_dwNumEntries++;
}
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
BOOL EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::DeleteValue(KeyType pKey)
{
CONTRACTL
{
WRAPPER(THROWS);
WRAPPER(GC_NOTRIGGER);
FORBID_FAULT;
}
CONTRACTL_END
_ASSERTE (OwnLock());
Thread *pThread = GetThreadNULLOk();
GCX_MAYBE_COOP_NO_THREAD_BROKEN(pThread ? !(pThread->m_StateNC & Thread::TSNC_UnsafeSkipEnterCooperative) : FALSE);
_ASSERTE(m_pVolatileBucketTable->m_dwNumBuckets != 0);
DWORD dwHash = Helper::Hash(pKey);
DWORD dwBucket = dwHash % m_pVolatileBucketTable->m_dwNumBuckets;
EEHashEntry_t * pSearch;
EEHashEntry_t **ppPrev = &m_pVolatileBucketTable->m_pBuckets[dwBucket];
for (pSearch = m_pVolatileBucketTable->m_pBuckets[dwBucket]; pSearch; pSearch = pSearch->pNext)
{
if (pSearch->dwHashValue == dwHash && Helper::CompareKeys(pSearch, pKey))
{
*ppPrev = pSearch->pNext;
Helper::DeleteEntry(pSearch, m_Heap);
// Do we ever want to shrink?
m_dwNumEntries--;
return TRUE;
}
ppPrev = &pSearch->pNext;
}
return FALSE;
}
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
BOOL EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::ReplaceValue(KeyType pKey, HashDatum Data)
{
CONTRACTL
{
WRAPPER(THROWS);
WRAPPER(GC_NOTRIGGER);
FORBID_FAULT;
}
CONTRACTL_END
_ASSERTE (OwnLock());
EEHashEntry_t *pItem = FindItem(pKey);
if (pItem != NULL)
{
// Required to be atomic
pItem->Data = Data;
return TRUE;
}
else
{
return FALSE;
}
}
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
BOOL EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::ReplaceKey(KeyType pOldKey, KeyType pNewKey)
{
CONTRACTL
{
WRAPPER(THROWS);
WRAPPER(GC_NOTRIGGER);
FORBID_FAULT;
}
CONTRACTL_END
_ASSERTE (OwnLock());
EEHashEntry_t *pItem = FindItem(pOldKey);
if (pItem != NULL)
{
Helper::ReplaceKey (pItem, pNewKey);
return TRUE;
}
else
{
return FALSE;
}
}
#endif // !DACCESS_COMPILE
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
DWORD EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::GetHash(KeyType pKey)
{
WRAPPER_NO_CONTRACT;
return Helper::Hash(pKey);
}
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
BOOL EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::GetValue(KeyType pKey, HashDatum *pData)
{
CONTRACTL
{
WRAPPER(THROWS);
WRAPPER(GC_NOTRIGGER);
FORBID_FAULT;
SUPPORTS_DAC;
}
CONTRACTL_END
EEHashEntry_t *pItem = FindItem(pKey);
if (pItem != NULL)
{
*pData = pItem->Data;
return TRUE;
}
else
{
return FALSE;
}
}
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
BOOL EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::GetValue(KeyType pKey, HashDatum *pData, DWORD hashValue)
{
CONTRACTL
{
WRAPPER(THROWS);
WRAPPER(GC_NOTRIGGER);
FORBID_FAULT;
}
CONTRACTL_END
EEHashEntry_t *pItem = FindItem(pKey, hashValue);
if (pItem != NULL)
{
*pData = pItem->Data;
return TRUE;
}
else
{
return FALSE;
}
}
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
FORCEINLINE BOOL EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::GetValueSpeculative(KeyType pKey, HashDatum *pData)
{
CONTRACTL
{
WRAPPER(THROWS);
WRAPPER(GC_NOTRIGGER);
#ifdef MODE_COOPERATIVE // This header file sees contract.h, not eecontract.h - what a kludge!
MODE_COOPERATIVE;
#endif
}
CONTRACTL_END
EEHashEntry_t *pItem = FindItemSpeculative(pKey, Helper::Hash(pKey));
if (pItem != NULL)
{
*pData = pItem->Data;
return TRUE;
}
else
{
return FALSE;
}
}
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
FORCEINLINE BOOL EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::GetValueSpeculative(KeyType pKey, HashDatum *pData, DWORD hashValue)
{
CONTRACTL
{
WRAPPER(THROWS);
WRAPPER(GC_NOTRIGGER);
#ifdef MODE_COOPERATIVE // This header file sees contract.h, not eecontract.h - what a kludge!
MODE_COOPERATIVE;
#endif
}
CONTRACTL_END
EEHashEntry_t *pItem = FindItemSpeculative(pKey, hashValue);
if (pItem != NULL)
{
*pData = pItem->Data;
return TRUE;
}
else
{
return FALSE;
}
}
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
EEHashEntry_t *EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::FindItem(KeyType pKey)
{
CONTRACTL
{
WRAPPER(THROWS);
WRAPPER(GC_NOTRIGGER);
FORBID_FAULT;
SUPPORTS_DAC;
}
CONTRACTL_END
return FindItem(pKey, Helper::Hash(pKey));
}
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
EEHashEntry_t *EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::FindItem(KeyType pKey, DWORD dwHash)
{
CONTRACTL
{
WRAPPER(THROWS);
WRAPPER(GC_NOTRIGGER);
FORBID_FAULT;
SUPPORTS_DAC;
}
CONTRACTL_END
// Transition to COOP mode. This is needed because EEHashTable is lock free and it can be read
// from multiple threads without taking locks. On rehash, you want to get rid of the old copy
// of table. You can only get rid of it once nobody is using it. That's a problem because
// there is no lock to tell when the last reader stopped using the old copy of the table.
// The solution to this problem is to access the table in cooperative mode, and to get rid of
// the old copy of the table when we are suspended for GC. When we are suspended for GC,
// we know that nobody is using the old copy of the table anymore.
//
#ifndef DACCESS_COMPILE
GCX_COOP_NO_THREAD_BROKEN();
#endif
// Atomic transaction: at any other point in this method, or in ANY of its callees, you cannot read
// from m_pVolatileBucketTable - a race condition would occur.
DWORD dwOldNumBuckets;
#ifndef DACCESS_COMPILE
DWORD nTry = 0;
DWORD dwSwitchCount = 0;
#endif
do
{
BucketTable* pBucketTable=(BucketTable*)(PTR_BucketTable)m_pVolatileBucketTable.Load();
dwOldNumBuckets = pBucketTable->m_dwNumBuckets;
_ASSERTE(pBucketTable->m_dwNumBuckets != 0);
DWORD dwBucket = dwHash % pBucketTable->m_dwNumBuckets;
EEHashEntry_t * pSearch;
for (pSearch = pBucketTable->m_pBuckets[dwBucket]; pSearch; pSearch = pSearch->pNext)
{
if (pSearch->dwHashValue == dwHash && Helper::CompareKeys(pSearch, pKey))
return pSearch;
}
// There is a race in EEHashTable: when we grow the hash table, we nuke out
// the old bucket table. Readers might still be looking up in the old table and can
// fail to find an existing entry. The workaround is to retry the search
// if the table was grown while the search was in progress.
#ifndef DACCESS_COMPILE
nTry ++;
if (nTry == 20) {
__SwitchToThread(0, ++dwSwitchCount);
nTry = 0;
}
#endif // #ifndef DACCESS_COMPILE
}
while ( m_bGrowing || dwOldNumBuckets != m_pVolatileBucketTable->m_dwNumBuckets);
return NULL;
}
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
FORCEINLINE EEHashEntry_t *EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::FindItemSpeculative(KeyType pKey, DWORD dwHash)
{
CONTRACTL
{
WRAPPER(THROWS);
WRAPPER(GC_NOTRIGGER);
#ifdef MODE_COOPERATIVE // This header file sees contract.h, not eecontract.h - what a kludge!
MODE_COOPERATIVE;
#endif
}
CONTRACTL_END
// Atomic transaction: at any other point in this method, or in ANY of its callees, you cannot read
// from m_pVolatileBucketTable - a race condition would occur.
DWORD dwOldNumBuckets;
BucketTable* pBucketTable=m_pVolatileBucketTable;
dwOldNumBuckets = pBucketTable->m_dwNumBuckets;
_ASSERTE(pBucketTable->m_dwNumBuckets != 0);
DWORD dwBucket;
#ifdef TARGET_64BIT
_ASSERTE(pBucketTable->m_dwNumBucketsMul != 0);
dwBucket = FastMod(dwHash, pBucketTable->m_dwNumBuckets, pBucketTable->m_dwNumBucketsMul);
#else
dwBucket = dwHash % pBucketTable->m_dwNumBuckets;
#endif
EEHashEntry_t * pSearch;
for (pSearch = pBucketTable->m_pBuckets[dwBucket]; pSearch; pSearch = pSearch->pNext)
{
if (pSearch->dwHashValue == dwHash && Helper::CompareKeys(pSearch, pKey))
return pSearch;
}
return NULL;
}
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
BOOL EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::IsEmpty()
{
LIMITED_METHOD_CONTRACT;
return m_dwNumEntries == 0;
}
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
DWORD EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::GetCount()
{
LIMITED_METHOD_CONTRACT;
return m_dwNumEntries;
}
#ifndef DACCESS_COMPILE
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
BOOL EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::GrowHashTable()
{
CONTRACTL
{
WRAPPER(THROWS);
WRAPPER(GC_NOTRIGGER);
INJECT_FAULT(return FALSE;);
}
CONTRACTL_END
#if defined(_DEBUG)
Thread * pThread = GetThreadNULLOk();
_ASSERTE(!g_fEEStarted || (pThread == NULL) || (pThread->PreemptiveGCDisabled()));
#endif
// Make the new bucket table 4 times bigger
//
DWORD dwNewNumBuckets;
DWORD dwNewNumBucketsPlusOne;
{
S_UINT32 safeSize(m_pVolatileBucketTable->m_dwNumBuckets);
safeSize *= 4;
if (safeSize.IsOverflow())
return FALSE;
dwNewNumBuckets = safeSize.Value();
safeSize += 1; // Allocate one extra
if (safeSize.IsOverflow())
return FALSE;
dwNewNumBucketsPlusOne = safeSize.Value();
}
// On resizes, we still have an array of old pointers we need to worry about.
// We can't free these old pointers, for we may hit a race condition where we're
// resizing and reading from the array at the same time. We need to keep track of these
// old arrays of pointers, so we're going to use the last item in the array to "link"
// to previous arrays, so that they may be freed at the end.
//
SIZE_T cbAlloc;
{
S_SIZE_T safeSize(sizeof(EEHashEntry_t *));
safeSize *= dwNewNumBucketsPlusOne;
if (safeSize.IsOverflow())
return FALSE;
cbAlloc = safeSize.Value();
}
EEHashEntry_t **pNewBuckets = (EEHashEntry_t **) new (nothrow) BYTE[cbAlloc];
if (pNewBuckets == NULL)
return FALSE;
memset(pNewBuckets, 0, cbAlloc);
// The first slot is linked to the next list.
pNewBuckets++;
// Run through the old table and transfer all the entries
// Be sure not to mess with the integrity of the old table while
// we are doing this, as there can be concurrent readers! Note that
// it is OK if the concurrent reader misses out on a match, though -
// they will have to acquire the lock on a miss & try again.
FastInterlockExchange( (LONG *) &m_bGrowing, 1);
for (DWORD i = 0; i < m_pVolatileBucketTable->m_dwNumBuckets; i++)
{
EEHashEntry_t * pEntry = m_pVolatileBucketTable->m_pBuckets[i];
// Try to lock out readers from scanning this bucket. This is
// obviously a race which may fail. However, note that it's OK
// if somebody is already in the list - it's OK if we mess
// with the bucket groups, as long as we don't destroy
// anything. The lookup function will still do appropriate
// comparison even if it wanders aimlessly amongst entries
// while we are rearranging things. If a lookup finds a match
// under those circumstances, great. If not, they will have
// to acquire the lock & try again anyway.
m_pVolatileBucketTable->m_pBuckets[i] = NULL;
while (pEntry != NULL)
{
DWORD dwNewBucket = pEntry->dwHashValue % dwNewNumBuckets;
EEHashEntry_t * pNextEntry = pEntry->pNext;
pEntry->pNext = pNewBuckets[dwNewBucket];
pNewBuckets[dwNewBucket] = pEntry;
pEntry = pNextEntry;
}
}
// Finally, store the new number of buckets and the new bucket table
BucketTable* pNewBucketTable = (m_pVolatileBucketTable == &m_BucketTable[0]) ?
&m_BucketTable[1]:
&m_BucketTable[0];
pNewBucketTable->m_pBuckets = pNewBuckets;
pNewBucketTable->m_dwNumBuckets = dwNewNumBuckets;
#ifdef TARGET_64BIT
pNewBucketTable->m_dwNumBucketsMul = GetFastModMultiplier(dwNewNumBuckets);
#endif
// Add the old table to the free list. Note that SyncClean will only
// delete the buckets at a safe point.
//
SyncClean::AddEEHashTable (m_pVolatileBucketTable->m_pBuckets);
// Note that SyncClean::AddEEHashTable performs at least one Interlocked operation,
// so we do not need to use an Interlocked operation to write m_pVolatileBucketTable.
// Swap the double buffer; this assignment is an atomic operation.
//
m_pVolatileBucketTable = pNewBucketTable;
FastInterlockExchange( (LONG *) &m_bGrowing, 0);
return TRUE;
}
#endif // DACCESS_COMPILE
// Walk through all the entries in the hash table, in meaningless order, without any
// synchronization.
//
// IterateStart()
// while (IterateNext())
// GetKey();
//
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
void EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::
IterateStart(EEHashTableIteration *pIter)
{
CONTRACTL
{
WRAPPER(THROWS);
WRAPPER(GC_NOTRIGGER);
FORBID_FAULT;
}
CONTRACTL_END
_ASSERTE_IMPL(OwnLock());
pIter->m_dwBucket = -1;
pIter->m_pEntry = NULL;
#ifdef _DEBUG
pIter->m_pTable = this;
#endif
}
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
BOOL EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::
IterateNext(EEHashTableIteration *pIter)
{
CONTRACTL
{
WRAPPER(THROWS);
WRAPPER(GC_NOTRIGGER);
FORBID_FAULT;
}
CONTRACTL_END
_ASSERTE_IMPL(OwnLock());
Thread *pThread = GetThreadNULLOk();
GCX_MAYBE_COOP_NO_THREAD_BROKEN(pThread ? !(pThread->m_StateNC & Thread::TSNC_UnsafeSkipEnterCooperative) : FALSE);
_ASSERTE(pIter->m_pTable == (void *) this);
// If we haven't started iterating yet, or if we are at the end of a particular
// chain, advance to the next chain.
while (pIter->m_pEntry == NULL || pIter->m_pEntry->pNext == NULL)
{
if (++pIter->m_dwBucket >= m_pVolatileBucketTable->m_dwNumBuckets)
{
// advanced beyond the end of the table.
_ASSERTE(pIter->m_dwBucket == m_pVolatileBucketTable->m_dwNumBuckets); // client keeps asking?
return FALSE;
}
pIter->m_pEntry = m_pVolatileBucketTable->m_pBuckets[pIter->m_dwBucket];
// If this bucket has no chain, keep advancing. Otherwise we are done
if (pIter->m_pEntry)
return TRUE;
}
// We are within a chain. Advance to the next entry
pIter->m_pEntry = pIter->m_pEntry->pNext;
_ASSERTE(pIter->m_pEntry);
return TRUE;
}
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
KeyType EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::
IterateGetKey(EEHashTableIteration *pIter)
{
CONTRACTL
{
WRAPPER(THROWS);
WRAPPER(GC_NOTRIGGER);
FORBID_FAULT;
}
CONTRACTL_END
_ASSERTE(pIter->m_pTable == (void *) this);
_ASSERTE(pIter->m_dwBucket < m_pVolatileBucketTable->m_dwNumBuckets && pIter->m_pEntry);
return Helper::GetKey(pIter->m_pEntry);
}
template <class KeyType, class Helper, BOOL bDefaultCopyIsDeep>
HashDatum EEHashTableBase<KeyType, Helper, bDefaultCopyIsDeep>::
IterateGetValue(EEHashTableIteration *pIter)
{
LIMITED_METHOD_CONTRACT;
_ASSERTE(pIter->m_pTable == (void *) this);
_ASSERTE(pIter->m_dwBucket < m_pVolatileBucketTable->m_dwNumBuckets && pIter->m_pEntry);
return pIter->m_pEntry->Data;
}
#endif /* _EE_HASH_INL */
| 1 |
dotnet/runtime | 65,926 | FastMod for EEHashTable (Faster virtual generics) | Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR`, but that is a more complicated change and I think this one is worth having anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
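For context, the win comes from replacing the `%` used for bucket selection with a pair of multiplications (the "fastmod" trick), since the divisor - the bucket count - only changes on resize and its multiplier can be precomputed. Below is a minimal sketch of the general idea only; it is not the exact `GetFastModMultiplier`/`FastMod` helpers used by CoreCLR (their formula may differ), and it assumes a GCC/Clang-style `unsigned __int128`:

```cpp
#include <cstdint>

// Precompute once per divisor d (d != 0): roughly 2^64 / d.
static inline uint64_t GetMultiplierSketch(uint32_t d)
{
    return UINT64_MAX / d + 1;
}

// Computes x % d with two multiplications instead of a hardware divide.
static inline uint32_t FastModSketch(uint32_t x, uint32_t d, uint64_t m)
{
    uint64_t lowbits = m * x; // fractional part of x / d, scaled by 2^64
    return (uint32_t)(((unsigned __int128)lowbits * d) >> 64);
}
```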
| EgorBo | "2022-02-27T11:34:01Z" | "2022-03-01T19:43:37Z" | a5a7836f8193414fe64232d5c386b6402eb70e19 | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | FastMod for EEHashTable (Faster virtual generics). Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR`, but that is a more complicated change and I think this one is worth having anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| ./src/coreclr/vm/util.hpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// util.hpp
//
//
// Miscellaneous useful functions
//
#ifndef _H_UTIL
#define _H_UTIL
#include "utilcode.h"
#include "metadata.h"
#include "holderinst.h"
#include "clrdata.h"
#include "xclrdata.h"
#include "posterror.h"
#include "clr_std/type_traits"
// Prevent the use of UtilMessageBox and WszMessageBox from inside the EE.
#undef UtilMessageBoxCatastrophic
#undef UtilMessageBoxCatastrophicNonLocalized
#undef UtilMessageBoxCatastrophic
#undef UtilMessageBoxCatastrophicNonLocalizedVA
#undef UtilMessageBox
#undef UtilMessageBoxNonLocalized
#undef UtilMessageBoxVA
#undef UtilMessageBoxNonLocalizedVA
#undef WszMessageBox
#define UtilMessageBoxCatastrophic __error("Use one of the EEMessageBox APIs (defined in eemessagebox.h) from inside the EE")
#define UtilMessageBoxCatastrophicNonLocalized __error("Use one of the EEMessageBox APIs (defined in eemessagebox.h) from inside the EE")
#define UtilMessageBoxCatastrophicVA __error("Use one of the EEMessageBox APIs (defined in eemessagebox.h) from inside the EE")
#define UtilMessageBoxCatastrophicNonLocalizedVA __error("Use one of the EEMessageBox APIs (defined in eemessagebox.h) from inside the EE")
#define UtilMessageBox __error("Use one of the EEMessageBox APIs (defined in eemessagebox.h) from inside the EE")
#define UtilMessageBoxNonLocalized __error("Use one of the EEMessageBox APIs (defined in eemessagebox.h) from inside the EE")
#define UtilMessageBoxVA __error("Use one of the EEMessageBox APIs (defined in eemessagebox.h) from inside the EE")
#define UtilMessageBoxNonLocalizedVA __error("Use one of the EEMessageBox APIs (defined in eemessagebox.h) from inside the EE")
#define WszMessageBox __error("Use one of the EEMessageBox APIs (defined in eemessagebox.h) from inside the EE")
// Hot cache lines need to be aligned to cache line size to improve performance
#if defined(TARGET_ARM64)
#define MAX_CACHE_LINE_SIZE 128
#else
#define MAX_CACHE_LINE_SIZE 64
#endif
//========================================================================
// More convenient names for integer types of a guaranteed size.
//========================================================================
typedef __int8 I1;
typedef ArrayDPTR(I1) PTR_I1;
typedef unsigned __int8 U1;
typedef __int16 I2;
typedef unsigned __int16 U2;
typedef __int32 I4;
typedef unsigned __int32 U4;
typedef __int64 I8;
typedef unsigned __int64 U8;
typedef float R4;
typedef double R8;
//
// Forward the FastInterlock methods to the matching Win32 APIs. They are implemented
// using compiler intrinsics so they are as fast as they can possibly be.
//
#define FastInterlockIncrement InterlockedIncrement
#define FastInterlockDecrement InterlockedDecrement
#define FastInterlockExchange InterlockedExchange
#define FastInterlockCompareExchange InterlockedCompareExchange
#define FastInterlockExchangeAdd InterlockedExchangeAdd
#define FastInterlockExchangeLong InterlockedExchange64
#define FastInterlockCompareExchangeLong InterlockedCompareExchange64
#define FastInterlockExchangeAddLong InterlockedExchangeAdd64
//
// Forward FastInterlock[Compare]ExchangePointer to the
// Utilcode Interlocked[Compare]ExchangeT.
//
#define FastInterlockExchangePointer InterlockedExchangeT
#define FastInterlockCompareExchangePointer InterlockedCompareExchangeT
FORCEINLINE void FastInterlockOr(DWORD RAW_KEYWORD(volatile) *p, const int msk)
{
LIMITED_METHOD_CONTRACT;
InterlockedOr((LONG *)p, msk);
}
FORCEINLINE void FastInterlockAnd(DWORD RAW_KEYWORD(volatile) *p, const int msk)
{
LIMITED_METHOD_CONTRACT;
InterlockedAnd((LONG *)p, msk);
}
#ifndef TARGET_UNIX
// Copied from malloc.h: don't want to bring in the whole header file.
void * __cdecl _alloca(size_t);
#endif // !TARGET_UNIX
#ifdef _PREFAST_
// Suppress prefast warning #6255: alloca indicates failure by raising a stack overflow exception
#pragma warning(disable:6255)
#endif // _PREFAST_
#define ISWWHITE(x) ((x)==W(' ') || (x)==W('\t') || (x)==W('\n') || (x)==W('\r') )
BOOL inline FitsInI1(__int64 val)
{
LIMITED_METHOD_DAC_CONTRACT;
return val == (__int64)(__int8)val;
}
BOOL inline FitsInI2(__int64 val)
{
LIMITED_METHOD_CONTRACT;
return val == (__int64)(__int16)val;
}
BOOL inline FitsInI4(__int64 val)
{
LIMITED_METHOD_DAC_CONTRACT;
return val == (__int64)(__int32)val;
}
BOOL inline FitsInU1(unsigned __int64 val)
{
LIMITED_METHOD_CONTRACT;
return val == (unsigned __int64)(unsigned __int8)val;
}
BOOL inline FitsInU2(unsigned __int64 val)
{
LIMITED_METHOD_CONTRACT;
return val == (unsigned __int64)(unsigned __int16)val;
}
BOOL inline FitsInU4(unsigned __int64 val)
{
LIMITED_METHOD_DAC_CONTRACT;
return val == (unsigned __int64)(unsigned __int32)val;
}
// returns FALSE if overflows 15 bits: otherwise, (*pa) is incremented by b
BOOL inline SafeAddUINT15(UINT16 *pa, ULONG b)
{
LIMITED_METHOD_CONTRACT;
UINT16 a = *pa;
// first check if overflows 16 bits
if ( ((UINT16)b) != b )
{
return FALSE;
}
// now make sure that doesn't overflow 15 bits
if (((ULONG)a + b) > 0x00007FFF)
{
return FALSE;
}
(*pa) += (UINT16)b;
return TRUE;
}
// returns FALSE if overflows 16 bits: otherwise, (*pa) is incremented by b
BOOL inline SafeAddUINT16(UINT16 *pa, ULONG b)
{
UINT16 a = *pa;
if ( ((UINT16)b) != b )
{
return FALSE;
}
// now make sure that doesn't overflow 16 bits
if (((ULONG)a + b) > 0x0000FFFF)
{
return FALSE;
}
(*pa) += (UINT16)b;
return TRUE;
}
// returns FALSE if overflow: otherwise, (*pa) is incremented by b
BOOL inline SafeAddUINT32(UINT32 *pa, UINT32 b)
{
LIMITED_METHOD_CONTRACT;
UINT32 a = *pa;
if ( ((UINT32)(a + b)) < a)
{
return FALSE;
}
(*pa) += b;
return TRUE;
}
// returns FALSE if overflow: otherwise, (*pa) is incremented by b
BOOL inline SafeAddULONG(ULONG *pa, ULONG b)
{
LIMITED_METHOD_CONTRACT;
ULONG a = *pa;
if ( ((ULONG)(a + b)) < a)
{
return FALSE;
}
(*pa) += b;
return TRUE;
}
// returns FALSE if overflow: otherwise, (*pa) is multiplied by b
BOOL inline SafeMulSIZE_T(SIZE_T *pa, SIZE_T b)
{
LIMITED_METHOD_CONTRACT;
#ifdef _DEBUG_IMPL
{
//Make sure SIZE_T is unsigned
SIZE_T m = ((SIZE_T)(-1));
SIZE_T z = 0;
_ASSERTE(m > z);
}
#endif
SIZE_T a = *pa;
const SIZE_T m = ((SIZE_T)(-1));
if ( (m / b) < a )
{
return FALSE;
}
(*pa) *= b;
return TRUE;
}
//************************************************************************
// CQuickHeap
//
// A fast non-multithread-safe heap for short term use.
// Destroying the heap frees all blocks allocated from the heap.
// Blocks cannot be freed individually.
//
// The heap uses COM+ exceptions to report errors.
//
// The heap does not use any internal synchronization so it is not
// multithreadsafe.
//************************************************************************
class CQuickHeap
{
public:
CQuickHeap();
~CQuickHeap();
//---------------------------------------------------------------
// Allocates a block of "sz" bytes. If there's not enough
// memory, throws an OutOfMemoryError.
//---------------------------------------------------------------
LPVOID Alloc(UINT sz);
private:
enum {
#ifdef _DEBUG
kBlockSize = 24
#else
kBlockSize = 1024
#endif
};
// The QuickHeap allocates QuickBlock's as needed and chains
// them in a single-linked list. Most QuickBlocks have a size
// of kBlockSize bytes (not counting m_next), and individual
// allocation requests are suballocated from them.
// Allocation requests of greater than kBlockSize are satisfied
// by allocating a special big QuickBlock of the right size.
struct QuickBlock
{
QuickBlock *m_next;
BYTE m_bytes[1];
};
// Linked list of QuickBlock's.
QuickBlock *m_pFirstQuickBlock;
// Offset to next available byte in m_pFirstQuickBlock.
LPBYTE m_pNextFree;
// Linked list of big QuickBlock's
QuickBlock *m_pFirstBigQuickBlock;
};
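// Example usage (a sketch only; cbNeeded is a hypothetical size): every allocation
// is suballocated from kBlockSize chunks, and all of them are released together
// when the heap is destroyed - there is no per-allocation free.
//
//     {
//         CQuickHeap heap;
//         BYTE *pScratch = (BYTE *)heap.Alloc(cbNeeded);  // throws OutOfMemory on failure
//         // ... use pScratch for short-lived work ...
//     }   // ~CQuickHeap frees all blocks allocated above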
void PrintToStdOutA(const char *pszString);
void PrintToStdOutW(const WCHAR *pwzString);
void PrintToStdErrA(const char *pszString);
void PrintToStdErrW(const WCHAR *pwzString);
void NPrintToStdOutA(const char *pszString, size_t nbytes);
void NPrintToStdOutW(const WCHAR *pwzString, size_t nchars);
void NPrintToStdErrA(const char *pszString, size_t nbytes);
void NPrintToStdErrW(const WCHAR *pwzString, size_t nchars);
#include "nativevaraccessors.h"
// --------------------------------------------------------------------------------
// GCX macros
//
// These are the normal way to change or assert the GC mode of a thread. They handle
// the required stack discipline in mode switches with an autodestructor which
// automatically triggers on leaving the current scope.
//
// Usage:
// GCX_COOP(); Switch to cooperative mode, assume thread is setup
// GCX_PREEMP(); Switch to preemptive mode, NOP if no thread setup
// GCX_COOP_THREAD_EXISTS(Thread*); Fast switch to cooperative mode, must pass non-null Thread
// GCX_PREEMP_THREAD_EXISTS(Thread*); Fast switch to preemptive mode, must pass non-null Thread
//
// (There is an intentional asymmetry between GCX_COOP and GCX_PREEMP. GCX_COOP
// asserts if you call it without having a Thread setup. GCX_PREEMP becomes a NOP.
// This is because all unmanaged threads are effectively preemp.)
//
// (There is actually one more case here - an "EE worker thread" such as the debugger
// thread or GC thread, which we don't want to call SetupThread() on, but which is
// effectively in cooperative mode due to explicit cooperation with the collector.
// This case is not handled by these macros; the current working assumption is that
// such threads never use them. But at some point we may have to consider
// this case if there is utility code which is called from those threads.)
//
// GCX_MAYBE_*(BOOL); Same as above, but only do the switch if BOOL is TRUE.
//
// GCX_ASSERT_*(); Same as above, but assert mode rather than switch to mode.
// Note that assert is applied during backout as well.
// No overhead in a free build.
//
// GCX_FORBID(); Add "ForbidGC" semantics to a cooperative mode situation.
// Asserts that the thread will not trigger a GC or
// reach a GC-safe point, or call anything that might
// do one of these things.
//
// GCX_NOTRIGGER(); "ForbidGC" without the automatic assertion for coop mode.
//
// --------------------------------------------------------------------------------
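// Example (a minimal sketch, not taken from real code; the function and variable
// names below are hypothetical):
//
//     void TouchManagedObject(OBJECTREF *pRef)
//     {
//         GCX_COOP();      // switch to cooperative mode for the rest of this scope
//         // ... safe to work with managed object references here ...
//     }                    // the holder's destructor restores the previous mode
//
//     void CallOutToNative()
//     {
//         GCX_PREEMP();    // switch to preemptive mode (NOP if no Thread is set up)
//         // ... long-running native work that must not block the GC ...
//     }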
template<BOOL COOPERATIVE>
class AutoCleanupGCAssert;
template<BOOL COOPERATIVE>
class GCAssert;
typedef AutoCleanupGCAssert<TRUE> AutoCleanupGCAssertCoop;
typedef AutoCleanupGCAssert<FALSE> AutoCleanupGCAssertPreemp;
typedef GCAssert<TRUE> GCAssertCoop;
typedef GCAssert<FALSE> GCAssertPreemp;
#if !defined(DACCESS_COMPILE)
#ifdef ENABLE_CONTRACTS_IMPL
#define GCX_COOP() GCCoop __gcHolder("GCX_COOP", __FUNCTION__, __FILE__, __LINE__)
#define GCX_COOP_NO_DTOR() GCCoopNoDtor __gcHolder; __gcHolder.Enter(TRUE, "GCX_COOP_NO_DTOR", __FUNCTION__, __FILE__, __LINE__)
#define GCX_COOP_NO_DTOR_END() __gcHolder.Leave();
#else
#define GCX_COOP() GCCoop __gcHolder
#define GCX_COOP_NO_DTOR() GCCoopNoDtor __gcHolder; __gcHolder.Enter(TRUE)
#define GCX_COOP_NO_DTOR_END() __gcHolder.Leave();
#endif
#ifdef ENABLE_CONTRACTS_IMPL
#define GCX_PREEMP() GCPreemp __gcHolder("GCX_PREEMP", __FUNCTION__, __FILE__, __LINE__)
#define GCX_PREEMP_NO_DTOR() GCPreempNoDtor __gcHolder; __gcHolder.Enter(TRUE, "GCX_PREEMP_NO_DTOR", __FUNCTION__, __FILE__, __LINE__)
#define GCX_PREEMP_NO_DTOR_HAVE_THREAD(curThreadNullOk) GCPreempNoDtor __gcHolder; __gcHolder.Enter(curThreadNullOk, TRUE, "GCX_PREEMP_NO_DTOR_HAVE_THREAD", __FUNCTION__, __FILE__, __LINE__)
#define GCX_PREEMP_NO_DTOR_END() __gcHolder.Leave();
#else
#define GCX_PREEMP() GCPreemp __gcHolder
#define GCX_PREEMP_NO_DTOR_HAVE_THREAD(curThreadNullOk) GCPreempNoDtor __gcHolder; __gcHolder.Enter(curThreadNullOk, TRUE)
#define GCX_PREEMP_NO_DTOR() GCPreempNoDtor __gcHolder; __gcHolder.Enter(TRUE)
#define GCX_PREEMP_NO_DTOR_END() __gcHolder.Leave()
#endif
#ifdef ENABLE_CONTRACTS_IMPL
#define GCX_COOP_THREAD_EXISTS(curThread) GCCoopThreadExists __gcHolder((curThread), "GCX_COOP_THREAD_EXISTS", __FUNCTION__, __FILE__, __LINE__)
#else
#define GCX_COOP_THREAD_EXISTS(curThread) GCCoopThreadExists __gcHolder((curThread))
#endif
#ifdef ENABLE_CONTRACTS_IMPL
#define GCX_PREEMP_THREAD_EXISTS(curThread) GCPreempThreadExists __gcHolder((curThread), "GCX_PREEMP_THREAD_EXISTS", __FUNCTION__, __FILE__, __LINE__)
#else
#define GCX_PREEMP_THREAD_EXISTS(curThread) GCPreempThreadExists __gcHolder((curThread))
#endif
#ifdef ENABLE_CONTRACTS_IMPL
#define GCX_MAYBE_COOP(_cond) GCCoop __gcHolder(_cond, "GCX_MAYBE_COOP", __FUNCTION__, __FILE__, __LINE__)
#define GCX_MAYBE_COOP_NO_DTOR(_cond) GCCoopNoDtor __gcHolder; __gcHolder.Enter(_cond, "GCX_MAYBE_COOP_NO_DTOR", __FUNCTION__, __FILE__, __LINE__)
#define GCX_MAYBE_COOP_NO_DTOR_END() __gcHolder.Leave();
#else
#define GCX_MAYBE_COOP(_cond) GCCoop __gcHolder(_cond)
#define GCX_MAYBE_COOP_NO_DTOR(_cond) GCCoopNoDtor __gcHolder; __gcHolder.Enter(_cond)
#define GCX_MAYBE_COOP_NO_DTOR_END() __gcHolder.Leave();
#endif
#ifdef ENABLE_CONTRACTS_IMPL
#define GCX_MAYBE_PREEMP(_cond) GCPreemp __gcHolder(_cond, "GCX_MAYBE_PREEMP", __FUNCTION__, __FILE__, __LINE__)
#define GCX_MAYBE_PREEMP_NO_DTOR(_cond) GCPreempNoDtor __gcHolder; __gcHolder.Enter(_cond, "GCX_MAYBE_PREEMP_NO_DTOR", __FUNCTION__, __FILE__, __LINE__)
#define GCX_MAYBE_PREEMP_NO_DTOR_END() __gcHolder.Leave();
#else
#define GCX_MAYBE_PREEMP(_cond) GCPreemp __gcHolder(_cond)
#define GCX_MAYBE_PREEMP_NO_DTOR(_cond) GCPreempNoDtor __gcHolder; __gcHolder.Enter(_cond)
#define GCX_MAYBE_PREEMP_NO_DTOR_END() __gcHolder.Leave()
#endif
#ifdef ENABLE_CONTRACTS_IMPL
#define GCX_MAYBE_COOP_THREAD_EXISTS(curThread, _cond) GCCoopThreadExists __gcHolder((curThread), (_cond), "GCX_MAYBE_COOP_THREAD_EXISTS", __FUNCTION__, __FILE__, __LINE__)
#else
#define GCX_MAYBE_COOP_THREAD_EXISTS(curThread, _cond) GCCoopThreadExists __gcHolder((curThread), (_cond))
#endif
#ifdef ENABLE_CONTRACTS_IMPL
#define GCX_MAYBE_PREEMP_THREAD_EXISTS(curThread, _cond) GCPreempThreadExists __gcHolder((curThread), (_cond), "GCX_MAYBE_PREEMP_THREAD_EXISTS", __FUNCTION__, __FILE__, __LINE__)
#else
#define GCX_MAYBE_PREEMP_THREAD_EXISTS(curThread, _cond) GCPreempThreadExists __gcHolder((curThread), (_cond))
#endif
// This has a potential race with the GC thread. It is currently
// used for a few cases where (a) we potentially haven't started up the EE yet, or
// (b) we are on a "special thread".
#ifdef ENABLE_CONTRACTS_IMPL
#define GCX_COOP_NO_THREAD_BROKEN() GCCoopHackNoThread __gcHolder("GCX_COOP_NO_THREAD_BROKEN", __FUNCTION__, __FILE__, __LINE__)
#else
#define GCX_COOP_NO_THREAD_BROKEN() GCCoopHackNoThread __gcHolder
#endif
#ifdef ENABLE_CONTRACTS_IMPL
#define GCX_MAYBE_COOP_NO_THREAD_BROKEN(_cond) GCCoopHackNoThread __gcHolder(_cond, "GCX_MAYBE_COOP_NO_THREAD_BROKEN", __FUNCTION__, __FILE__, __LINE__)
#else
#define GCX_MAYBE_COOP_NO_THREAD_BROKEN(_cond) GCCoopHackNoThread __gcHolder(_cond)
#endif
#else // !defined(DACCESS_COMPILE)
#define GCX_COOP()
#define GCX_COOP_NO_DTOR()
#define GCX_COOP_NO_DTOR_END()
#define GCX_PREEMP()
#define GCX_PREEMP_NO_DTOR()
#define GCX_PREEMP_NO_DTOR_HAVE_THREAD(curThreadNullOk)
#define GCX_PREEMP_NO_DTOR_END()
#define GCX_MAYBE_PREEMP(_cond)
#define GCX_COOP_NO_THREAD_BROKEN()
#define GCX_MAYBE_COOP_NO_THREAD_BROKEN(_cond)
#define GCX_PREEMP_THREAD_EXISTS(curThread)
#define GCX_COOP_THREAD_EXISTS(curThread)
#define GCX_POP()
#endif // !defined(DACCESS_COMPILE)
#if defined(_DEBUG_IMPL)
#define GCX_ASSERT_PREEMP() ::AutoCleanupGCAssertPreemp __gcHolder
#define GCX_ASSERT_COOP() ::AutoCleanupGCAssertCoop __gcHolder
#define BEGIN_GCX_ASSERT_COOP \
{ \
GCAssertCoop __gcHolder; \
__gcHolder.BeginGCAssert()
#define END_GCX_ASSERT_COOP \
__gcHolder.EndGCAssert(); \
}
#define BEGIN_GCX_ASSERT_PREEMP \
{ \
GCAssertPreemp __gcHolder; \
__gcHolder.BeginGCAssert()
#define END_GCX_ASSERT_PREEMP \
__gcHolder.EndGCAssert(); \
}
#else
#define GCX_ASSERT_PREEMP()
#define GCX_ASSERT_COOP()
#define BEGIN_GCX_ASSERT_COOP \
{
#define END_GCX_ASSERT_COOP \
}
#define BEGIN_GCX_ASSERT_PREEMP \
{
#define END_GCX_ASSERT_PREEMP \
}
#endif
#ifdef ENABLE_CONTRACTS_IMPL
#define GCX_FORBID() ::GCForbid __gcForbidHolder(__FUNCTION__, __FILE__, __LINE__)
#define GCX_NOTRIGGER() ::GCNoTrigger __gcNoTriggerHolder(__FUNCTION__, __FILE__, __LINE__)
#define GCX_MAYBE_FORBID(fConditional) ::GCForbid __gcForbidHolder(fConditional, __FUNCTION__, __FILE__, __LINE__)
#define GCX_MAYBE_NOTRIGGER(fConditional) ::GCNoTrigger __gcNoTriggerHolder(fConditional, __FUNCTION__, __FILE__, __LINE__)
#else
#define GCX_FORBID()
#define GCX_NOTRIGGER()
#define GCX_MAYBE_FORBID(fConditional)
#define GCX_MAYBE_NOTRIGGER(fConditional)
#endif
typedef BOOL (*FnLockOwner)(LPVOID);
struct LockOwner
{
LPVOID lock;
FnLockOwner lockOwnerFunc;
};
// this is the standard lockowner for things that require a lock owner but which really don't
// need any validation due to their simple/safe semantics
// the classic example of this is a hash table that is initialized and then never grows
extern LockOwner g_lockTrustMeIAmThreadSafe;
// The OS ThreadId is not a stable ID for a thread when a host uses fibers instead of threads.
// For each managed Thread, we have a stable and unique id in the Thread object. For other threads,
// e.g. Server GC or Concurrent GC thread, debugger helper thread, we do not have a Thread object,
// and we use OS ThreadId to identify them since they are not managed by a host.
class EEThreadId
{
private:
void *m_FiberPtrId;
public:
#ifdef _DEBUG
EEThreadId()
: m_FiberPtrId(NULL)
{
LIMITED_METHOD_CONTRACT;
}
#endif
void SetToCurrentThread()
{
WRAPPER_NO_CONTRACT;
m_FiberPtrId = ClrTeb::GetFiberPtrId();
}
bool IsCurrentThread() const
{
WRAPPER_NO_CONTRACT;
return (m_FiberPtrId == ClrTeb::GetFiberPtrId());
}
#ifdef _DEBUG
bool IsUnknown() const
{
LIMITED_METHOD_CONTRACT;
return m_FiberPtrId == NULL;
}
#endif
void Clear()
{
LIMITED_METHOD_CONTRACT;
m_FiberPtrId = NULL;
}
};
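// Example (sketch; the member name below is hypothetical): record the owning
// thread when a lock is acquired and assert ownership later.
//
//     EEThreadId m_ownerId;
//     ...
//     m_ownerId.SetToCurrentThread();          // on acquire
//     _ASSERTE(m_ownerId.IsCurrentThread());   // on paths that require ownership
//     m_ownerId.Clear();                       // on release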
#ifndef TARGET_UNIX
HMODULE CLRLoadLibraryEx(LPCWSTR lpLibFileName, HANDLE hFile, DWORD dwFlags);
#endif // !TARGET_UNIX
HMODULE CLRLoadLibrary(LPCWSTR lpLibFileName);
BOOL CLRFreeLibrary(HMODULE hModule);
LPVOID
CLRMapViewOfFile(
IN HANDLE hFileMappingObject,
IN DWORD dwDesiredAccess,
IN DWORD dwFileOffsetHigh,
IN DWORD dwFileOffsetLow,
IN SIZE_T dwNumberOfBytesToMap,
IN LPVOID lpBaseAddress = NULL);
BOOL
CLRUnmapViewOfFile(
IN LPVOID lpBaseAddress
);
#ifndef DACCESS_COMPILE
FORCEINLINE void VoidCLRUnmapViewOfFile(void *ptr) { CLRUnmapViewOfFile(ptr); }
typedef Wrapper<void *, DoNothing, VoidCLRUnmapViewOfFile> CLRMapViewHolder;
#else
typedef Wrapper<void *, DoNothing, DoNothing> CLRMapViewHolder;
#endif
#ifdef TARGET_UNIX
#ifndef DACCESS_COMPILE
FORCEINLINE void VoidPALUnloadPEFile(void *ptr) { PAL_LOADUnloadPEFile(ptr); }
typedef Wrapper<void *, DoNothing, VoidPALUnloadPEFile> PALPEFileHolder;
#else
typedef Wrapper<void *, DoNothing, DoNothing> PALPEFileHolder;
#endif
#endif // TARGET_UNIX
#define SetupThreadForComCall(OOMRetVal) \
MAKE_CURRENT_THREAD_AVAILABLE_EX(GetThreadNULLOk()); \
if (CURRENT_THREAD == NULL) \
{ \
CURRENT_THREAD = SetupThreadNoThrow(); \
if (CURRENT_THREAD == NULL) \
return OOMRetVal; \
} \
#define SetupForComCallHR() SetupThreadForComCall(E_OUTOFMEMORY)
#define SetupForComCallDWORD() SetupThreadForComCall(ERROR_OUTOFMEMORY)
// A holder for NATIVE_LIBRARY_HANDLE.
FORCEINLINE void VoidFreeNativeLibrary(NATIVE_LIBRARY_HANDLE h)
{
WRAPPER_NO_CONTRACT;
if (h == NULL)
return;
#ifdef HOST_UNIX
PAL_FreeLibraryDirect(h);
#else
FreeLibrary(h);
#endif
}
typedef Wrapper<NATIVE_LIBRARY_HANDLE, DoNothing<NATIVE_LIBRARY_HANDLE>, VoidFreeNativeLibrary, NULL> NativeLibraryHandleHolder;
extern thread_local size_t t_CantStopCount;
// For debugging, we can track arbitrary Can't-Stop regions.
// In V1.0, this was on the Thread object, but we need to track this for threads w/o a Thread object.
FORCEINLINE void IncCantStopCount()
{
t_CantStopCount++;
}
FORCEINLINE void DecCantStopCount()
{
t_CantStopCount--;
}
typedef StateHolder<IncCantStopCount, DecCantStopCount> CantStopHolder;
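// Example (sketch): bracket a can't-stop region with the holder; the count is
// incremented on construction and decremented automatically on scope exit.
//
//     {
//         CantStopHolder hCantStop;
//         // ... work that must complete without being stopped ...
//     }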
#ifdef _DEBUG
// For debug-only, this can be used w/ a holder to ensure that we're keeping our CS count balanced.
// We should never use this w/ control flow.
inline size_t GetCantStopCount()
{
return t_CantStopCount;
}
// At places where we know we're calling out to native code, we can assert that we're NOT in a CS region.
// This is _debug only since we only use it for asserts; not for real code-flow control in a retail build.
inline bool IsInCantStopRegion()
{
return (GetCantStopCount() > 0);
}
#endif // _DEBUG
BOOL IsValidMethodCodeNotification(USHORT Notification);
typedef DPTR(struct JITNotification) PTR_JITNotification;
struct JITNotification
{
USHORT state; // values from CLRDataMethodCodeNotification
TADDR clrModule;
mdToken methodToken;
JITNotification() { SetFree(); }
BOOL IsFree() { return state == CLRDATA_METHNOTIFY_NONE; }
void SetFree() { state = CLRDATA_METHNOTIFY_NONE; clrModule = NULL; methodToken = 0; }
void SetState(TADDR moduleIn, mdToken tokenIn, USHORT NType)
{
_ASSERTE(IsValidMethodCodeNotification(NType));
clrModule = moduleIn;
methodToken = tokenIn;
state = NType;
}
};
// The maximum number of TADDR sized arguments that the SOS exception notification can use
#define MAX_CLR_NOTIFICATION_ARGS 3
GARY_DECL(size_t, g_clrNotificationArguments, MAX_CLR_NOTIFICATION_ARGS);
extern void InitializeClrNotifications();
GPTR_DECL(JITNotification, g_pNotificationTable);
GVAL_DECL(ULONG32, g_dacNotificationFlags);
#if defined(TARGET_UNIX) && !defined(DACCESS_COMPILE)
inline void
InitializeJITNotificationTable()
{
g_pNotificationTable = new (nothrow) JITNotification[1001];
}
#endif // TARGET_UNIX && !DACCESS_COMPILE
class JITNotifications
{
public:
JITNotifications(JITNotification *jitTable);
BOOL SetNotification(TADDR clrModule, mdToken token, USHORT NType);
USHORT Requested(TADDR clrModule, mdToken token);
// if clrModule is NULL, all active notifications are changed to NType
BOOL SetAllNotifications(TADDR clrModule,USHORT NType,BOOL *changedOut);
inline BOOL IsActive() { LIMITED_METHOD_CONTRACT; return m_jitTable!=NULL; }
UINT GetTableSize();
#ifdef DACCESS_COMPILE
static JITNotification *InitializeNotificationTable(UINT TableSize);
// Updates target table from host copy
BOOL UpdateOutOfProcTable();
#endif
private:
UINT GetLength();
void IncrementLength();
void DecrementLength();
BOOL FindItem(TADDR clrModule, mdToken token, UINT *indexOut);
JITNotification *m_jitTable;
};
typedef DPTR(struct GcNotification) PTR_GcNotification;
inline
BOOL IsValidGcNotification(GcEvt_t evType)
{ return (evType < GC_EVENT_TYPE_MAX); }
#define CLRDATA_GC_NONE 0
struct GcNotification
{
GcEvtArgs ev;
GcNotification() { SetFree(); }
BOOL IsFree() { return ev.typ == CLRDATA_GC_NONE; }
void SetFree() { memset(this, 0, sizeof(*this)); ev.typ = (GcEvt_t) CLRDATA_GC_NONE; }
void Set(GcEvtArgs ev_)
{
_ASSERTE(IsValidGcNotification(ev_.typ));
ev = ev_;
}
BOOL IsMatch(GcEvtArgs ev_)
{
LIMITED_METHOD_CONTRACT;
if (ev.typ != ev_.typ)
{
return FALSE;
}
switch (ev.typ)
{
case GC_MARK_END:
if (ev_.condemnedGeneration == 0 ||
(ev.condemnedGeneration & ev_.condemnedGeneration) != 0)
{
return TRUE;
}
break;
default:
break;
}
return FALSE;
}
};
GPTR_DECL(GcNotification, g_pGcNotificationTable);
class GcNotifications
{
public:
GcNotifications(GcNotification *gcTable);
BOOL SetNotification(GcEvtArgs ev);
GcEvtArgs* GetNotification(GcEvtArgs ev)
{
LIMITED_METHOD_CONTRACT;
UINT idx;
if (FindItem(ev, &idx))
{
return &m_gcTable[idx].ev;
}
else
{
return NULL;
}
}
// if clrModule is NULL, all active notifications are changed to NType
inline BOOL IsActive()
{ return m_gcTable != NULL; }
UINT GetTableSize()
{ return Size(); }
#ifdef DACCESS_COMPILE
static GcNotification *InitializeNotificationTable(UINT TableSize);
// Updates target table from host copy
BOOL UpdateOutOfProcTable();
#endif
private:
UINT& Length()
{
LIMITED_METHOD_CONTRACT;
_ASSERTE(IsActive());
UINT *pLen = (UINT *) &(m_gcTable[-1].ev.typ);
return *pLen;
}
UINT& Size()
{
_ASSERTE(IsActive());
UINT *pLen = (UINT *) &(m_gcTable[-1].ev.typ);
return *(pLen+1);
}
void IncrementLength()
{ ++Length(); }
void DecrementLength()
{ --Length(); }
BOOL FindItem(GcEvtArgs ev, UINT *indexOut);
GcNotification *m_gcTable;
};
class MethodDesc;
class Module;
class DACNotify
{
public:
// types
enum {
MODULE_LOAD_NOTIFICATION=1,
MODULE_UNLOAD_NOTIFICATION=2,
JIT_NOTIFICATION=3,
JIT_PITCHING_NOTIFICATION=4,
EXCEPTION_NOTIFICATION=5,
GC_NOTIFICATION= 6,
CATCH_ENTER_NOTIFICATION = 7,
JIT_NOTIFICATION2=8,
};
// called from the runtime
static void DoJITNotification(MethodDesc *MethodDescPtr, TADDR NativeCodeLocation);
static void DoJITPitchingNotification(MethodDesc *MethodDescPtr);
static void DoModuleLoadNotification(Module *Module);
static void DoModuleUnloadNotification(Module *Module);
static void DoExceptionNotification(class Thread* ThreadPtr);
static void DoGCNotification(const GcEvtArgs& evtargs);
static void DoExceptionCatcherEnterNotification(MethodDesc *MethodDescPtr, DWORD nativeOffset);
// called from the DAC
static int GetType(TADDR Args[]);
static BOOL ParseJITNotification(TADDR Args[], TADDR& MethodDescPtr, TADDR& NativeCodeLocation);
static BOOL ParseJITPitchingNotification(TADDR Args[], TADDR& MethodDescPtr);
static BOOL ParseModuleLoadNotification(TADDR Args[], TADDR& ModulePtr);
static BOOL ParseModuleUnloadNotification(TADDR Args[], TADDR& ModulePtr);
static BOOL ParseExceptionNotification(TADDR Args[], TADDR& ThreadPtr);
static BOOL ParseGCNotification(TADDR Args[], GcEvtArgs& evtargs);
static BOOL ParseExceptionCatcherEnterNotification(TADDR Args[], TADDR& MethodDescPtr, DWORD& nativeOffset);
};
void DACNotifyCompilationFinished(MethodDesc *pMethodDesc);
// These wrap the SString::CompareCaseInsensitive function in a way that makes it
// easy to fix code that uses _stricmp. _stricmp should be avoided as it uses the current
// C-runtime locale rather than the invariant culture.
//
// Note that unlike the real _stricmp, these functions unavoidably have a throws/gc_triggers/inject_fault
// contract. So if you need a case-insensitive comparison in a place where you can't tolerate this contract,
// you've got a problem.
int __cdecl stricmpUTF8(const char* szStr1, const char* szStr2);
BOOL DbgIsExecutable(LPVOID lpMem, SIZE_T length);
int GetRandomInt(int maxVal);
//
//
// COMCHARACTER
//
//
class COMCharacter {
public:
//These are here for support from native code. They are never called from our managed classes.
static BOOL nativeIsWhiteSpace(WCHAR c);
static BOOL nativeIsDigit(WCHAR c);
};
// ======================================================================================
// Simple, reusable 100ns timer for normalizing ticks. For use in Q/FCalls to avoid discrepancy with
// tick frequency between native and managed.
class NormalizedTimer
{
private:
static const int64_t NormalizedTicksPerSecond = 10000000 /* 100ns ticks per second (1e7) */;
static Volatile<double> s_frequency;
LARGE_INTEGER startTimestamp;
LARGE_INTEGER stopTimestamp;
#if _DEBUG
bool isRunning = false;
#endif // _DEBUG
public:
NormalizedTimer()
{
LIMITED_METHOD_CONTRACT;
if (s_frequency.Load() == -1)
{
double frequency;
LARGE_INTEGER qpfValue;
QueryPerformanceFrequency(&qpfValue);
frequency = static_cast<double>(qpfValue.QuadPart);
frequency /= NormalizedTicksPerSecond;
s_frequency.Store(frequency);
}
startTimestamp.QuadPart = 0;
stopTimestamp.QuadPart = 0;
}
// ======================================================================================
// Start the timer
inline
void Start()
{
LIMITED_METHOD_CONTRACT;
_ASSERTE(!isRunning);
QueryPerformanceCounter(&startTimestamp);
#if _DEBUG
isRunning = true;
#endif // _DEBUG
}
// ======================================================================================
// Stop the timer. Must follow a matching Start() (asserted in debug builds).
inline
void Stop()
{
LIMITED_METHOD_CONTRACT;
_ASSERTE(isRunning);
QueryPerformanceCounter(&stopTimestamp);
#if _DEBUG
isRunning = false;
#endif // _DEBUG
}
// ======================================================================================
// Return the elapsed time of the most recent Start/Stop interval in 100ns ticks.
// The timer must have been started and then stopped before calling this
// (asserted in debug builds).
inline
int64_t Elapsed100nsTicks()
{
LIMITED_METHOD_CONTRACT;
_ASSERTE(!isRunning);
_ASSERTE(startTimestamp.QuadPart > 0);
_ASSERTE(stopTimestamp.QuadPart > 0);
return static_cast<int64_t>((stopTimestamp.QuadPart - startTimestamp.QuadPart) / s_frequency);
}
};
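A minimal usage sketch for the timer above, assuming the declarations above are visible; `DoWork` and `Measure100nsTicks` below are hypothetical names:

```cpp
// Hypothetical caller; NormalizedTimer is declared above.
void DoWork(); // stand-in for the code being measured

int64_t Measure100nsTicks()
{
    NormalizedTimer timer;
    timer.Start();                      // captures the starting QPC value
    DoWork();
    timer.Stop();                       // captures the stopping QPC value
    return timer.Elapsed100nsTicks();   // difference normalized to 100ns ticks
}
```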
#ifdef _DEBUG
#define FORCEINLINE_NONDEBUG
#else
#define FORCEINLINE_NONDEBUG FORCEINLINE
#endif
#ifndef TARGET_UNIX
// Extract the file version from an executable.
HRESULT GetFileVersion(LPCWSTR wszFilePath, ULARGE_INTEGER* pFileVersion);
#endif // !TARGET_UNIX
#endif /* _H_UTIL */
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// util.hpp
//
//
// Miscellaneous useful functions
//
#ifndef _H_UTIL
#define _H_UTIL
#include "utilcode.h"
#include "metadata.h"
#include "holderinst.h"
#include "clrdata.h"
#include "xclrdata.h"
#include "posterror.h"
#include "clr_std/type_traits"
// Prevent the use of UtilMessageBox and WszMessageBox from inside the EE.
#undef UtilMessageBoxCatastrophic
#undef UtilMessageBoxCatastrophicNonLocalized
#undef UtilMessageBoxCatastrophic
#undef UtilMessageBoxCatastrophicNonLocalizedVA
#undef UtilMessageBox
#undef UtilMessageBoxNonLocalized
#undef UtilMessageBoxVA
#undef UtilMessageBoxNonLocalizedVA
#undef WszMessageBox
#define UtilMessageBoxCatastrophic __error("Use one of the EEMessageBox APIs (defined in eemessagebox.h) from inside the EE")
#define UtilMessageBoxCatastrophicNonLocalized __error("Use one of the EEMessageBox APIs (defined in eemessagebox.h) from inside the EE")
#define UtilMessageBoxCatastrophicVA __error("Use one of the EEMessageBox APIs (defined in eemessagebox.h) from inside the EE")
#define UtilMessageBoxCatastrophicNonLocalizedVA __error("Use one of the EEMessageBox APIs (defined in eemessagebox.h) from inside the EE")
#define UtilMessageBox __error("Use one of the EEMessageBox APIs (defined in eemessagebox.h) from inside the EE")
#define UtilMessageBoxNonLocalized __error("Use one of the EEMessageBox APIs (defined in eemessagebox.h) from inside the EE")
#define UtilMessageBoxVA __error("Use one of the EEMessageBox APIs (defined in eemessagebox.h) from inside the EE")
#define UtilMessageBoxNonLocalizedVA __error("Use one of the EEMessageBox APIs (defined in eemessagebox.h) from inside the EE")
#define WszMessageBox __error("Use one of the EEMessageBox APIs (defined in eemessagebox.h) from inside the EE")
// Hot cache lines need to be aligned to cache line size to improve performance
#if defined(TARGET_ARM64)
#define MAX_CACHE_LINE_SIZE 128
#else
#define MAX_CACHE_LINE_SIZE 64
#endif
//========================================================================
// More convenient names for integer types of a guaranteed size.
//========================================================================
typedef __int8 I1;
typedef ArrayDPTR(I1) PTR_I1;
typedef unsigned __int8 U1;
typedef __int16 I2;
typedef unsigned __int16 U2;
typedef __int32 I4;
typedef unsigned __int32 U4;
typedef __int64 I8;
typedef unsigned __int64 U8;
typedef float R4;
typedef double R8;
//
// Forward the FastInterlock methods to the matching Win32 APIs. They are implemented
// using compiler intrinsics so they are as fast as they can possibly be.
//
#define FastInterlockIncrement InterlockedIncrement
#define FastInterlockDecrement InterlockedDecrement
#define FastInterlockExchange InterlockedExchange
#define FastInterlockCompareExchange InterlockedCompareExchange
#define FastInterlockExchangeAdd InterlockedExchangeAdd
#define FastInterlockExchangeLong InterlockedExchange64
#define FastInterlockCompareExchangeLong InterlockedCompareExchange64
#define FastInterlockExchangeAddLong InterlockedExchangeAdd64
//
// Forward FastInterlock[Compare]ExchangePointer to the
// Utilcode Interlocked[Compare]ExchangeT.
//
#define FastInterlockExchangePointer InterlockedExchangeT
#define FastInterlockCompareExchangePointer InterlockedCompareExchangeT
FORCEINLINE void FastInterlockOr(DWORD RAW_KEYWORD(volatile) *p, const int msk)
{
LIMITED_METHOD_CONTRACT;
InterlockedOr((LONG *)p, msk);
}
FORCEINLINE void FastInterlockAnd(DWORD RAW_KEYWORD(volatile) *p, const int msk)
{
LIMITED_METHOD_CONTRACT;
InterlockedAnd((LONG *)p, msk);
}
#ifndef TARGET_UNIX
// Copied from malloc.h: don't want to bring in the whole header file.
void * __cdecl _alloca(size_t);
#endif // !TARGET_UNIX
#ifdef _PREFAST_
// Suppress prefast warning #6255: alloca indicates failure by raising a stack overflow exception
#pragma warning(disable:6255)
#endif // _PREFAST_
#define ISWWHITE(x) ((x)==W(' ') || (x)==W('\t') || (x)==W('\n') || (x)==W('\r') )
BOOL inline FitsInI1(__int64 val)
{
LIMITED_METHOD_DAC_CONTRACT;
return val == (__int64)(__int8)val;
}
BOOL inline FitsInI2(__int64 val)
{
LIMITED_METHOD_CONTRACT;
return val == (__int64)(__int16)val;
}
BOOL inline FitsInI4(__int64 val)
{
LIMITED_METHOD_DAC_CONTRACT;
return val == (__int64)(__int32)val;
}
BOOL inline FitsInU1(unsigned __int64 val)
{
LIMITED_METHOD_CONTRACT;
return val == (unsigned __int64)(unsigned __int8)val;
}
BOOL inline FitsInU2(unsigned __int64 val)
{
LIMITED_METHOD_CONTRACT;
return val == (unsigned __int64)(unsigned __int16)val;
}
BOOL inline FitsInU4(unsigned __int64 val)
{
LIMITED_METHOD_DAC_CONTRACT;
return val == (unsigned __int64)(unsigned __int32)val;
}
// returns FALSE if overflows 15 bits: otherwise, (*pa) is incremented by b
BOOL inline SafeAddUINT15(UINT16 *pa, ULONG b)
{
LIMITED_METHOD_CONTRACT;
UINT16 a = *pa;
// first check if overflows 16 bits
if ( ((UINT16)b) != b )
{
return FALSE;
}
// now make sure that doesn't overflow 15 bits
if (((ULONG)a + b) > 0x00007FFF)
{
return FALSE;
}
(*pa) += (UINT16)b;
return TRUE;
}
// returns FALSE if overflows 16 bits: otherwise, (*pa) is incremented by b
BOOL inline SafeAddUINT16(UINT16 *pa, ULONG b)
{
UINT16 a = *pa;
if ( ((UINT16)b) != b )
{
return FALSE;
}
// now make sure that doesn't overflow 16 bits
if (((ULONG)a + b) > 0x0000FFFF)
{
return FALSE;
}
(*pa) += (UINT16)b;
return TRUE;
}
// returns FALSE if overflow: otherwise, (*pa) is incremented by b
BOOL inline SafeAddUINT32(UINT32 *pa, UINT32 b)
{
LIMITED_METHOD_CONTRACT;
UINT32 a = *pa;
if ( ((UINT32)(a + b)) < a)
{
return FALSE;
}
(*pa) += b;
return TRUE;
}
// returns FALSE if overflow: otherwise, (*pa) is incremented by b
BOOL inline SafeAddULONG(ULONG *pa, ULONG b)
{
LIMITED_METHOD_CONTRACT;
ULONG a = *pa;
if ( ((ULONG)(a + b)) < a)
{
return FALSE;
}
(*pa) += b;
return TRUE;
}
// returns FALSE if overflow: otherwise, (*pa) is multiplied by b
BOOL inline SafeMulSIZE_T(SIZE_T *pa, SIZE_T b)
{
LIMITED_METHOD_CONTRACT;
#ifdef _DEBUG_IMPL
{
//Make sure SIZE_T is unsigned
SIZE_T m = ((SIZE_T)(-1));
SIZE_T z = 0;
_ASSERTE(m > z);
}
#endif
SIZE_T a = *pa;
const SIZE_T m = ((SIZE_T)(-1));
if ( (m / b) < a )
{
return FALSE;
}
(*pa) *= b;
return TRUE;
}
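A small sketch of how these overflow-checked helpers are meant to be used (the function below is hypothetical, not from this header). Note that SafeMulSIZE_T divides by its second argument, so a zero count is handled separately:

```cpp
// Hypothetical helper: computes elementSize * count, failing cleanly on overflow.
BOOL ComputeArraySize(SIZE_T elementSize, SIZE_T count, SIZE_T *pcbTotal)
{
    if (count == 0)
    {
        *pcbTotal = 0;
        return TRUE;
    }
    SIZE_T cb = elementSize;
    if (!SafeMulSIZE_T(&cb, count))   // cb *= count, or FALSE on overflow
        return FALSE;
    *pcbTotal = cb;
    return TRUE;
}
```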
//************************************************************************
// CQuickHeap
//
// A fast non-multithread-safe heap for short-term use.
// Destroying the heap frees all blocks allocated from the heap.
// Blocks cannot be freed individually.
//
// The heap uses COM+ exceptions to report errors.
//
// The heap does not use any internal synchronization, so it is not
// multithread-safe.
//************************************************************************
class CQuickHeap
{
public:
CQuickHeap();
~CQuickHeap();
//---------------------------------------------------------------
// Allocates a block of "sz" bytes. If there's not enough
// memory, throws an OutOfMemoryError.
//---------------------------------------------------------------
LPVOID Alloc(UINT sz);
private:
enum {
#ifdef _DEBUG
kBlockSize = 24
#else
kBlockSize = 1024
#endif
};
// The QuickHeap allocates QuickBlock's as needed and chains
// them in a single-linked list. Most QuickBlocks have a size
// of kBlockSize bytes (not counting m_next), and individual
// allocation requests are suballocated from them.
// Allocation requests of greater than kBlockSize are satisfied
// by allocating a special big QuickBlock of the right size.
struct QuickBlock
{
QuickBlock *m_next;
BYTE m_bytes[1];
};
// Linked list of QuickBlock's.
QuickBlock *m_pFirstQuickBlock;
// Offset to next available byte in m_pFirstQuickBlock.
LPBYTE m_pNextFree;
// Linked list of big QuickBlock's
QuickBlock *m_pFirstBigQuickBlock;
};
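A hedged usage sketch for CQuickHeap (illustrative names only): individual allocations cannot be freed, so the typical pattern is a scoped heap whose destructor releases everything at once.

```cpp
// Hypothetical short-lived scratch allocations.
void BuildScratchData()
{
    CQuickHeap heap;
    BYTE *pSmall = (BYTE *)heap.Alloc(64);     // suballocated from the current block
    BYTE *pLarge = (BYTE *)heap.Alloc(4096);   // larger than kBlockSize: gets its own block
    // ... use pSmall / pLarge; Alloc throws on out-of-memory ...
}   // ~CQuickHeap frees every block in one go
```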
void PrintToStdOutA(const char *pszString);
void PrintToStdOutW(const WCHAR *pwzString);
void PrintToStdErrA(const char *pszString);
void PrintToStdErrW(const WCHAR *pwzString);
void NPrintToStdOutA(const char *pszString, size_t nbytes);
void NPrintToStdOutW(const WCHAR *pwzString, size_t nchars);
void NPrintToStdErrA(const char *pszString, size_t nbytes);
void NPrintToStdErrW(const WCHAR *pwzString, size_t nchars);
#include "nativevaraccessors.h"
// --------------------------------------------------------------------------------
// GCX macros
//
// These are the normal way to change or assert the GC mode of a thread. They handle
// the required stack discipline in mode switches with an autodestructor which
// automatically triggers on leaving the current scope.
//
// Usage:
// GCX_COOP(); Switch to cooperative mode, assume thread is setup
// GCX_PREEMP(); Switch to preemptive mode, NOP if no thread setup
// GCX_COOP_THREAD_EXISTS(Thread*); Fast switch to cooperative mode, must pass non-null Thread
// GCX_PREEMP_THREAD_EXISTS(Thread*); Fast switch to preemptive mode, must pass non-null Thread
//
// (There is an intentional asymmetry between GCX_COOP and GCX_PREEMP. GCX_COOP
// asserts if you call it without having a Thread setup. GCX_PREEMP becomes a NOP.
// This is because all unmanaged threads are effectively preemptive.)
//
// (There is actually one more case here - an "EE worker thread" such as the debugger
// thread or GC thread, which we don't want to call SetupThread() on, but which is
// effectively in cooperative mode due to explicit cooperation with the collector.
// This case is not handled by these macros; the current working assumption is that
// such threads never use them. But at some point we may have to consider
// this case if there is utility code which is called from those threads.)
//
// GCX_MAYBE_*(BOOL); Same as above, but only do the switch if BOOL is TRUE.
//
// GCX_ASSERT_*(); Same as above, but assert mode rather than switch to mode.
// Note that assert is applied during backout as well.
// No overhead in a free build.
//
// GCX_FORBID(); Add "ForbidGC" semantics to a cooperative mode situation.
// Asserts that the thread will not trigger a GC or
// reach a GC-safe point, or call anything that might
// do one of these things.
//
// GCX_NOTRIGGER(); "ForbidGC" without the automatic assertion for coop mode.
//
// --------------------------------------------------------------------------------
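As a sketch of the stack discipline described above (the function is hypothetical): the holder switches the GC mode for the scope, and its autodestructor restores the previous mode when the scope is left.

```cpp
// Hypothetical helper that must read managed objects (requires cooperative mode).
void InspectManagedState()
{
    GCX_COOP();   // switch to cooperative mode; asserts that a Thread has been set up
    // ... code that touches managed objects goes here ...
}                 // leaving the scope restores the previous GC mode
```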
template<BOOL COOPERATIVE>
class AutoCleanupGCAssert;
template<BOOL COOPERATIVE>
class GCAssert;
typedef AutoCleanupGCAssert<TRUE> AutoCleanupGCAssertCoop;
typedef AutoCleanupGCAssert<FALSE> AutoCleanupGCAssertPreemp;
typedef GCAssert<TRUE> GCAssertCoop;
typedef GCAssert<FALSE> GCAssertPreemp;
#if !defined(DACCESS_COMPILE)
#ifdef ENABLE_CONTRACTS_IMPL
#define GCX_COOP() GCCoop __gcHolder("GCX_COOP", __FUNCTION__, __FILE__, __LINE__)
#define GCX_COOP_NO_DTOR() GCCoopNoDtor __gcHolder; __gcHolder.Enter(TRUE, "GCX_COOP_NO_DTOR", __FUNCTION__, __FILE__, __LINE__)
#define GCX_COOP_NO_DTOR_END() __gcHolder.Leave();
#else
#define GCX_COOP() GCCoop __gcHolder
#define GCX_COOP_NO_DTOR() GCCoopNoDtor __gcHolder; __gcHolder.Enter(TRUE)
#define GCX_COOP_NO_DTOR_END() __gcHolder.Leave();
#endif
#ifdef ENABLE_CONTRACTS_IMPL
#define GCX_PREEMP() GCPreemp __gcHolder("GCX_PREEMP", __FUNCTION__, __FILE__, __LINE__)
#define GCX_PREEMP_NO_DTOR() GCPreempNoDtor __gcHolder; __gcHolder.Enter(TRUE, "GCX_PREEMP_NO_DTOR", __FUNCTION__, __FILE__, __LINE__)
#define GCX_PREEMP_NO_DTOR_HAVE_THREAD(curThreadNullOk) GCPreempNoDtor __gcHolder; __gcHolder.Enter(curThreadNullOk, TRUE, "GCX_PREEMP_NO_DTOR_HAVE_THREAD", __FUNCTION__, __FILE__, __LINE__)
#define GCX_PREEMP_NO_DTOR_END() __gcHolder.Leave();
#else
#define GCX_PREEMP() GCPreemp __gcHolder
#define GCX_PREEMP_NO_DTOR_HAVE_THREAD(curThreadNullOk) GCPreempNoDtor __gcHolder; __gcHolder.Enter(curThreadNullOk, TRUE)
#define GCX_PREEMP_NO_DTOR() GCPreempNoDtor __gcHolder; __gcHolder.Enter(TRUE)
#define GCX_PREEMP_NO_DTOR_END() __gcHolder.Leave()
#endif
#ifdef ENABLE_CONTRACTS_IMPL
#define GCX_COOP_THREAD_EXISTS(curThread) GCCoopThreadExists __gcHolder((curThread), "GCX_COOP_THREAD_EXISTS", __FUNCTION__, __FILE__, __LINE__)
#else
#define GCX_COOP_THREAD_EXISTS(curThread) GCCoopThreadExists __gcHolder((curThread))
#endif
#ifdef ENABLE_CONTRACTS_IMPL
#define GCX_PREEMP_THREAD_EXISTS(curThread) GCPreempThreadExists __gcHolder((curThread), "GCX_PREEMP_THREAD_EXISTS", __FUNCTION__, __FILE__, __LINE__)
#else
#define GCX_PREEMP_THREAD_EXISTS(curThread) GCPreempThreadExists __gcHolder((curThread))
#endif
#ifdef ENABLE_CONTRACTS_IMPL
#define GCX_MAYBE_COOP(_cond) GCCoop __gcHolder(_cond, "GCX_MAYBE_COOP", __FUNCTION__, __FILE__, __LINE__)
#define GCX_MAYBE_COOP_NO_DTOR(_cond) GCCoopNoDtor __gcHolder; __gcHolder.Enter(_cond, "GCX_MAYBE_COOP_NO_DTOR", __FUNCTION__, __FILE__, __LINE__)
#define GCX_MAYBE_COOP_NO_DTOR_END() __gcHolder.Leave();
#else
#define GCX_MAYBE_COOP(_cond) GCCoop __gcHolder(_cond)
#define GCX_MAYBE_COOP_NO_DTOR(_cond) GCCoopNoDtor __gcHolder; __gcHolder.Enter(_cond)
#define GCX_MAYBE_COOP_NO_DTOR_END() __gcHolder.Leave();
#endif
#ifdef ENABLE_CONTRACTS_IMPL
#define GCX_MAYBE_PREEMP(_cond) GCPreemp __gcHolder(_cond, "GCX_MAYBE_PREEMP", __FUNCTION__, __FILE__, __LINE__)
#define GCX_MAYBE_PREEMP_NO_DTOR(_cond) GCPreempNoDtor __gcHolder; __gcHolder.Enter(_cond, "GCX_MAYBE_PREEMP_NO_DTOR", __FUNCTION__, __FILE__, __LINE__)
#define GCX_MAYBE_PREEMP_NO_DTOR_END() __gcHolder.Leave();
#else
#define GCX_MAYBE_PREEMP(_cond) GCPreemp __gcHolder(_cond)
#define GCX_MAYBE_PREEMP_NO_DTOR(_cond) GCPreempNoDtor __gcHolder; __gcHolder.Enter(_cond)
#define GCX_MAYBE_PREEMP_NO_DTOR_END() __gcHolder.Leave()
#endif
#ifdef ENABLE_CONTRACTS_IMPL
#define GCX_MAYBE_COOP_THREAD_EXISTS(curThread, _cond) GCCoopThreadExists __gcHolder((curThread), (_cond), "GCX_MAYBE_COOP_THREAD_EXISTS", __FUNCTION__, __FILE__, __LINE__)
#else
#define GCX_MAYBE_COOP_THREAD_EXISTS(curThread, _cond) GCCoopThreadExists __gcHolder((curThread), (_cond))
#endif
#ifdef ENABLE_CONTRACTS_IMPL
#define GCX_MAYBE_PREEMP_THREAD_EXISTS(curThread, _cond) GCPreempThreadExists __gcHolder((curThread), (_cond), "GCX_MAYBE_PREEMP_THREAD_EXISTS", __FUNCTION__, __FILE__, __LINE__)
#else
#define GCX_MAYBE_PREEMP_THREAD_EXISTS(curThread, _cond) GCPreempThreadExists __gcHolder((curThread), (_cond))
#endif
// This has a potential race with the GC thread. It is currently
// used for a few cases where (a) we potentially haven't started up the EE yet, or
// (b) we are on a "special thread".
#ifdef ENABLE_CONTRACTS_IMPL
#define GCX_COOP_NO_THREAD_BROKEN() GCCoopHackNoThread __gcHolder("GCX_COOP_NO_THREAD_BROKEN", __FUNCTION__, __FILE__, __LINE__)
#else
#define GCX_COOP_NO_THREAD_BROKEN() GCCoopHackNoThread __gcHolder
#endif
#ifdef ENABLE_CONTRACTS_IMPL
#define GCX_MAYBE_COOP_NO_THREAD_BROKEN(_cond) GCCoopHackNoThread __gcHolder(_cond, "GCX_MAYBE_COOP_NO_THREAD_BROKEN", __FUNCTION__, __FILE__, __LINE__)
#else
#define GCX_MAYBE_COOP_NO_THREAD_BROKEN(_cond) GCCoopHackNoThread __gcHolder(_cond)
#endif
#else // !defined(DACCESS_COMPILE)
#define GCX_COOP()
#define GCX_COOP_NO_DTOR()
#define GCX_COOP_NO_DTOR_END()
#define GCX_PREEMP()
#define GCX_PREEMP_NO_DTOR()
#define GCX_PREEMP_NO_DTOR_HAVE_THREAD(curThreadNullOk)
#define GCX_PREEMP_NO_DTOR_END()
#define GCX_MAYBE_PREEMP(_cond)
#define GCX_COOP_NO_THREAD_BROKEN()
#define GCX_MAYBE_COOP_NO_THREAD_BROKEN(_cond)
#define GCX_PREEMP_THREAD_EXISTS(curThread)
#define GCX_COOP_THREAD_EXISTS(curThread)
#define GCX_POP()
#endif // !defined(DACCESS_COMPILE)
#if defined(_DEBUG_IMPL)
#define GCX_ASSERT_PREEMP() ::AutoCleanupGCAssertPreemp __gcHolder
#define GCX_ASSERT_COOP() ::AutoCleanupGCAssertCoop __gcHolder
#define BEGIN_GCX_ASSERT_COOP \
{ \
GCAssertCoop __gcHolder; \
__gcHolder.BeginGCAssert()
#define END_GCX_ASSERT_COOP \
__gcHolder.EndGCAssert(); \
}
#define BEGIN_GCX_ASSERT_PREEMP \
{ \
GCAssertPreemp __gcHolder; \
__gcHolder.BeginGCAssert()
#define END_GCX_ASSERT_PREEMP \
__gcHolder.EndGCAssert(); \
}
#else
#define GCX_ASSERT_PREEMP()
#define GCX_ASSERT_COOP()
#define BEGIN_GCX_ASSERT_COOP \
{
#define END_GCX_ASSERT_COOP \
}
#define BEGIN_GCX_ASSERT_PREEMP \
{
#define END_GCX_ASSERT_PREEMP \
}
#endif
#ifdef ENABLE_CONTRACTS_IMPL
#define GCX_FORBID() ::GCForbid __gcForbidHolder(__FUNCTION__, __FILE__, __LINE__)
#define GCX_NOTRIGGER() ::GCNoTrigger __gcNoTriggerHolder(__FUNCTION__, __FILE__, __LINE__)
#define GCX_MAYBE_FORBID(fConditional) ::GCForbid __gcForbidHolder(fConditional, __FUNCTION__, __FILE__, __LINE__)
#define GCX_MAYBE_NOTRIGGER(fConditional) ::GCNoTrigger __gcNoTriggerHolder(fConditional, __FUNCTION__, __FILE__, __LINE__)
#else
#define GCX_FORBID()
#define GCX_NOTRIGGER()
#define GCX_MAYBE_FORBID(fConditional)
#define GCX_MAYBE_NOTRIGGER(fConditional)
#endif
typedef BOOL (*FnLockOwner)(LPVOID);
struct LockOwner
{
LPVOID lock;
FnLockOwner lockOwnerFunc;
};
// this is the standard lockowner for things that require a lock owner but which really don't
// need any validation due to their simple/safe semantics
// the classic example of this is a hash table that is initialized and then never grows
extern LockOwner g_lockTrustMeIAmThreadSafe;
// The OS ThreadId is not a stable ID for a thread when a host uses fibers instead of threads.
// For each managed Thread, we have a stable and unique id in the Thread object. For other threads,
// e.g. the server GC or concurrent GC threads and the debugger helper thread, we do not have a Thread object,
// and we use the OS ThreadId to identify them since they are not managed by a host.
class EEThreadId
{
private:
void *m_FiberPtrId;
public:
#ifdef _DEBUG
EEThreadId()
: m_FiberPtrId(NULL)
{
LIMITED_METHOD_CONTRACT;
}
#endif
void SetToCurrentThread()
{
WRAPPER_NO_CONTRACT;
m_FiberPtrId = ClrTeb::GetFiberPtrId();
}
bool IsCurrentThread() const
{
WRAPPER_NO_CONTRACT;
return (m_FiberPtrId == ClrTeb::GetFiberPtrId());
}
#ifdef _DEBUG
bool IsUnknown() const
{
LIMITED_METHOD_CONTRACT;
return m_FiberPtrId == NULL;
}
#endif
void Clear()
{
LIMITED_METHOD_CONTRACT;
m_FiberPtrId = NULL;
}
};
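A brief sketch of the intended pattern (the struct below is hypothetical): record the current fiber id when a resource is acquired and compare against it later, rather than relying on the OS thread id.

```cpp
// Hypothetical ownership check built on EEThreadId.
struct OwnedResource
{
    EEThreadId m_owner;

    void Acquire()         { m_owner.SetToCurrentThread(); }
    bool OwnedByMe() const { return m_owner.IsCurrentThread(); }
    void Release()         { m_owner.Clear(); }
};
```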
#ifndef TARGET_UNIX
HMODULE CLRLoadLibraryEx(LPCWSTR lpLibFileName, HANDLE hFile, DWORD dwFlags);
#endif // !TARGET_UNIX
HMODULE CLRLoadLibrary(LPCWSTR lpLibFileName);
BOOL CLRFreeLibrary(HMODULE hModule);
LPVOID
CLRMapViewOfFile(
IN HANDLE hFileMappingObject,
IN DWORD dwDesiredAccess,
IN DWORD dwFileOffsetHigh,
IN DWORD dwFileOffsetLow,
IN SIZE_T dwNumberOfBytesToMap,
IN LPVOID lpBaseAddress = NULL);
BOOL
CLRUnmapViewOfFile(
IN LPVOID lpBaseAddress
);
#ifndef DACCESS_COMPILE
FORCEINLINE void VoidCLRUnmapViewOfFile(void *ptr) { CLRUnmapViewOfFile(ptr); }
typedef Wrapper<void *, DoNothing, VoidCLRUnmapViewOfFile> CLRMapViewHolder;
#else
typedef Wrapper<void *, DoNothing, DoNothing> CLRMapViewHolder;
#endif
#ifdef TARGET_UNIX
#ifndef DACCESS_COMPILE
FORCEINLINE void VoidPALUnloadPEFile(void *ptr) { PAL_LOADUnloadPEFile(ptr); }
typedef Wrapper<void *, DoNothing, VoidPALUnloadPEFile> PALPEFileHolder;
#else
typedef Wrapper<void *, DoNothing, DoNothing> PALPEFileHolder;
#endif
#endif // TARGET_UNIX
#define SetupThreadForComCall(OOMRetVal) \
MAKE_CURRENT_THREAD_AVAILABLE_EX(GetThreadNULLOk()); \
if (CURRENT_THREAD == NULL) \
{ \
CURRENT_THREAD = SetupThreadNoThrow(); \
if (CURRENT_THREAD == NULL) \
return OOMRetVal; \
} \
#define SetupForComCallHR() SetupThreadForComCall(E_OUTOFMEMORY)
#define SetupForComCallDWORD() SetupThreadForComCall(ERROR_OUTOFMEMORY)
// A holder for NATIVE_LIBRARY_HANDLE.
FORCEINLINE void VoidFreeNativeLibrary(NATIVE_LIBRARY_HANDLE h)
{
WRAPPER_NO_CONTRACT;
if (h == NULL)
return;
#ifdef HOST_UNIX
PAL_FreeLibraryDirect(h);
#else
FreeLibrary(h);
#endif
}
typedef Wrapper<NATIVE_LIBRARY_HANDLE, DoNothing<NATIVE_LIBRARY_HANDLE>, VoidFreeNativeLibrary, NULL> NativeLibraryHandleHolder;
extern thread_local size_t t_CantStopCount;
// For debugging, we can track arbitrary Can't-Stop regions.
// In V1.0, this was on the Thread object, but we need to track this for threads w/o a Thread object.
FORCEINLINE void IncCantStopCount()
{
t_CantStopCount++;
}
FORCEINLINE void DecCantStopCount()
{
t_CantStopCount--;
}
typedef StateHolder<IncCantStopCount, DecCantStopCount> CantStopHolder;
#ifdef _DEBUG
// For debug-only, this can be used w/ a holder to ensure that we're keeping our CS count balanced.
// We should never use this w/ control flow.
inline size_t GetCantStopCount()
{
return t_CantStopCount;
}
// At places where we know we're calling out to native code, we can assert that we're NOT in a CS region.
// This is _debug only since we only use it for asserts; not for real code-flow control in a retail build.
inline bool IsInCantStopRegion()
{
return (GetCantStopCount() > 0);
}
#endif // _DEBUG
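A minimal sketch of the holder pattern mentioned above (hypothetical function): the count is incremented for the scope and decremented automatically on exit, which keeps the debug-only asserts balanced.

```cpp
// Hypothetical region that must not be interrupted.
void DoUninterruptibleWork()
{
    CantStopHolder holder;   // IncCantStopCount() now, DecCantStopCount() at scope exit
#ifdef _DEBUG
    _ASSERTE(IsInCantStopRegion());
#endif
    // ... critical work ...
}
```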
BOOL IsValidMethodCodeNotification(USHORT Notification);
typedef DPTR(struct JITNotification) PTR_JITNotification;
struct JITNotification
{
USHORT state; // values from CLRDataMethodCodeNotification
TADDR clrModule;
mdToken methodToken;
JITNotification() { SetFree(); }
BOOL IsFree() { return state == CLRDATA_METHNOTIFY_NONE; }
void SetFree() { state = CLRDATA_METHNOTIFY_NONE; clrModule = NULL; methodToken = 0; }
void SetState(TADDR moduleIn, mdToken tokenIn, USHORT NType)
{
_ASSERTE(IsValidMethodCodeNotification(NType));
clrModule = moduleIn;
methodToken = tokenIn;
state = NType;
}
};
// The maximum number of TADDR sized arguments that the SOS exception notification can use
#define MAX_CLR_NOTIFICATION_ARGS 3
GARY_DECL(size_t, g_clrNotificationArguments, MAX_CLR_NOTIFICATION_ARGS);
extern void InitializeClrNotifications();
GPTR_DECL(JITNotification, g_pNotificationTable);
GVAL_DECL(ULONG32, g_dacNotificationFlags);
#if defined(TARGET_UNIX) && !defined(DACCESS_COMPILE)
inline void
InitializeJITNotificationTable()
{
g_pNotificationTable = new (nothrow) JITNotification[1001];
}
#endif // TARGET_UNIX && !DACCESS_COMPILE
class JITNotifications
{
public:
JITNotifications(JITNotification *jitTable);
BOOL SetNotification(TADDR clrModule, mdToken token, USHORT NType);
USHORT Requested(TADDR clrModule, mdToken token);
// if clrModule is NULL, all active notifications are changed to NType
BOOL SetAllNotifications(TADDR clrModule,USHORT NType,BOOL *changedOut);
inline BOOL IsActive() { LIMITED_METHOD_CONTRACT; return m_jitTable!=NULL; }
UINT GetTableSize();
#ifdef DACCESS_COMPILE
static JITNotification *InitializeNotificationTable(UINT TableSize);
// Updates target table from host copy
BOOL UpdateOutOfProcTable();
#endif
private:
UINT GetLength();
void IncrementLength();
void DecrementLength();
BOOL FindItem(TADDR clrModule, mdToken token, UINT *indexOut);
JITNotification *m_jitTable;
};
typedef DPTR(struct GcNotification) PTR_GcNotification;
inline
BOOL IsValidGcNotification(GcEvt_t evType)
{ return (evType < GC_EVENT_TYPE_MAX); }
#define CLRDATA_GC_NONE 0
struct GcNotification
{
GcEvtArgs ev;
GcNotification() { SetFree(); }
BOOL IsFree() { return ev.typ == CLRDATA_GC_NONE; }
void SetFree() { memset(this, 0, sizeof(*this)); ev.typ = (GcEvt_t) CLRDATA_GC_NONE; }
void Set(GcEvtArgs ev_)
{
_ASSERTE(IsValidGcNotification(ev_.typ));
ev = ev_;
}
BOOL IsMatch(GcEvtArgs ev_)
{
LIMITED_METHOD_CONTRACT;
if (ev.typ != ev_.typ)
{
return FALSE;
}
switch (ev.typ)
{
case GC_MARK_END:
if (ev_.condemnedGeneration == 0 ||
(ev.condemnedGeneration & ev_.condemnedGeneration) != 0)
{
return TRUE;
}
break;
default:
break;
}
return FALSE;
}
};
GPTR_DECL(GcNotification, g_pGcNotificationTable);
class GcNotifications
{
public:
GcNotifications(GcNotification *gcTable);
BOOL SetNotification(GcEvtArgs ev);
GcEvtArgs* GetNotification(GcEvtArgs ev)
{
LIMITED_METHOD_CONTRACT;
UINT idx;
if (FindItem(ev, &idx))
{
return &m_gcTable[idx].ev;
}
else
{
return NULL;
}
}
inline BOOL IsActive()
{ return m_gcTable != NULL; }
UINT GetTableSize()
{ return Size(); }
#ifdef DACCESS_COMPILE
static GcNotification *InitializeNotificationTable(UINT TableSize);
// Updates target table from host copy
BOOL UpdateOutOfProcTable();
#endif
private:
UINT& Length()
{
LIMITED_METHOD_CONTRACT;
_ASSERTE(IsActive());
UINT *pLen = (UINT *) &(m_gcTable[-1].ev.typ);
return *pLen;
}
UINT& Size()
{
_ASSERTE(IsActive());
UINT *pLen = (UINT *) &(m_gcTable[-1].ev.typ);
return *(pLen+1);
}
void IncrementLength()
{ ++Length(); }
void DecrementLength()
{ --Length(); }
BOOL FindItem(GcEvtArgs ev, UINT *indexOut);
GcNotification *m_gcTable;
};
class MethodDesc;
class Module;
class DACNotify
{
public:
// types
enum {
MODULE_LOAD_NOTIFICATION=1,
MODULE_UNLOAD_NOTIFICATION=2,
JIT_NOTIFICATION=3,
JIT_PITCHING_NOTIFICATION=4,
EXCEPTION_NOTIFICATION=5,
GC_NOTIFICATION= 6,
CATCH_ENTER_NOTIFICATION = 7,
JIT_NOTIFICATION2=8,
};
// called from the runtime
static void DoJITNotification(MethodDesc *MethodDescPtr, TADDR NativeCodeLocation);
static void DoJITPitchingNotification(MethodDesc *MethodDescPtr);
static void DoModuleLoadNotification(Module *Module);
static void DoModuleUnloadNotification(Module *Module);
static void DoExceptionNotification(class Thread* ThreadPtr);
static void DoGCNotification(const GcEvtArgs& evtargs);
static void DoExceptionCatcherEnterNotification(MethodDesc *MethodDescPtr, DWORD nativeOffset);
// called from the DAC
static int GetType(TADDR Args[]);
static BOOL ParseJITNotification(TADDR Args[], TADDR& MethodDescPtr, TADDR& NativeCodeLocation);
static BOOL ParseJITPitchingNotification(TADDR Args[], TADDR& MethodDescPtr);
static BOOL ParseModuleLoadNotification(TADDR Args[], TADDR& ModulePtr);
static BOOL ParseModuleUnloadNotification(TADDR Args[], TADDR& ModulePtr);
static BOOL ParseExceptionNotification(TADDR Args[], TADDR& ThreadPtr);
static BOOL ParseGCNotification(TADDR Args[], GcEvtArgs& evtargs);
static BOOL ParseExceptionCatcherEnterNotification(TADDR Args[], TADDR& MethodDescPtr, DWORD& nativeOffset);
};
void DACNotifyCompilationFinished(MethodDesc *pMethodDesc);
// These wrap the SString::CompareCaseInsensitive function in a way that makes it
// easy to fix code that uses _stricmp. _stricmp should be avoided as it uses the current
// C-runtime locale rather than the invariant culture.
//
// Note that unlike the real _stricmp, these functions unavoidably have a throws/gc_triggers/inject_fault
// contract. So if you need a case-insensitive comparison in a place where you can't tolerate this contract,
// you've got a problem.
int __cdecl stricmpUTF8(const char* szStr1, const char* szStr2);
BOOL DbgIsExecutable(LPVOID lpMem, SIZE_T length);
int GetRandomInt(int maxVal);
//
//
// COMCHARACTER
//
//
class COMCharacter {
public:
//These are here for support from native code. They are never called from our managed classes.
static BOOL nativeIsWhiteSpace(WCHAR c);
static BOOL nativeIsDigit(WCHAR c);
};
// ======================================================================================
// Simple, reusable 100ns timer for normalizing ticks. For use in Q/FCalls to avoid discrepancies in
// tick frequency between native and managed code.
class NormalizedTimer
{
private:
static const int64_t NormalizedTicksPerSecond = 10000000 /* 100ns ticks per second (1e7) */;
static Volatile<double> s_frequency;
LARGE_INTEGER startTimestamp;
LARGE_INTEGER stopTimestamp;
#if _DEBUG
bool isRunning = false;
#endif // _DEBUG
public:
NormalizedTimer()
{
LIMITED_METHOD_CONTRACT;
if (s_frequency.Load() == -1)
{
double frequency;
LARGE_INTEGER qpfValue;
QueryPerformanceFrequency(&qpfValue);
frequency = static_cast<double>(qpfValue.QuadPart);
frequency /= NormalizedTicksPerSecond;
s_frequency.Store(frequency);
}
startTimestamp.QuadPart = 0;
stopTimestamp.QuadPart = 0;
}
// ======================================================================================
// Start the timer
inline
void Start()
{
LIMITED_METHOD_CONTRACT;
_ASSERTE(!isRunning);
QueryPerformanceCounter(&startTimestamp);
#if _DEBUG
isRunning = true;
#endif // _DEBUG
}
// ======================================================================================
// Stop the timer. Must follow a matching Start() (asserted in debug builds).
inline
void Stop()
{
LIMITED_METHOD_CONTRACT;
_ASSERTE(isRunning);
QueryPerformanceCounter(&stopTimestamp);
#if _DEBUG
isRunning = false;
#endif // _DEBUG
}
// ======================================================================================
// Return the elapsed time of the most recent Start/Stop interval in 100ns ticks.
// The timer must have been started and then stopped before calling this
// (asserted in debug builds).
inline
int64_t Elapsed100nsTicks()
{
LIMITED_METHOD_CONTRACT;
_ASSERTE(!isRunning);
_ASSERTE(startTimestamp.QuadPart > 0);
_ASSERTE(stopTimestamp.QuadPart > 0);
return static_cast<int64_t>((stopTimestamp.QuadPart - startTimestamp.QuadPart) / s_frequency);
}
};
#ifdef _DEBUG
#define FORCEINLINE_NONDEBUG
#else
#define FORCEINLINE_NONDEBUG FORCEINLINE
#endif
#ifndef TARGET_UNIX
// Extract the file version from an executable.
HRESULT GetFileVersion(LPCWSTR wszFilePath, ULARGE_INTEGER* pFileVersion);
#endif // !TARGET_UNIX
#ifdef TARGET_64BIT
// We use a modified version of Daniel Lemire's fastmod algorithm (https://github.com/dotnet/runtime/pull/406),
// which allows us to avoid the long multiplication if the divisor is less than 2**31.
// This is a copy of HashHelpers.cs; see that implementation (or the linked PR) for more details.
inline UINT64 GetFastModMultiplier(UINT32 divisor)
{
return UINT64_MAX / divisor + 1;
}
inline UINT32 FastMod(UINT32 value, UINT32 divisor, UINT64 multiplier)
{
return (UINT32)(((((multiplier * value) >> 32) + 1) * divisor) >> 32);
}
#endif
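A hedged sketch of the intended use (the bucket-selection helper below is hypothetical): the multiplier is computed once per divisor, and FastMod then replaces `value % divisor` on 64-bit hosts.

```cpp
// Hypothetical hash-table bucket selection using the helpers above.
UINT32 GetBucket(UINT32 hash, UINT32 numBuckets, UINT64 fastModMultiplier)
{
    // fastModMultiplier must be GetFastModMultiplier(numBuckets), and numBuckets < 2^31.
#ifdef TARGET_64BIT
    return FastMod(hash, numBuckets, fastModMultiplier);
#else
    return hash % numBuckets;
#endif
}
```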
#endif /* _H_UTIL */
| 1 |
dotnet/runtime | 65,926 | FastMod for EEHashTable (Faster virtual generics) | Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR`, but that is a more complicated change, and I think this one is worth having too anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| EgorBo | "2022-02-27T11:34:01Z" | "2022-03-01T19:43:37Z" | a5a7836f8193414fe64232d5c386b6402eb70e19 | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | FastMod for EEHashTable (Faster virtual generics). Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR`, but that is a more complicated change, and I think this one is worth having too anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| ./src/mono/mono/utils/options.h | /**
* \file Runtime options
*
* Copyright 2020 Microsoft
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#ifndef __MONO_UTILS_FLAGS_H__
#define __MONO_UTILS_FLAGS_H__
#include <config.h>
#include <glib.h>
#include "mono/utils/mono-error.h"
/* Declare list of options */
/* Each option will declare an exported C variable named mono_opt_... */
MONO_BEGIN_DECLS
#define DEFINE_OPTION_FULL(flag_type, ctype, c_name, cmd_name, def_value, comment) \
MONO_API_DATA ctype mono_opt_##c_name;
#define DEFINE_OPTION_READONLY(flag_type, ctype, c_name, cmd_name, def_value, comment) \
static const ctype mono_opt_##c_name = def_value;
#include "options-def.h"
MONO_END_DECLS
void mono_options_print_usage (void);
void mono_options_parse_options (const char **args, int argc, int *out_argc, MonoError *error);
#endif
| /**
* \file Runtime options
*
* Copyright 2020 Microsoft
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#ifndef __MONO_UTILS_FLAGS_H__
#define __MONO_UTILS_FLAGS_H__
#include <config.h>
#include <glib.h>
#include "mono/utils/mono-error.h"
/* Declare list of options */
/* Each option will declare an exported C variable named mono_opt_... */
MONO_BEGIN_DECLS
#define DEFINE_OPTION_FULL(flag_type, ctype, c_name, cmd_name, def_value, comment) \
MONO_API_DATA ctype mono_opt_##c_name;
#define DEFINE_OPTION_READONLY(flag_type, ctype, c_name, cmd_name, def_value, comment) \
static const ctype mono_opt_##c_name = def_value;
#include "options-def.h"
MONO_END_DECLS
void mono_options_print_usage (void);
void mono_options_parse_options (const char **args, int argc, int *out_argc, MonoError *error);
#endif
| -1 |
dotnet/runtime | 65,926 | FastMod for EEHashTable (Faster virtual generics) | Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR`, but that is a more complicated change, and I think this one is worth having too anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| EgorBo | "2022-02-27T11:34:01Z" | "2022-03-01T19:43:37Z" | a5a7836f8193414fe64232d5c386b6402eb70e19 | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | FastMod for EEHashTable (Faster virtual generics). Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR`, but that is a more complicated change, and I think this one is worth having too anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| ./src/coreclr/inc/corhlprpriv.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*****************************************************************************
** **
** Corhlprpriv.h - **
** **
*****************************************************************************/
#ifndef __CORHLPRPRIV_H__
#define __CORHLPRPRIV_H__
#include "corhlpr.h"
#include "fstring.h"
#if defined(_MSC_VER) && defined(HOST_X86)
#pragma optimize("y", on) // If routines don't get inlined, don't pay the EBP frame penalty
#endif
//*****************************************************************************
//
//***** Utility helpers
//
//*****************************************************************************
#ifndef SOS_INCLUDE
//*****************************************************************************
//
// **** CQuickBytes
// This helper class is useful for cases where 90% of the time you allocate 512
// or less bytes for a data structure. This class contains a 512 byte buffer.
// Alloc() will return a pointer to this buffer if your allocation is small
// enough, otherwise it asks the heap for a larger buffer which is freed for
// you. No mutex locking is required for the small allocation case, making the
// code run faster, less heap fragmentation, etc... Each instance will allocate
// 520 bytes, so use accordingly.
//
//*****************************************************************************
namespace NSQuickBytesHelper
{
template <BOOL bThrow>
struct _AllocBytes;
template <>
struct _AllocBytes<TRUE>
{
static BYTE *Invoke(SIZE_T iItems)
{
return NEW_THROWS(iItems);
}
};
template <>
struct _AllocBytes<FALSE>
{
static BYTE *Invoke(SIZE_T iItems)
{
return NEW_NOTHROW(iItems);
}
};
};
void DECLSPEC_NORETURN ThrowHR(HRESULT hr);
template <SIZE_T SIZE, SIZE_T INCREMENT>
class CQuickMemoryBase
{
protected:
template <typename ELEM_T>
static ELEM_T Min(ELEM_T a, ELEM_T b)
{ return a < b ? a : b; }
template <typename ELEM_T>
static ELEM_T Max(ELEM_T a, ELEM_T b)
{ return a < b ? b : a; }
// bGrow - indicates that this is a resize and that the original data
// needs to be copied over.
// bThrow - indicates whether or not memory allocations will throw.
template <BOOL bGrow, BOOL bThrow>
void *_Alloc(SIZE_T iItems)
{
#if defined(_BLD_CLR) && defined(_DEBUG)
{ // Exercise heap for OOM-fault injection purposes
BYTE * pb = NSQuickBytesHelper::_AllocBytes<bThrow>::Invoke(iItems);
_ASSERTE(!bThrow || pb != NULL); // _AllocBytes would have thrown if bThrow == TRUE
if (pb == NULL) return NULL; // bThrow == FALSE and we failed to allocate memory
delete [] pb; // Success, delete allocated memory.
}
#endif
if (iItems <= cbTotal)
{ // Fits within existing memory allocation
iSize = iItems;
}
else if (iItems <= SIZE)
{ // Will fit in internal buffer.
if (pbBuff == NULL)
{ // Any previous allocation is in the internal buffer and the new
// allocation fits in the internal buffer, so just update the size.
iSize = iItems;
cbTotal = SIZE;
}
else
{ // There was a previous allocation, sitting in pbBuff
if (bGrow)
{ // If growing, need to copy any existing data over.
memcpy(&rgData[0], pbBuff, Min(cbTotal, SIZE));
}
delete [] pbBuff;
pbBuff = NULL;
iSize = iItems;
cbTotal = SIZE;
}
}
else
{ // Need to allocate a new buffer
SIZE_T cbTotalNew = iItems + (bGrow ? INCREMENT : 0);
BYTE * pbBuffNew = NSQuickBytesHelper::_AllocBytes<bThrow>::Invoke(cbTotalNew);
if (!bThrow && pbBuffNew == NULL)
{ // Allocation failed. Zero out structure.
if (pbBuff != NULL)
{ // Delete old buffer
delete [] pbBuff;
}
pbBuff = NULL;
iSize = 0;
cbTotal = 0;
return NULL;
}
if (bGrow && cbTotal > 0)
{ // If growing, need to copy any existing data over.
memcpy(pbBuffNew, (BYTE *)Ptr(), Min(cbTotal, cbTotalNew));
}
if (pbBuff != NULL)
{ // Delete old pre-existing buffer
delete [] pbBuff;
pbBuff = NULL;
}
pbBuff = pbBuffNew;
cbTotal = cbTotalNew;
iSize = iItems;
}
return Ptr();
}
public:
void Init()
{
pbBuff = 0;
iSize = 0;
cbTotal = SIZE;
}
void Destroy()
{
if (pbBuff)
{
delete [] pbBuff;
pbBuff = 0;
}
}
void *AllocThrows(SIZE_T iItems)
{
return _Alloc<FALSE /*bGrow*/, TRUE /*bThrow*/>(iItems);
}
void *AllocNoThrow(SIZE_T iItems)
{
return _Alloc<FALSE /*bGrow*/, FALSE /*bThrow*/>(iItems);
}
void ReSizeThrows(SIZE_T iItems)
{
_Alloc<TRUE /*bGrow*/, TRUE /*bThrow*/>(iItems);
}
#ifdef __GNUC__
// This makes sure that we will not get an undefined symbol
// when building a release version of libcoreclr using LLVM/GCC.
__attribute__((used))
#endif // __GNUC__
HRESULT ReSizeNoThrow(SIZE_T iItems);
void Shrink(SIZE_T iItems)
{
_ASSERTE(iItems <= cbTotal);
iSize = iItems;
}
operator PVOID()
{
return ((pbBuff) ? pbBuff : (PVOID)&rgData[0]);
}
void *Ptr()
{
return ((pbBuff) ? pbBuff : (PVOID)&rgData[0]);
}
const void *Ptr() const
{
return ((pbBuff) ? pbBuff : (PVOID)&rgData[0]);
}
SIZE_T Size() const
{
return (iSize);
}
SIZE_T MaxSize() const
{
return (cbTotal);
}
void Maximize()
{
iSize = cbTotal;
}
// Convert UTF8 string to UNICODE string, optimized for speed
HRESULT ConvertUtf8_UnicodeNoThrow(const char * utf8str)
{
bool allAscii;
DWORD length;
HRESULT hr = FString::Utf8_Unicode_Length(utf8str, & allAscii, & length);
if (SUCCEEDED(hr))
{
LPWSTR buffer = (LPWSTR) AllocNoThrow((length + 1) * sizeof(WCHAR));
if (buffer == NULL)
{
hr = E_OUTOFMEMORY;
}
else
{
hr = FString::Utf8_Unicode(utf8str, allAscii, buffer, length);
}
}
return hr;
}
// Convert UTF8 string to UNICODE string, optimized for speed
void ConvertUtf8_Unicode(const char * utf8str)
{
bool allAscii;
DWORD length;
HRESULT hr = FString::Utf8_Unicode_Length(utf8str, & allAscii, & length);
if (SUCCEEDED(hr))
{
LPWSTR buffer = (LPWSTR) AllocThrows((length + 1) * sizeof(WCHAR));
hr = FString::Utf8_Unicode(utf8str, allAscii, buffer, length);
}
if (FAILED(hr))
{
ThrowHR(hr);
}
}
// Convert UNICODE string to UTF8 string, optimized for speed
void ConvertUnicode_Utf8(const WCHAR * pString)
{
bool allAscii;
DWORD length;
HRESULT hr = FString::Unicode_Utf8_Length(pString, & allAscii, & length);
if (SUCCEEDED(hr))
{
LPSTR buffer = (LPSTR) AllocThrows((length + 1) * sizeof(char));
hr = FString::Unicode_Utf8(pString, allAscii, buffer, length);
}
if (FAILED(hr))
{
ThrowHR(hr);
}
}
// Copy single byte string and hold it
const char * SetStringNoThrow(const char * pStr, SIZE_T len)
{
LPSTR buffer = (LPSTR) AllocNoThrow(len + 1);
if (buffer != NULL)
{
memcpy(buffer, pStr, len);
buffer[len] = 0;
}
return buffer;
}
#ifdef DACCESS_COMPILE
void
EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
{
// Assume that 'this' is enumerated, either explicitly
// or because this class is embedded in another.
DacEnumMemoryRegion(dac_cast<TADDR>(pbBuff), iSize);
}
#endif // DACCESS_COMPILE
BYTE *pbBuff;
SIZE_T iSize; // number of bytes used
SIZE_T cbTotal; // total bytes allocated in the buffer
// use UINT64 to enforce the alignment of the memory
UINT64 rgData[(SIZE+sizeof(UINT64)-1)/sizeof(UINT64)];
};
// These should be multiples of 8 so that data can be naturally aligned.
#define CQUICKBYTES_BASE_SIZE 512
#define CQUICKBYTES_INCREMENTAL_SIZE 128
class CQuickBytesBase : public CQuickMemoryBase<CQUICKBYTES_BASE_SIZE, CQUICKBYTES_INCREMENTAL_SIZE>
{
};
class CQuickBytes : public CQuickBytesBase
{
public:
CQuickBytes()
{
Init();
}
~CQuickBytes()
{
Destroy();
}
};
/* to be used as static variable - no constructor/destructor, assumes zero
initialized memory */
class CQuickBytesStatic : public CQuickBytesBase
{
};
template <SIZE_T CQUICKBYTES_BASE_SPECIFY_SIZE>
class CQuickBytesSpecifySizeBase : public CQuickMemoryBase<CQUICKBYTES_BASE_SPECIFY_SIZE, CQUICKBYTES_INCREMENTAL_SIZE>
{
};
template <SIZE_T CQUICKBYTES_BASE_SPECIFY_SIZE>
class CQuickBytesSpecifySize : public CQuickBytesSpecifySizeBase<CQUICKBYTES_BASE_SPECIFY_SIZE>
{
public:
CQuickBytesSpecifySize()
{
this->Init();
}
~CQuickBytesSpecifySize()
{
this->Destroy();
}
};
/* to be used as static variable - no constructor/destructor, assumes zero
initialized memory */
template <SIZE_T CQUICKBYTES_BASE_SPECIFY_SIZE>
class CQuickBytesSpecifySizeStatic : public CQuickBytesSpecifySizeBase<CQUICKBYTES_BASE_SPECIFY_SIZE>
{
};
template <class T> class CQuickArrayBase : public CQuickBytesBase
{
public:
T* AllocThrows(SIZE_T iItems)
{
CheckOverflowThrows(iItems);
return (T*)CQuickBytesBase::AllocThrows(iItems * sizeof(T));
}
void ReSizeThrows(SIZE_T iItems)
{
CheckOverflowThrows(iItems);
CQuickBytesBase::ReSizeThrows(iItems * sizeof(T));
}
T* AllocNoThrow(SIZE_T iItems)
{
if (!CheckOverflowNoThrow(iItems))
{
return NULL;
}
return (T*)CQuickBytesBase::AllocNoThrow(iItems * sizeof(T));
}
HRESULT ReSizeNoThrow(SIZE_T iItems)
{
if (!CheckOverflowNoThrow(iItems))
{
return E_OUTOFMEMORY;
}
return CQuickBytesBase::ReSizeNoThrow(iItems * sizeof(T));
}
void Shrink(SIZE_T iItems)
{
CQuickBytesBase::Shrink(iItems * sizeof(T));
}
T* Ptr()
{
return (T*) CQuickBytesBase::Ptr();
}
const T* Ptr() const
{
return (T*) CQuickBytesBase::Ptr();
}
SIZE_T Size() const
{
return CQuickBytesBase::Size() / sizeof(T);
}
SIZE_T MaxSize() const
{
return CQuickBytesBase::cbTotal / sizeof(T);
}
T& operator[] (SIZE_T ix)
{
_ASSERTE(ix < Size());
return *(Ptr() + ix);
}
const T& operator[] (SIZE_T ix) const
{
_ASSERTE(ix < Size());
return *(Ptr() + ix);
}
private:
inline
BOOL CheckOverflowNoThrow(SIZE_T iItems)
{
SIZE_T totalSize = iItems * sizeof(T);
if (totalSize / sizeof(T) != iItems)
{
return FALSE;
}
return TRUE;
}
inline
void CheckOverflowThrows(SIZE_T iItems)
{
if (!CheckOverflowNoThrow(iItems))
{
THROW_OUT_OF_MEMORY();
}
}
};
template <class T> class CQuickArray : public CQuickArrayBase<T>
{
public:
CQuickArray<T>()
{
this->Init();
}
~CQuickArray<T>()
{
this->Destroy();
}
};
// This is actually more of a stack with array access. Essentially, you can
// only add elements through Push and remove them through Pop, but you can
// access and modify any random element with the index operator. You cannot
// access elements that have not been added.
template <class T>
class CQuickArrayList : protected CQuickArray<T>
{
private:
SIZE_T m_curSize;
public:
// Make these specific functions public.
using CQuickArray<T>::AllocThrows;
using CQuickArray<T>::ReSizeThrows;
using CQuickArray<T>::AllocNoThrow;
using CQuickArray<T>::ReSizeNoThrow;
using CQuickArray<T>::MaxSize;
using CQuickArray<T>::Ptr;
CQuickArrayList()
: m_curSize(0)
{
this->Init();
}
~CQuickArrayList()
{
this->Destroy();
}
// Can only access values that have been pushed.
T& operator[] (SIZE_T ix)
{
_ASSERTE(ix < m_curSize);
return CQuickArray<T>::operator[](ix);
}
// Can only access values that have been pushed.
const T& operator[] (SIZE_T ix) const
{
_ASSERTE(ix < m_curSize);
return CQuickArray<T>::operator[](ix);
}
// THROWS: Resizes if necessary.
void Push(const T & value)
{
// Resize if necessary - throws.
if (m_curSize + 1 >= CQuickArray<T>::Size())
ReSizeThrows((m_curSize + 1) * 2);
// Append element to end of array.
_ASSERTE(m_curSize + 1 < CQuickArray<T>::Size());
SIZE_T ix = m_curSize++;
(*this)[ix] = value;
}
// NOTHROW: Resizes if necessary.
BOOL PushNoThrow(const T & value)
{
// Resize if necessary - nothrow.
if (m_curSize + 1 >= CQuickArray<T>::Size()) {
if (ReSizeNoThrow((m_curSize + 1) * 2) != NOERROR)
return FALSE;
}
// Append element to end of array.
_ASSERTE(m_curSize + 1 < CQuickArray<T>::Size());
SIZE_T ix = m_curSize++;
(*this)[ix] = value;
return TRUE;
}
T Pop()
{
_ASSERTE(m_curSize > 0);
T retval = (*this)[m_curSize - 1];
INDEBUG(ZeroMemory(&(this->Ptr()[m_curSize - 1]), sizeof(T));)
--m_curSize;
return retval;
}
SIZE_T Size() const
{
return m_curSize;
}
void Shrink()
{
CQuickArray<T>::Shrink(m_curSize);
}
};
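A short usage sketch (hypothetical tokens): elements go in through Push and come out through Pop, while operator[] gives random access to whatever has been pushed so far.

```cpp
// Hypothetical stack-with-array-access usage.
void CollectTokens()
{
    CQuickArrayList<mdToken> tokens;
    tokens.Push(mdTypeDefNil);        // resizes (and may throw) as needed
    tokens.Push(mdMethodDefNil);
    mdToken last = tokens.Pop();      // removes the most recently pushed element
    _ASSERTE(tokens.Size() == 1);     // only pushed-and-not-popped elements are accessible
    mdToken first = tokens[0];
    (void)last; (void)first;
}
```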
/* to be used as static variable - no constructor/destructor, assumes zero
initialized memory */
template <class T> class CQuickArrayStatic : public CQuickArrayBase<T>
{
};
typedef CQuickArrayBase<WCHAR> CQuickWSTRBase;
typedef CQuickArray<WCHAR> CQuickWSTR;
typedef CQuickArrayStatic<WCHAR> CQuickWSTRStatic;
typedef CQuickArrayBase<CHAR> CQuickSTRBase;
typedef CQuickArray<CHAR> CQuickSTR;
typedef CQuickArrayStatic<CHAR> CQuickSTRStatic;
class RidBitmap
{
public:
HRESULT InsertToken(mdToken token)
{
HRESULT hr = S_OK;
mdToken rid = RidFromToken(token);
SIZE_T index = rid / 8;
BYTE bit = (1 << (rid % 8));
if (index >= buffer.Size())
{
SIZE_T oldSize = buffer.Size();
SIZE_T newSize = index+1+oldSize/8;
IfFailRet(buffer.ReSizeNoThrow(newSize));
memset(&buffer[oldSize], 0, newSize-oldSize);
}
buffer[index] |= bit;
return hr;
}
bool IsTokenInBitmap(mdToken token)
{
mdToken rid = RidFromToken(token);
SIZE_T index = rid / 8;
BYTE bit = (1 << (rid % 8));
return ((index < buffer.Size()) && (buffer[index] & bit));
}
void Reset()
{
if (buffer.Size())
{
memset(&buffer[0], 0, buffer.Size());
}
}
private:
CQuickArray<BYTE> buffer;
};
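A minimal sketch, assuming the RidBitmap above: the bitmap records which RIDs have already been seen, so a caller can cheaply skip duplicates (the function name is illustrative only).

```cpp
// Hypothetical duplicate-token filter.
HRESULT VisitTokenOnce(RidBitmap &seen, mdToken token, bool *pFirstVisit)
{
    *pFirstVisit = !seen.IsTokenInBitmap(token);
    return *pFirstVisit ? seen.InsertToken(token) : S_OK;
}
```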
//*****************************************************************************
//
//***** Signature helpers
//
//*****************************************************************************
HRESULT _CountBytesOfOneArg(
PCCOR_SIGNATURE pbSig,
ULONG *pcbTotal);
HRESULT _GetFixedSigOfVarArg( // S_OK or error.
PCCOR_SIGNATURE pvSigBlob, // [IN] point to a blob of CLR signature
ULONG cbSigBlob, // [IN] size of signature
CQuickBytes *pqbSig, // [OUT] output buffer for fixed part of VarArg Signature
ULONG *pcbSigBlob); // [OUT] number of bytes written to the above output buffer
#endif //!SOS_INCLUDE
#if defined(_MSC_VER) && defined(TARGET_X86)
#pragma optimize("", on) // restore command line default optimizations
#endif
//---------------------------------------------------------------------------------------
//
// Reads compressed integer from buffer pData, fills the result to *pnDataOut. Advances buffer pointer.
// Doesn't read behind the end of the buffer (the end starts at pDataEnd).
//
inline
__checkReturn
HRESULT
CorSigUncompressData_EndPtr(
PCCOR_SIGNATURE & pData, // [IN,OUT] Buffer
PCCOR_SIGNATURE pDataEnd, // End of buffer
DWORD * pnDataOut) // [OUT] Compressed integer read from the buffer
{
_ASSERTE(pData <= pDataEnd);
HRESULT hr = S_OK;
INT_PTR cbDataSize = pDataEnd - pData;
if (cbDataSize > 4)
{ // Compressed integer cannot be bigger than 4 bytes
cbDataSize = 4;
}
DWORD dwDataSize = (DWORD)cbDataSize;
ULONG cbDataOutLength;
IfFailRet(CorSigUncompressData(
pData,
dwDataSize,
pnDataOut,
&cbDataOutLength));
pData += cbDataOutLength;
return hr;
} // CorSigUncompressData_EndPtr
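An illustrative sketch (hypothetical caller): the _EndPtr helpers advance the cursor on success and return META_E_BAD_SIGNATURE instead of reading past the end of the blob.

```cpp
// Hypothetical reader that pulls one compressed count out of a signature blob.
HRESULT ReadCompressedCount(PCCOR_SIGNATURE pSig, ULONG cbSig, DWORD *pnCount)
{
    PCCOR_SIGNATURE pCursor = pSig;
    PCCOR_SIGNATURE pEnd = pSig + cbSig;
    return CorSigUncompressData_EndPtr(pCursor, pEnd, pnCount);  // advances pCursor on success
}
```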
//---------------------------------------------------------------------------------------
//
// Reads CorElementType (1 byte) from buffer pData, fills the result to *pTypeOut. Advances buffer pointer.
// Doesn't read behind the end of the buffer (the end starts at pDataEnd).
//
inline
__checkReturn
HRESULT
CorSigUncompressElementType_EndPtr(
PCCOR_SIGNATURE & pData, // [IN,OUT] Buffer
PCCOR_SIGNATURE pDataEnd, // End of buffer
CorElementType * pTypeOut) // [OUT] ELEMENT_TYPE_* value read from the buffer
{
_ASSERTE(pData <= pDataEnd);
// We don't expect pData > pDataEnd, but the runtime check doesn't cost much and it is more secure in
// case the caller has a bug.
if (pData >= pDataEnd)
{ // No data
return META_E_BAD_SIGNATURE;
}
// Read 'type' as 1 byte
*pTypeOut = (CorElementType)*pData;
pData++;
return S_OK;
} // CorSigUncompressElementType_EndPtr
//---------------------------------------------------------------------------------------
//
// Reads pointer (4/8 bytes) from buffer pData, fills the result to *ppvPointerOut. Advances buffer pointer.
// Doesn't read behind the end of the buffer (the end starts at pDataEnd).
//
inline
__checkReturn
HRESULT
CorSigUncompressPointer_EndPtr(
PCCOR_SIGNATURE & pData, // [IN,OUT] Buffer
PCCOR_SIGNATURE pDataEnd, // End of buffer
void ** ppvPointerOut) // [OUT] Pointer value read from the buffer
{
_ASSERTE(pData <= pDataEnd);
// We could just skip this check as pointers should be only in trusted (and therefore correct)
// signatures and we check for that on the caller side, but it won't hurt to have this check and it will
// make it easier to catch invalid signatures in trusted code (e.g. IL stubs, NGEN images, etc.)
if (pData + sizeof(void *) > pDataEnd)
{ // Not enough data in the buffer
_ASSERTE(!"This signature is invalid. Note that caller should check that it is not coming from untrusted source!");
return META_E_BAD_SIGNATURE;
}
*ppvPointerOut = *(void * UNALIGNED *)pData;
pData += sizeof(void *);
return S_OK;
} // CorSigUncompressPointer_EndPtr
//---------------------------------------------------------------------------------------
//
// Reads compressed TypeDef/TypeRef/TypeSpec token, fills the result to *pnDataOut. Advances buffer pointer.
// Doesn't read behind the end of the buffer (the end starts at pDataEnd).
//
inline
__checkReturn
HRESULT
CorSigUncompressToken_EndPtr(
PCCOR_SIGNATURE & pData, // [IN,OUT] Buffer
PCCOR_SIGNATURE pDataEnd, // End of buffer
mdToken * ptkTokenOut) // [OUT] Token read from the buffer
{
_ASSERTE(pData <= pDataEnd);
HRESULT hr = S_OK;
INT_PTR cbDataSize = pDataEnd - pData;
if (cbDataSize > 4)
{ // Compressed token cannot be bigger than 4 bytes
cbDataSize = 4;
}
DWORD dwDataSize = (DWORD)cbDataSize;
uint32_t cbTokenOutLength;
IfFailRet(CorSigUncompressToken(
pData,
dwDataSize,
ptkTokenOut,
&cbTokenOutLength));
pData += cbTokenOutLength;
return hr;
} // CorSigUncompressToken_EndPtr
#endif // __CORHLPRPRIV_H__
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*****************************************************************************
** **
** Corhlprpriv.h - **
** **
*****************************************************************************/
#ifndef __CORHLPRPRIV_H__
#define __CORHLPRPRIV_H__
#include "corhlpr.h"
#include "fstring.h"
#if defined(_MSC_VER) && defined(HOST_X86)
#pragma optimize("y", on) // If routines don't get inlined, don't pay the EBP frame penalty
#endif
//*****************************************************************************
//
//***** Utility helpers
//
//*****************************************************************************
#ifndef SOS_INCLUDE
//*****************************************************************************
//
// **** CQuickBytes
// This helper class is useful for cases where 90% of the time you allocate 512
// bytes or fewer for a data structure. This class contains a 512 byte buffer.
// Alloc() will return a pointer to this buffer if your allocation is small
// enough, otherwise it asks the heap for a larger buffer which is freed for
// you. No mutex locking is required for the small allocation case, making the
// code run faster, with less heap fragmentation, etc. Each instance will allocate
// 520 bytes, so use accordingly.
//
//*****************************************************************************
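//
// Illustrative usage sketch (comment only, not compiled as part of this header;
// the caller code below is hypothetical):
//
//   CQuickBytes qb;
//   void * pv = qb.AllocNoThrow(128);   // small request, served from the inline buffer
//   if (pv != NULL)
//   {
//       memset(pv, 0, qb.Size());       // Size() reports the requested byte count
//   }
//   // qb's destructor releases any heap buffer a larger request would have needed.
//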
namespace NSQuickBytesHelper
{
template <BOOL bThrow>
struct _AllocBytes;
template <>
struct _AllocBytes<TRUE>
{
static BYTE *Invoke(SIZE_T iItems)
{
return NEW_THROWS(iItems);
}
};
template <>
struct _AllocBytes<FALSE>
{
static BYTE *Invoke(SIZE_T iItems)
{
return NEW_NOTHROW(iItems);
}
};
};
void DECLSPEC_NORETURN ThrowHR(HRESULT hr);
template <SIZE_T SIZE, SIZE_T INCREMENT>
class CQuickMemoryBase
{
protected:
template <typename ELEM_T>
static ELEM_T Min(ELEM_T a, ELEM_T b)
{ return a < b ? a : b; }
template <typename ELEM_T>
static ELEM_T Max(ELEM_T a, ELEM_T b)
{ return a < b ? b : a; }
// bGrow - indicates that this is a resize and that the original data
// needs to be copied over.
// bThrow - indicates whether or not memory allocations will throw.
template <BOOL bGrow, BOOL bThrow>
void *_Alloc(SIZE_T iItems)
{
#if defined(_BLD_CLR) && defined(_DEBUG)
{ // Exercise heap for OOM-fault injection purposes
BYTE * pb = NSQuickBytesHelper::_AllocBytes<bThrow>::Invoke(iItems);
_ASSERTE(!bThrow || pb != NULL); // _AllocBytes would have thrown if bThrow == TRUE
if (pb == NULL) return NULL; // bThrow == FALSE and we failed to allocate memory
delete [] pb; // Success, delete allocated memory.
}
#endif
if (iItems <= cbTotal)
{ // Fits within existing memory allocation
iSize = iItems;
}
else if (iItems <= SIZE)
{ // Will fit in internal buffer.
if (pbBuff == NULL)
{ // Any previous allocation is in the internal buffer and the new
// allocation fits in the internal buffer, so just update the size.
iSize = iItems;
cbTotal = SIZE;
}
else
{ // There was a previous allocation, sitting in pbBuff
if (bGrow)
{ // If growing, need to copy any existing data over.
memcpy(&rgData[0], pbBuff, Min(cbTotal, SIZE));
}
delete [] pbBuff;
pbBuff = NULL;
iSize = iItems;
cbTotal = SIZE;
}
}
else
{ // Need to allocate a new buffer
SIZE_T cbTotalNew = iItems + (bGrow ? INCREMENT : 0);
BYTE * pbBuffNew = NSQuickBytesHelper::_AllocBytes<bThrow>::Invoke(cbTotalNew);
if (!bThrow && pbBuffNew == NULL)
{ // Allocation failed. Zero out structure.
if (pbBuff != NULL)
{ // Delete old buffer
delete [] pbBuff;
}
pbBuff = NULL;
iSize = 0;
cbTotal = 0;
return NULL;
}
if (bGrow && cbTotal > 0)
{ // If growing, need to copy any existing data over.
memcpy(pbBuffNew, (BYTE *)Ptr(), Min(cbTotal, cbTotalNew));
}
if (pbBuff != NULL)
{ // Delete old pre-existing buffer
delete [] pbBuff;
pbBuff = NULL;
}
pbBuff = pbBuffNew;
cbTotal = cbTotalNew;
iSize = iItems;
}
return Ptr();
}
public:
void Init()
{
pbBuff = 0;
iSize = 0;
cbTotal = SIZE;
}
void Destroy()
{
if (pbBuff)
{
delete [] pbBuff;
pbBuff = 0;
}
}
void *AllocThrows(SIZE_T iItems)
{
return _Alloc<FALSE /*bGrow*/, TRUE /*bThrow*/>(iItems);
}
void *AllocNoThrow(SIZE_T iItems)
{
return _Alloc<FALSE /*bGrow*/, FALSE /*bThrow*/>(iItems);
}
void ReSizeThrows(SIZE_T iItems)
{
_Alloc<TRUE /*bGrow*/, TRUE /*bThrow*/>(iItems);
}
#ifdef __GNUC__
// This makes sure that we will not get an undefined symbol
// when building a release version of libcoreclr using LLVM/GCC.
__attribute__((used))
#endif // __GNUC__
HRESULT ReSizeNoThrow(SIZE_T iItems);
void Shrink(SIZE_T iItems)
{
_ASSERTE(iItems <= cbTotal);
iSize = iItems;
}
operator PVOID()
{
return ((pbBuff) ? pbBuff : (PVOID)&rgData[0]);
}
void *Ptr()
{
return ((pbBuff) ? pbBuff : (PVOID)&rgData[0]);
}
const void *Ptr() const
{
return ((pbBuff) ? pbBuff : (PVOID)&rgData[0]);
}
SIZE_T Size() const
{
return (iSize);
}
SIZE_T MaxSize() const
{
return (cbTotal);
}
void Maximize()
{
iSize = cbTotal;
}
// Convert UTF8 string to UNICODE string, optimized for speed
HRESULT ConvertUtf8_UnicodeNoThrow(const char * utf8str)
{
bool allAscii;
DWORD length;
HRESULT hr = FString::Utf8_Unicode_Length(utf8str, & allAscii, & length);
if (SUCCEEDED(hr))
{
LPWSTR buffer = (LPWSTR) AllocNoThrow((length + 1) * sizeof(WCHAR));
if (buffer == NULL)
{
hr = E_OUTOFMEMORY;
}
else
{
hr = FString::Utf8_Unicode(utf8str, allAscii, buffer, length);
}
}
return hr;
}
// Convert UTF8 string to UNICODE string, optimized for speed
void ConvertUtf8_Unicode(const char * utf8str)
{
bool allAscii;
DWORD length;
HRESULT hr = FString::Utf8_Unicode_Length(utf8str, & allAscii, & length);
if (SUCCEEDED(hr))
{
LPWSTR buffer = (LPWSTR) AllocThrows((length + 1) * sizeof(WCHAR));
hr = FString::Utf8_Unicode(utf8str, allAscii, buffer, length);
}
if (FAILED(hr))
{
ThrowHR(hr);
}
}
// Convert UNICODE string to UTF8 string, optimized for speed
void ConvertUnicode_Utf8(const WCHAR * pString)
{
bool allAscii;
DWORD length;
HRESULT hr = FString::Unicode_Utf8_Length(pString, & allAscii, & length);
if (SUCCEEDED(hr))
{
LPSTR buffer = (LPSTR) AllocThrows((length + 1) * sizeof(char));
hr = FString::Unicode_Utf8(pString, allAscii, buffer, length);
}
if (FAILED(hr))
{
ThrowHR(hr);
}
}
// Copy single byte string and hold it
const char * SetStringNoThrow(const char * pStr, SIZE_T len)
{
LPSTR buffer = (LPSTR) AllocNoThrow(len + 1);
if (buffer != NULL)
{
memcpy(buffer, pStr, len);
buffer[len] = 0;
}
return buffer;
}
#ifdef DACCESS_COMPILE
void
EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
{
// Assume that 'this' is enumerated, either explicitly
// or because this class is embedded in another.
DacEnumMemoryRegion(dac_cast<TADDR>(pbBuff), iSize);
}
#endif // DACCESS_COMPILE
BYTE *pbBuff;
SIZE_T iSize; // number of bytes used
SIZE_T cbTotal; // total bytes allocated in the buffer
// use UINT64 to enforce the alignment of the memory
UINT64 rgData[(SIZE+sizeof(UINT64)-1)/sizeof(UINT64)];
};
// These should be multiples of 8 so that data can be naturally aligned.
#define CQUICKBYTES_BASE_SIZE 512
#define CQUICKBYTES_INCREMENTAL_SIZE 128
class CQuickBytesBase : public CQuickMemoryBase<CQUICKBYTES_BASE_SIZE, CQUICKBYTES_INCREMENTAL_SIZE>
{
};
class CQuickBytes : public CQuickBytesBase
{
public:
CQuickBytes()
{
Init();
}
~CQuickBytes()
{
Destroy();
}
};
/* to be used as static variable - no constructor/destructor, assumes zero
initialized memory */
class CQuickBytesStatic : public CQuickBytesBase
{
};
template <SIZE_T CQUICKBYTES_BASE_SPECIFY_SIZE>
class CQuickBytesSpecifySizeBase : public CQuickMemoryBase<CQUICKBYTES_BASE_SPECIFY_SIZE, CQUICKBYTES_INCREMENTAL_SIZE>
{
};
template <SIZE_T CQUICKBYTES_BASE_SPECIFY_SIZE>
class CQuickBytesSpecifySize : public CQuickBytesSpecifySizeBase<CQUICKBYTES_BASE_SPECIFY_SIZE>
{
public:
CQuickBytesSpecifySize()
{
this->Init();
}
~CQuickBytesSpecifySize()
{
this->Destroy();
}
};
/* to be used as static variable - no constructor/destructor, assumes zero
initialized memory */
template <SIZE_T CQUICKBYTES_BASE_SPECIFY_SIZE>
class CQuickBytesSpecifySizeStatic : public CQuickBytesSpecifySizeBase<CQUICKBYTES_BASE_SPECIFY_SIZE>
{
};
template <class T> class CQuickArrayBase : public CQuickBytesBase
{
public:
T* AllocThrows(SIZE_T iItems)
{
CheckOverflowThrows(iItems);
return (T*)CQuickBytesBase::AllocThrows(iItems * sizeof(T));
}
void ReSizeThrows(SIZE_T iItems)
{
CheckOverflowThrows(iItems);
CQuickBytesBase::ReSizeThrows(iItems * sizeof(T));
}
T* AllocNoThrow(SIZE_T iItems)
{
if (!CheckOverflowNoThrow(iItems))
{
return NULL;
}
return (T*)CQuickBytesBase::AllocNoThrow(iItems * sizeof(T));
}
HRESULT ReSizeNoThrow(SIZE_T iItems)
{
if (!CheckOverflowNoThrow(iItems))
{
return E_OUTOFMEMORY;
}
return CQuickBytesBase::ReSizeNoThrow(iItems * sizeof(T));
}
void Shrink(SIZE_T iItems)
{
CQuickBytesBase::Shrink(iItems * sizeof(T));
}
T* Ptr()
{
return (T*) CQuickBytesBase::Ptr();
}
const T* Ptr() const
{
return (T*) CQuickBytesBase::Ptr();
}
SIZE_T Size() const
{
return CQuickBytesBase::Size() / sizeof(T);
}
SIZE_T MaxSize() const
{
return CQuickBytesBase::cbTotal / sizeof(T);
}
T& operator[] (SIZE_T ix)
{
_ASSERTE(ix < Size());
return *(Ptr() + ix);
}
const T& operator[] (SIZE_T ix) const
{
_ASSERTE(ix < Size());
return *(Ptr() + ix);
}
private:
inline
BOOL CheckOverflowNoThrow(SIZE_T iItems)
{
SIZE_T totalSize = iItems * sizeof(T);
if (totalSize / sizeof(T) != iItems)
{
return FALSE;
}
return TRUE;
}
inline
void CheckOverflowThrows(SIZE_T iItems)
{
if (!CheckOverflowNoThrow(iItems))
{
THROW_OUT_OF_MEMORY();
}
}
};
template <class T> class CQuickArray : public CQuickArrayBase<T>
{
public:
CQuickArray<T>()
{
this->Init();
}
~CQuickArray<T>()
{
this->Destroy();
}
};
// This is actually more of a stack with array access. Essentially, you can
// only add elements through Push and remove them through Pop, but you can
// access and modify any random element with the index operator. You cannot
// access elements that have not been added.
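//
// Illustrative usage sketch (comment only, not compiled; the caller code below is
// hypothetical):
//
//   CQuickArrayList<int> list;
//   list.Push(1);                  // may grow the underlying array; throws on OOM
//   list.Push(2);
//   int top = list.Pop();          // top == 2
//   _ASSERTE(list.Size() == 1);    // Size() counts pushed elements only
//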
template <class T>
class CQuickArrayList : protected CQuickArray<T>
{
private:
SIZE_T m_curSize;
public:
// Make these specific functions public.
using CQuickArray<T>::AllocThrows;
using CQuickArray<T>::ReSizeThrows;
using CQuickArray<T>::AllocNoThrow;
using CQuickArray<T>::ReSizeNoThrow;
using CQuickArray<T>::MaxSize;
using CQuickArray<T>::Ptr;
CQuickArrayList()
: m_curSize(0)
{
this->Init();
}
~CQuickArrayList()
{
this->Destroy();
}
// Can only access values that have been pushed.
T& operator[] (SIZE_T ix)
{
_ASSERTE(ix < m_curSize);
return CQuickArray<T>::operator[](ix);
}
// Can only access values that have been pushed.
const T& operator[] (SIZE_T ix) const
{
_ASSERTE(ix < m_curSize);
return CQuickArray<T>::operator[](ix);
}
// THROWS: Resizes if necessary.
void Push(const T & value)
{
        // Resize if necessary - throws.
if (m_curSize + 1 >= CQuickArray<T>::Size())
ReSizeThrows((m_curSize + 1) * 2);
// Append element to end of array.
_ASSERTE(m_curSize + 1 < CQuickArray<T>::Size());
SIZE_T ix = m_curSize++;
(*this)[ix] = value;
}
// NOTHROW: Resizes if necessary.
BOOL PushNoThrow(const T & value)
{
        // Resize if necessary - nothrow.
if (m_curSize + 1 >= CQuickArray<T>::Size()) {
if (ReSizeNoThrow((m_curSize + 1) * 2) != NOERROR)
return FALSE;
}
// Append element to end of array.
_ASSERTE(m_curSize + 1 < CQuickArray<T>::Size());
SIZE_T ix = m_curSize++;
(*this)[ix] = value;
return TRUE;
}
T Pop()
{
_ASSERTE(m_curSize > 0);
T retval = (*this)[m_curSize - 1];
INDEBUG(ZeroMemory(&(this->Ptr()[m_curSize - 1]), sizeof(T));)
--m_curSize;
return retval;
}
SIZE_T Size() const
{
return m_curSize;
}
void Shrink()
{
CQuickArray<T>::Shrink(m_curSize);
}
};
/* to be used as static variable - no constructor/destructor, assumes zero
initialized memory */
template <class T> class CQuickArrayStatic : public CQuickArrayBase<T>
{
};
typedef CQuickArrayBase<WCHAR> CQuickWSTRBase;
typedef CQuickArray<WCHAR> CQuickWSTR;
typedef CQuickArrayStatic<WCHAR> CQuickWSTRStatic;
typedef CQuickArrayBase<CHAR> CQuickSTRBase;
typedef CQuickArray<CHAR> CQuickSTR;
typedef CQuickArrayStatic<CHAR> CQuickSTRStatic;
class RidBitmap
{
public:
HRESULT InsertToken(mdToken token)
{
HRESULT hr = S_OK;
mdToken rid = RidFromToken(token);
SIZE_T index = rid / 8;
BYTE bit = (1 << (rid % 8));
if (index >= buffer.Size())
{
SIZE_T oldSize = buffer.Size();
SIZE_T newSize = index+1+oldSize/8;
IfFailRet(buffer.ReSizeNoThrow(newSize));
memset(&buffer[oldSize], 0, newSize-oldSize);
}
buffer[index] |= bit;
return hr;
}
bool IsTokenInBitmap(mdToken token)
{
mdToken rid = RidFromToken(token);
SIZE_T index = rid / 8;
BYTE bit = (1 << (rid % 8));
return ((index < buffer.Size()) && (buffer[index] & bit));
}
void Reset()
{
if (buffer.Size())
{
memset(&buffer[0], 0, buffer.Size());
}
}
private:
CQuickArray<BYTE> buffer;
};
//*****************************************************************************
//
//***** Signature helpers
//
//*****************************************************************************
HRESULT _CountBytesOfOneArg(
PCCOR_SIGNATURE pbSig,
ULONG *pcbTotal);
HRESULT _GetFixedSigOfVarArg( // S_OK or error.
PCCOR_SIGNATURE pvSigBlob, // [IN] point to a blob of CLR signature
ULONG cbSigBlob, // [IN] size of signature
CQuickBytes *pqbSig, // [OUT] output buffer for fixed part of VarArg Signature
ULONG *pcbSigBlob); // [OUT] number of bytes written to the above output buffer
#endif //!SOS_INCLUDE
#if defined(_MSC_VER) && defined(TARGET_X86)
#pragma optimize("", on) // restore command line default optimizations
#endif
//---------------------------------------------------------------------------------------
//
// Reads compressed integer from buffer pData, fills the result to *pnDataOut. Advances buffer pointer.
// Doesn't read behind the end of the buffer (the end starts at pDataEnd).
//
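// Illustrative caller pattern (comment only; pSig/pSigEnd are hypothetical names for a
// signature cursor and its end pointer):
//
//   DWORD cParams;
//   IfFailRet(CorSigUncompressData_EndPtr(pSig, pSigEnd, &cParams));
//   // pSig now points just past the compressed parameter count.
//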
inline
__checkReturn
HRESULT
CorSigUncompressData_EndPtr(
PCCOR_SIGNATURE & pData, // [IN,OUT] Buffer
PCCOR_SIGNATURE pDataEnd, // End of buffer
DWORD * pnDataOut) // [OUT] Compressed integer read from the buffer
{
_ASSERTE(pData <= pDataEnd);
HRESULT hr = S_OK;
INT_PTR cbDataSize = pDataEnd - pData;
if (cbDataSize > 4)
{ // Compressed integer cannot be bigger than 4 bytes
cbDataSize = 4;
}
DWORD dwDataSize = (DWORD)cbDataSize;
ULONG cbDataOutLength;
IfFailRet(CorSigUncompressData(
pData,
dwDataSize,
pnDataOut,
&cbDataOutLength));
pData += cbDataOutLength;
return hr;
} // CorSigUncompressData_EndPtr
//---------------------------------------------------------------------------------------
//
// Reads CorElementType (1 byte) from buffer pData, fills the result to *pTypeOut. Advances buffer pointer.
// Doesn't read behind the end of the buffer (the end starts at pDataEnd).
//
inline
__checkReturn
HRESULT
CorSigUncompressElementType_EndPtr(
PCCOR_SIGNATURE & pData, // [IN,OUT] Buffer
PCCOR_SIGNATURE pDataEnd, // End of buffer
CorElementType * pTypeOut) // [OUT] ELEMENT_TYPE_* value read from the buffer
{
_ASSERTE(pData <= pDataEnd);
// We don't expect pData > pDataEnd, but the runtime check doesn't cost much and it is more secure in
    // case the caller has a bug
if (pData >= pDataEnd)
{ // No data
return META_E_BAD_SIGNATURE;
}
// Read 'type' as 1 byte
*pTypeOut = (CorElementType)*pData;
pData++;
return S_OK;
} // CorSigUncompressElementType_EndPtr
//---------------------------------------------------------------------------------------
//
// Reads pointer (4/8 bytes) from buffer pData, fills the result to *ppvPointerOut. Advances buffer pointer.
// Doesn't read behind the end of the buffer (the end starts at pDataEnd).
//
inline
__checkReturn
HRESULT
CorSigUncompressPointer_EndPtr(
PCCOR_SIGNATURE & pData, // [IN,OUT] Buffer
PCCOR_SIGNATURE pDataEnd, // End of buffer
void ** ppvPointerOut) // [OUT] Pointer value read from the buffer
{
_ASSERTE(pData <= pDataEnd);
// We could just skip this check as pointers should be only in trusted (and therefore correct)
// signatures and we check for that on the caller side, but it won't hurt to have this check and it will
// make it easier to catch invalid signatures in trusted code (e.g. IL stubs, NGEN images, etc.)
if (pData + sizeof(void *) > pDataEnd)
{ // Not enough data in the buffer
_ASSERTE(!"This signature is invalid. Note that caller should check that it is not comming from untrusted source!");
return META_E_BAD_SIGNATURE;
}
*ppvPointerOut = *(void * UNALIGNED *)pData;
pData += sizeof(void *);
return S_OK;
} // CorSigUncompressPointer_EndPtr
//---------------------------------------------------------------------------------------
//
// Reads compressed TypeDef/TypeRef/TypeSpec token, fills the result to *pnDataOut. Advances buffer pointer.
// Doesn't read behind the end of the buffer (the end starts at pDataEnd).
//
inline
__checkReturn
HRESULT
CorSigUncompressToken_EndPtr(
PCCOR_SIGNATURE & pData, // [IN,OUT] Buffer
PCCOR_SIGNATURE pDataEnd, // End of buffer
mdToken * ptkTokenOut) // [OUT] Token read from the buffer
{
_ASSERTE(pData <= pDataEnd);
HRESULT hr = S_OK;
INT_PTR cbDataSize = pDataEnd - pData;
if (cbDataSize > 4)
{ // Compressed token cannot be bigger than 4 bytes
cbDataSize = 4;
}
DWORD dwDataSize = (DWORD)cbDataSize;
uint32_t cbTokenOutLength;
IfFailRet(CorSigUncompressToken(
pData,
dwDataSize,
ptkTokenOut,
&cbTokenOutLength));
pData += cbTokenOutLength;
return hr;
} // CorSigUncompressToken_EndPtr
#endif // __CORHLPRPRIV_H__
| -1 |
dotnet/runtime | 65,926 | FastMod for EEHashTable (Faster virtual generics) | Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR` but that is a more complicated change, and I think this one is worth having too anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
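
As a rough illustration of the general trick the title refers to (a Lemire-style fast modulus), the hot-path `%` by the bucket count can be replaced with a multiply/shift against a multiplier precomputed once per table size. This is not the actual EEHashTable change; the type and member names below are made up for illustration, and the 128-bit multiply assumes a GCC/Clang-style `unsigned __int128`:

```cpp
#include <cstdint>

struct FastModDivisor
{
    uint32_t divisor;
    uint64_t multiplier; // precomputed once per table size

    explicit FastModDivisor(uint32_t d)
        : divisor(d), multiplier(UINT64_MAX / d + 1) {}

    // Equivalent to value % divisor, but without a hardware divide on the lookup path.
    uint32_t Mod(uint32_t value) const
    {
        uint64_t lowbits = multiplier * value; // wraps mod 2^64 by design
        return (uint32_t)(((unsigned __int128)lowbits * divisor) >> 64);
    }
};
```

A hash table built this way stores the multiplier next to the bucket count and computes `bucket = fm.Mod(hash)` instead of `hash % bucketCount`.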
| EgorBo | "2022-02-27T11:34:01Z" | "2022-03-01T19:43:37Z" | a5a7836f8193414fe64232d5c386b6402eb70e19 | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | FastMod for EEHashTable (Faster virtual generics). Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR` but that is a more complicated change, and I think this one is worth having too anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| ./src/coreclr/tools/superpmi/superpmi-shim-simple/icorjitcompiler.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#ifndef _ICorJitCompiler
#define _ICorJitCompiler
#include "runtimedetails.h"
class interceptor_ICJC : public ICorJitCompiler
{
#include "icorjitcompilerimpl.h"
public:
// Added to help us track the original icjc and be able to easily indirect to it.
ICorJitCompiler* original_ICorJitCompiler;
};
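// Illustrative forwarding pattern (comment only; the real method bodies come from
// icorjitcompilerimpl.h and may differ from this sketch):
//
//   CorJitResult interceptor_ICJC::compileMethod(ICorJitInfo*         comp,
//                                                CORINFO_METHOD_INFO* info,
//                                                unsigned             flags,
//                                                uint8_t**            entryAddress,
//                                                uint32_t*            nativeSizeOfCode)
//   {
//       return original_ICorJitCompiler->compileMethod(comp, info, flags, entryAddress, nativeSizeOfCode);
//   }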
#endif
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#ifndef _ICorJitCompiler
#define _ICorJitCompiler
#include "runtimedetails.h"
class interceptor_ICJC : public ICorJitCompiler
{
#include "icorjitcompilerimpl.h"
public:
// Added to help us track the original icjc and be able to easily indirect to it.
ICorJitCompiler* original_ICorJitCompiler;
};
#endif
| -1 |
dotnet/runtime | 65,926 | FastMod for EEHashTable (Faster virtual generics) | Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR` but that is a more complicated change, and I think this one is worth having too anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| EgorBo | "2022-02-27T11:34:01Z" | "2022-03-01T19:43:37Z" | a5a7836f8193414fe64232d5c386b6402eb70e19 | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | FastMod for EEHashTable (Faster virtual generics). Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR` but that is a more complicated change, and I think this one is worth having too anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| ./src/mono/mono/utils/memcheck.h |
/*
----------------------------------------------------------------
Notice that the following BSD-style license applies to this one
file (memcheck.h) only. The rest of Valgrind is licensed under the
terms of the GNU General Public License, version 2, unless
otherwise indicated. See the COPYING file in the source
distribution for details.
----------------------------------------------------------------
This file is part of MemCheck, a heavyweight Valgrind tool for
detecting memory errors.
Copyright (C) 2000-2013 Julian Seward. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. The origin of this software must not be misrepresented; you must
not claim that you wrote the original software. If you use this
software in a product, an acknowledgment in the product
documentation would be appreciated but is not required.
3. Altered source versions must be plainly marked as such, and must
not be misrepresented as being the original software.
4. The name of the author may not be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
----------------------------------------------------------------
Notice that the above BSD-style license applies to this one file
(memcheck.h) only. The entire rest of Valgrind is licensed under
the terms of the GNU General Public License, version 2. See the
COPYING file in the source distribution for details.
----------------------------------------------------------------
*/
#ifndef __MEMCHECK_H
#define __MEMCHECK_H
/* This file is for inclusion into client (your!) code.
You can use these macros to manipulate and query memory permissions
inside your own programs.
See comment near the top of valgrind.h on how to use them.
*/
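/* Illustrative client-code sketch (not part of the Valgrind distribution; buf and
   len below are hypothetical):

     VALGRIND_MAKE_MEM_NOACCESS(buf, len);     (accesses to buf are now reported)
     VALGRIND_MAKE_MEM_UNDEFINED(buf, len);    (addressable again, contents undefined)
     VALGRIND_MAKE_MEM_DEFINED(buf, len);      (addressable and fully defined)
     VALGRIND_CHECK_MEM_IS_DEFINED(buf, len);  (reports the first undefined byte, if any)
*/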
#include "valgrind.h"
/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
This enum comprises an ABI exported by Valgrind to programs
which use client requests. DO NOT CHANGE THE ORDER OF THESE
ENTRIES, NOR DELETE ANY -- add new ones at the end. */
typedef
enum {
VG_USERREQ__MAKE_MEM_NOACCESS = VG_USERREQ_TOOL_BASE('M','C'),
VG_USERREQ__MAKE_MEM_UNDEFINED,
VG_USERREQ__MAKE_MEM_DEFINED,
VG_USERREQ__DISCARD,
VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE,
VG_USERREQ__CHECK_MEM_IS_DEFINED,
VG_USERREQ__DO_LEAK_CHECK,
VG_USERREQ__COUNT_LEAKS,
VG_USERREQ__GET_VBITS,
VG_USERREQ__SET_VBITS,
VG_USERREQ__CREATE_BLOCK,
VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE,
/* Not next to VG_USERREQ__COUNT_LEAKS because it was added later. */
VG_USERREQ__COUNT_LEAK_BLOCKS,
VG_USERREQ__ENABLE_ADDR_ERROR_REPORTING_IN_RANGE,
VG_USERREQ__DISABLE_ADDR_ERROR_REPORTING_IN_RANGE,
/* This is just for memcheck's internal use - don't use it */
_VG_USERREQ__MEMCHECK_RECORD_OVERLAP_ERROR
= VG_USERREQ_TOOL_BASE('M','C') + 256
} Vg_MemCheckClientRequest;
/* Client-code macros to manipulate the state of memory. */
/* Mark memory at _qzz_addr as unaddressable for _qzz_len bytes. */
#define VALGRIND_MAKE_MEM_NOACCESS(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__MAKE_MEM_NOACCESS, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
/* Similarly, mark memory at _qzz_addr as addressable but undefined
for _qzz_len bytes. */
#define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__MAKE_MEM_UNDEFINED, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
/* Similarly, mark memory at _qzz_addr as addressable and defined
for _qzz_len bytes. */
#define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__MAKE_MEM_DEFINED, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
/* Similar to VALGRIND_MAKE_MEM_DEFINED except that addressability is
not altered: bytes which are addressable are marked as defined,
but those which are not addressable are left unchanged. */
#define VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
/* Create a block-description handle. The description is an ascii
string which is included in any messages pertaining to addresses
within the specified memory range. Has no other effect on the
properties of the memory range. */
#define VALGRIND_CREATE_BLOCK(_qzz_addr,_qzz_len, _qzz_desc) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__CREATE_BLOCK, \
(_qzz_addr), (_qzz_len), (_qzz_desc), \
0, 0)
/* Discard a block-description-handle. Returns 1 for an
invalid handle, 0 for a valid handle. */
#define VALGRIND_DISCARD(_qzz_blkindex) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__DISCARD, \
0, (_qzz_blkindex), 0, 0, 0)
/* Client-code macros to check the state of memory. */
/* Check that memory at _qzz_addr is addressable for _qzz_len bytes.
If suitable addressibility is not established, Valgrind prints an
error message and returns the address of the first offending byte.
Otherwise it returns zero. */
#define VALGRIND_CHECK_MEM_IS_ADDRESSABLE(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
/* Check that memory at _qzz_addr is addressable and defined for
_qzz_len bytes. If suitable addressibility and definedness are not
established, Valgrind prints an error message and returns the
address of the first offending byte. Otherwise it returns zero. */
#define VALGRIND_CHECK_MEM_IS_DEFINED(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__CHECK_MEM_IS_DEFINED, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
/* Use this macro to force the definedness and addressibility of an
lvalue to be checked. If suitable addressibility and definedness
are not established, Valgrind prints an error message and returns
the address of the first offending byte. Otherwise it returns
zero. */
#define VALGRIND_CHECK_VALUE_IS_DEFINED(__lvalue) \
VALGRIND_CHECK_MEM_IS_DEFINED( \
(volatile unsigned char *)&(__lvalue), \
(unsigned long)(sizeof (__lvalue)))
/* Do a full memory leak check (like --leak-check=full) mid-execution. */
#define VALGRIND_DO_LEAK_CHECK \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DO_LEAK_CHECK, \
0, 0, 0, 0, 0)
/* Same as VALGRIND_DO_LEAK_CHECK but only showing the entries for
which there was an increase in leaked bytes or leaked nr of blocks
since the previous leak search. */
#define VALGRIND_DO_ADDED_LEAK_CHECK \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DO_LEAK_CHECK, \
0, 1, 0, 0, 0)
/* Same as VALGRIND_DO_ADDED_LEAK_CHECK but showing entries with
increased or decreased leaked bytes/blocks since previous leak
search. */
#define VALGRIND_DO_CHANGED_LEAK_CHECK \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DO_LEAK_CHECK, \
0, 2, 0, 0, 0)
/* Do a summary memory leak check (like --leak-check=summary) mid-execution. */
#define VALGRIND_DO_QUICK_LEAK_CHECK \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DO_LEAK_CHECK, \
1, 0, 0, 0, 0)
/* Return number of leaked, dubious, reachable and suppressed bytes found by
all previous leak checks. They must be lvalues. */
#define VALGRIND_COUNT_LEAKS(leaked, dubious, reachable, suppressed) \
/* For safety on 64-bit platforms we assign the results to private
unsigned long variables, then assign these to the lvalues the user
specified, which works no matter what type 'leaked', 'dubious', etc
are. We also initialise '_qzz_leaked', etc because
VG_USERREQ__COUNT_LEAKS doesn't mark the values returned as
defined. */ \
{ \
unsigned long _qzz_leaked = 0, _qzz_dubious = 0; \
unsigned long _qzz_reachable = 0, _qzz_suppressed = 0; \
VALGRIND_DO_CLIENT_REQUEST_STMT( \
VG_USERREQ__COUNT_LEAKS, \
&_qzz_leaked, &_qzz_dubious, \
&_qzz_reachable, &_qzz_suppressed, 0); \
leaked = _qzz_leaked; \
dubious = _qzz_dubious; \
reachable = _qzz_reachable; \
suppressed = _qzz_suppressed; \
}
/* Return number of leaked, dubious, reachable and suppressed bytes found by
all previous leak checks. They must be lvalues. */
#define VALGRIND_COUNT_LEAK_BLOCKS(leaked, dubious, reachable, suppressed) \
/* For safety on 64-bit platforms we assign the results to private
unsigned long variables, then assign these to the lvalues the user
specified, which works no matter what type 'leaked', 'dubious', etc
are. We also initialise '_qzz_leaked', etc because
VG_USERREQ__COUNT_LEAKS doesn't mark the values returned as
defined. */ \
{ \
unsigned long _qzz_leaked = 0, _qzz_dubious = 0; \
unsigned long _qzz_reachable = 0, _qzz_suppressed = 0; \
VALGRIND_DO_CLIENT_REQUEST_STMT( \
VG_USERREQ__COUNT_LEAK_BLOCKS, \
&_qzz_leaked, &_qzz_dubious, \
&_qzz_reachable, &_qzz_suppressed, 0); \
leaked = _qzz_leaked; \
dubious = _qzz_dubious; \
reachable = _qzz_reachable; \
suppressed = _qzz_suppressed; \
}
/* Get the validity data for addresses [zza..zza+zznbytes-1] and copy it
into the provided zzvbits array. Return values:
0 if not running on valgrind
1 success
2 [previously indicated unaligned arrays; these are now allowed]
3 if any parts of zzsrc/zzvbits are not addressable.
The metadata is not copied in cases 0, 2 or 3 so it should be
impossible to segfault your system by using this call.
*/
#define VALGRIND_GET_VBITS(zza,zzvbits,zznbytes) \
(unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__GET_VBITS, \
(const char*)(zza), \
(char*)(zzvbits), \
(zznbytes), 0, 0)
/* Set the validity data for addresses [zza..zza+zznbytes-1], copying it
from the provided zzvbits array. Return values:
0 if not running on valgrind
1 success
2 [previously indicated unaligned arrays; these are now allowed]
3 if any parts of zza/zzvbits are not addressable.
The metadata is not copied in cases 0, 2 or 3 so it should be
impossible to segfault your system by using this call.
*/
#define VALGRIND_SET_VBITS(zza,zzvbits,zznbytes) \
(unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__SET_VBITS, \
(const char*)(zza), \
(const char*)(zzvbits), \
(zznbytes), 0, 0 )
/* Disable and re-enable reporting of addressing errors in the
specified address range. */
#define VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__DISABLE_ADDR_ERROR_REPORTING_IN_RANGE, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
#define VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__ENABLE_ADDR_ERROR_REPORTING_IN_RANGE, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
#endif
|
/*
----------------------------------------------------------------
Notice that the following BSD-style license applies to this one
file (memcheck.h) only. The rest of Valgrind is licensed under the
terms of the GNU General Public License, version 2, unless
otherwise indicated. See the COPYING file in the source
distribution for details.
----------------------------------------------------------------
This file is part of MemCheck, a heavyweight Valgrind tool for
detecting memory errors.
Copyright (C) 2000-2013 Julian Seward. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. The origin of this software must not be misrepresented; you must
not claim that you wrote the original software. If you use this
software in a product, an acknowledgment in the product
documentation would be appreciated but is not required.
3. Altered source versions must be plainly marked as such, and must
not be misrepresented as being the original software.
4. The name of the author may not be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
----------------------------------------------------------------
Notice that the above BSD-style license applies to this one file
(memcheck.h) only. The entire rest of Valgrind is licensed under
the terms of the GNU General Public License, version 2. See the
COPYING file in the source distribution for details.
----------------------------------------------------------------
*/
#ifndef __MEMCHECK_H
#define __MEMCHECK_H
/* This file is for inclusion into client (your!) code.
You can use these macros to manipulate and query memory permissions
inside your own programs.
See comment near the top of valgrind.h on how to use them.
*/
#include "valgrind.h"
/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
This enum comprises an ABI exported by Valgrind to programs
which use client requests. DO NOT CHANGE THE ORDER OF THESE
ENTRIES, NOR DELETE ANY -- add new ones at the end. */
typedef
enum {
VG_USERREQ__MAKE_MEM_NOACCESS = VG_USERREQ_TOOL_BASE('M','C'),
VG_USERREQ__MAKE_MEM_UNDEFINED,
VG_USERREQ__MAKE_MEM_DEFINED,
VG_USERREQ__DISCARD,
VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE,
VG_USERREQ__CHECK_MEM_IS_DEFINED,
VG_USERREQ__DO_LEAK_CHECK,
VG_USERREQ__COUNT_LEAKS,
VG_USERREQ__GET_VBITS,
VG_USERREQ__SET_VBITS,
VG_USERREQ__CREATE_BLOCK,
VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE,
/* Not next to VG_USERREQ__COUNT_LEAKS because it was added later. */
VG_USERREQ__COUNT_LEAK_BLOCKS,
VG_USERREQ__ENABLE_ADDR_ERROR_REPORTING_IN_RANGE,
VG_USERREQ__DISABLE_ADDR_ERROR_REPORTING_IN_RANGE,
/* This is just for memcheck's internal use - don't use it */
_VG_USERREQ__MEMCHECK_RECORD_OVERLAP_ERROR
= VG_USERREQ_TOOL_BASE('M','C') + 256
} Vg_MemCheckClientRequest;
/* Client-code macros to manipulate the state of memory. */
/* Mark memory at _qzz_addr as unaddressable for _qzz_len bytes. */
#define VALGRIND_MAKE_MEM_NOACCESS(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__MAKE_MEM_NOACCESS, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
/* Similarly, mark memory at _qzz_addr as addressable but undefined
for _qzz_len bytes. */
#define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__MAKE_MEM_UNDEFINED, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
/* Similarly, mark memory at _qzz_addr as addressable and defined
for _qzz_len bytes. */
#define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__MAKE_MEM_DEFINED, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
/* Similar to VALGRIND_MAKE_MEM_DEFINED except that addressability is
not altered: bytes which are addressable are marked as defined,
but those which are not addressable are left unchanged. */
#define VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
/* Create a block-description handle. The description is an ascii
string which is included in any messages pertaining to addresses
within the specified memory range. Has no other effect on the
properties of the memory range. */
#define VALGRIND_CREATE_BLOCK(_qzz_addr,_qzz_len, _qzz_desc) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__CREATE_BLOCK, \
(_qzz_addr), (_qzz_len), (_qzz_desc), \
0, 0)
/* Discard a block-description-handle. Returns 1 for an
invalid handle, 0 for a valid handle. */
#define VALGRIND_DISCARD(_qzz_blkindex) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__DISCARD, \
0, (_qzz_blkindex), 0, 0, 0)
/* Client-code macros to check the state of memory. */
/* Check that memory at _qzz_addr is addressable for _qzz_len bytes.
If suitable addressibility is not established, Valgrind prints an
error message and returns the address of the first offending byte.
Otherwise it returns zero. */
#define VALGRIND_CHECK_MEM_IS_ADDRESSABLE(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
/* Check that memory at _qzz_addr is addressable and defined for
_qzz_len bytes. If suitable addressibility and definedness are not
established, Valgrind prints an error message and returns the
address of the first offending byte. Otherwise it returns zero. */
#define VALGRIND_CHECK_MEM_IS_DEFINED(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__CHECK_MEM_IS_DEFINED, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
/* Use this macro to force the definedness and addressibility of an
lvalue to be checked. If suitable addressibility and definedness
are not established, Valgrind prints an error message and returns
the address of the first offending byte. Otherwise it returns
zero. */
#define VALGRIND_CHECK_VALUE_IS_DEFINED(__lvalue) \
VALGRIND_CHECK_MEM_IS_DEFINED( \
(volatile unsigned char *)&(__lvalue), \
(unsigned long)(sizeof (__lvalue)))
/* Do a full memory leak check (like --leak-check=full) mid-execution. */
#define VALGRIND_DO_LEAK_CHECK \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DO_LEAK_CHECK, \
0, 0, 0, 0, 0)
/* Same as VALGRIND_DO_LEAK_CHECK but only showing the entries for
which there was an increase in leaked bytes or leaked nr of blocks
since the previous leak search. */
#define VALGRIND_DO_ADDED_LEAK_CHECK \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DO_LEAK_CHECK, \
0, 1, 0, 0, 0)
/* Same as VALGRIND_DO_ADDED_LEAK_CHECK but showing entries with
increased or decreased leaked bytes/blocks since previous leak
search. */
#define VALGRIND_DO_CHANGED_LEAK_CHECK \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DO_LEAK_CHECK, \
0, 2, 0, 0, 0)
/* Do a summary memory leak check (like --leak-check=summary) mid-execution. */
#define VALGRIND_DO_QUICK_LEAK_CHECK \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DO_LEAK_CHECK, \
1, 0, 0, 0, 0)
/* Return number of leaked, dubious, reachable and suppressed bytes found by
all previous leak checks. They must be lvalues. */
#define VALGRIND_COUNT_LEAKS(leaked, dubious, reachable, suppressed) \
/* For safety on 64-bit platforms we assign the results to private
unsigned long variables, then assign these to the lvalues the user
specified, which works no matter what type 'leaked', 'dubious', etc
are. We also initialise '_qzz_leaked', etc because
VG_USERREQ__COUNT_LEAKS doesn't mark the values returned as
defined. */ \
{ \
unsigned long _qzz_leaked = 0, _qzz_dubious = 0; \
unsigned long _qzz_reachable = 0, _qzz_suppressed = 0; \
VALGRIND_DO_CLIENT_REQUEST_STMT( \
VG_USERREQ__COUNT_LEAKS, \
&_qzz_leaked, &_qzz_dubious, \
&_qzz_reachable, &_qzz_suppressed, 0); \
leaked = _qzz_leaked; \
dubious = _qzz_dubious; \
reachable = _qzz_reachable; \
suppressed = _qzz_suppressed; \
}
/* Return number of leaked, dubious, reachable and suppressed bytes found by
all previous leak checks. They must be lvalues. */
#define VALGRIND_COUNT_LEAK_BLOCKS(leaked, dubious, reachable, suppressed) \
/* For safety on 64-bit platforms we assign the results to private
unsigned long variables, then assign these to the lvalues the user
specified, which works no matter what type 'leaked', 'dubious', etc
are. We also initialise '_qzz_leaked', etc because
VG_USERREQ__COUNT_LEAKS doesn't mark the values returned as
defined. */ \
{ \
unsigned long _qzz_leaked = 0, _qzz_dubious = 0; \
unsigned long _qzz_reachable = 0, _qzz_suppressed = 0; \
VALGRIND_DO_CLIENT_REQUEST_STMT( \
VG_USERREQ__COUNT_LEAK_BLOCKS, \
&_qzz_leaked, &_qzz_dubious, \
&_qzz_reachable, &_qzz_suppressed, 0); \
leaked = _qzz_leaked; \
dubious = _qzz_dubious; \
reachable = _qzz_reachable; \
suppressed = _qzz_suppressed; \
}
/* Get the validity data for addresses [zza..zza+zznbytes-1] and copy it
into the provided zzvbits array. Return values:
0 if not running on valgrind
1 success
2 [previously indicated unaligned arrays; these are now allowed]
3 if any parts of zzsrc/zzvbits are not addressable.
The metadata is not copied in cases 0, 2 or 3 so it should be
impossible to segfault your system by using this call.
*/
#define VALGRIND_GET_VBITS(zza,zzvbits,zznbytes) \
(unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__GET_VBITS, \
(const char*)(zza), \
(char*)(zzvbits), \
(zznbytes), 0, 0)
/* Set the validity data for addresses [zza..zza+zznbytes-1], copying it
from the provided zzvbits array. Return values:
0 if not running on valgrind
1 success
2 [previously indicated unaligned arrays; these are now allowed]
3 if any parts of zza/zzvbits are not addressable.
The metadata is not copied in cases 0, 2 or 3 so it should be
impossible to segfault your system by using this call.
*/
#define VALGRIND_SET_VBITS(zza,zzvbits,zznbytes) \
(unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__SET_VBITS, \
(const char*)(zza), \
(const char*)(zzvbits), \
(zznbytes), 0, 0 )
/* Disable and re-enable reporting of addressing errors in the
specified address range. */
#define VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__DISABLE_ADDR_ERROR_REPORTING_IN_RANGE, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
#define VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__ENABLE_ADDR_ERROR_REPORTING_IN_RANGE, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
#endif
| -1 |
dotnet/runtime | 65,926 | FastMod for EEHashTable (Faster virtual generics) | Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR` but that is a more complicated change, and I think this one is worth having too anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| EgorBo | "2022-02-27T11:34:01Z" | "2022-03-01T19:43:37Z" | a5a7836f8193414fe64232d5c386b6402eb70e19 | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | FastMod for EEHashTable (Faster virtual generics). Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR` but that is a more complicated change, and I think this one is worth having too anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| ./src/mono/mono/metadata/mono-perfcounters-def.h | /**
* \file
* Define the system and runtime performance counters.
* Each category is defined with the macro:
* PERFCTR_CAT(catid, name, help, type, instances, first_counter_id)
* and after that follows the counters inside the category, defined by the macro:
* PERFCTR_COUNTER(counter_id, name, help, type, field)
 * field is the corresponding field inside MonoPerfCounters for the predefined counters.
 * Note we set it to unused for counters that have no such field: it is unused
 * in those cases.
*/
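/* Illustrative consumer sketch (comment only; the macro bodies below are hypothetical):
   a source file defines both macros to emit whatever it needs, then includes this file:

     #define PERFCTR_CAT(id, name, help, type, instances, first)  ...emit a category entry...
     #define PERFCTR_COUNTER(id, name, help, type, field)         ...emit a counter entry...
     #include "mono-perfcounters-def.h"
     #undef PERFCTR_CAT
     #undef PERFCTR_COUNTER
*/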
PERFCTR_CAT(CPU, "Processor", "", MultiInstance, CPU, CPU_USER_TIME)
PERFCTR_COUNTER(CPU_USER_TIME, "% User Time", "", Timer100Ns, unused)
PERFCTR_COUNTER(CPU_PRIV_TIME, "% Privileged Time", "", Timer100Ns, unused)
PERFCTR_COUNTER(CPU_INTR_TIME, "% Interrupt Time", "", Timer100Ns, unused)
PERFCTR_COUNTER(CPU_DCP_TIME, "% DCP Time", "", Timer100Ns, unused)
PERFCTR_COUNTER(CPU_PROC_TIME, "% Processor Time", "", Timer100NsInverse, unused)
PERFCTR_CAT(PROC, "Process", "", MultiInstance, Process, PROC_USER_TIME)
PERFCTR_COUNTER(PROC_USER_TIME, "% User Time", "", Timer100Ns, unused)
PERFCTR_COUNTER(PROC_PRIV_TIME, "% Privileged Time", "", Timer100Ns, unused)
PERFCTR_COUNTER(PROC_PROC_TIME, "% Processor Time", "", Timer100Ns, unused)
PERFCTR_COUNTER(PROC_THREADS, "Thread Count", "", NumberOfItems64, unused)
PERFCTR_COUNTER(PROC_VBYTES, "Virtual Bytes", "", NumberOfItems64, unused)
PERFCTR_COUNTER(PROC_WSET, "Working Set", "", NumberOfItems64, unused)
PERFCTR_COUNTER(PROC_PBYTES, "Private Bytes", "", NumberOfItems64, unused)
/* sample runtime counter */
PERFCTR_CAT(MONO_MEM, "Mono Memory", "", SingleInstance, Mono, MEM_NUM_OBJECTS)
PERFCTR_COUNTER(MEM_NUM_OBJECTS, "Allocated Objects", "", NumberOfItems64, unused)
PERFCTR_COUNTER(MEM_PHYS_TOTAL, "Total Physical Memory", "Physical memory installed in the machine, in bytes", NumberOfItems64, unused)
PERFCTR_COUNTER(MEM_PHYS_AVAILABLE, "Available Physical Memory", "Physical memory available in the machine, in bytes", NumberOfItems64, unused)
PERFCTR_CAT(ASPNET, "ASP.NET", "", MultiInstance, Mono, ASPNET_REQ_Q)
PERFCTR_COUNTER(ASPNET_REQ_Q, "Requests Queued", "", NumberOfItems64, aspnet_requests_queued)
PERFCTR_COUNTER(ASPNET_REQ_TOTAL, "Requests Total", "", NumberOfItems32, aspnet_requests)
PERFCTR_COUNTER(ASPNET_REQ_PSEC, "Requests/Sec", "", RateOfCountsPerSecond32, aspnet_requests)
PERFCTR_CAT(JIT, ".NET CLR JIT", "", MultiInstance, Mono, JIT_BYTES)
PERFCTR_COUNTER(JIT_BYTES, "# of IL Bytes JITted", "", NumberOfItems32, jit_bytes)
PERFCTR_COUNTER(JIT_METHODS, "# of IL Methods JITted", "", NumberOfItems32, jit_methods)
PERFCTR_COUNTER(JIT_TIME, "% Time in JIT", "", RawFraction, jit_time)
PERFCTR_COUNTER(JIT_BYTES_PSEC, "IL Bytes Jitted/Sec", "", RateOfCountsPerSecond32, jit_bytes)
PERFCTR_COUNTER(JIT_FAILURES, "Standard Jit Failures", "", NumberOfItems32, jit_failures)
PERFCTR_CAT(EXC, ".NET CLR Exceptions", "", MultiInstance, Mono, EXC_THROWN)
PERFCTR_COUNTER(EXC_THROWN, "# of Exceps Thrown", "", NumberOfItems32, exceptions_thrown)
PERFCTR_COUNTER(EXC_THROWN_PSEC, "# of Exceps Thrown/Sec", "", RateOfCountsPerSecond32, exceptions_thrown)
PERFCTR_COUNTER(EXC_FILTERS_PSEC, "# of Filters/Sec", "", RateOfCountsPerSecond32, exceptions_filters)
PERFCTR_COUNTER(EXC_FINALLYS_PSEC, "# of Finallys/Sec", "", RateOfCountsPerSecond32, exceptions_finallys)
PERFCTR_COUNTER(EXC_CATCH_DEPTH, "Throw to Catch Depth/Sec", "", NumberOfItems32, exceptions_depth)
PERFCTR_CAT(GC, ".NET CLR Memory", "", MultiInstance, Mono, GC_GEN0)
PERFCTR_COUNTER(GC_GEN0, "# Gen 0 Collections", "", NumberOfItems32, gc_collections0)
PERFCTR_COUNTER(GC_GEN1, "# Gen 1 Collections", "", NumberOfItems32, gc_collections1)
PERFCTR_COUNTER(GC_GEN2, "# Gen 2 Collections", "", NumberOfItems32, gc_collections2)
PERFCTR_COUNTER(GC_PROM0, "Promoted Memory from Gen 0", "", NumberOfItems32, gc_promotions0)
PERFCTR_COUNTER(GC_PROM1, "Promoted Memory from Gen 1", "", NumberOfItems32, gc_promotions1)
PERFCTR_COUNTER(GC_PROM0SEC, "Gen 0 Promoted Bytes/Sec", "", RateOfCountsPerSecond32, gc_promotions0)
PERFCTR_COUNTER(GC_PROM1SEC, "Gen 1 Promoted Bytes/Sec", "", RateOfCountsPerSecond32, gc_promotions1)
PERFCTR_COUNTER(GC_PROMFIN, "Promoted Finalization-Memory from Gen 0", "", NumberOfItems32, gc_promotion_finalizers)
PERFCTR_COUNTER(GC_GEN0SIZE, "Gen 0 heap size", "", NumberOfItems64, gc_gen0size)
PERFCTR_COUNTER(GC_GEN1SIZE, "Gen 1 heap size", "", NumberOfItems64, gc_gen1size)
PERFCTR_COUNTER(GC_GEN2SIZE, "Gen 2 heap size", "", NumberOfItems64, gc_gen2size)
PERFCTR_COUNTER(GC_LOSIZE, "Large Object Heap size", "", NumberOfItems32, gc_lossize)
PERFCTR_COUNTER(GC_FINSURV, "Finalization Survivors", "", NumberOfItems32, gc_fin_survivors)
PERFCTR_COUNTER(GC_NHANDLES, "# GC Handles", "", NumberOfItems32, gc_num_handles)
PERFCTR_COUNTER(GC_BYTESSEC, "Allocated Bytes/sec", "", RateOfCountsPerSecond32, gc_allocated)
PERFCTR_COUNTER(GC_INDGC, "# Induced GC", "", NumberOfItems32, gc_induced)
PERFCTR_COUNTER(GC_PERCTIME, "% Time in GC", "", RawFraction, gc_time)
PERFCTR_COUNTER(GC_BYTES, "# Bytes in all Heaps", "", NumberOfItems64, gc_total_bytes)
PERFCTR_COUNTER(GC_COMMBYTES, "# Total committed Bytes", "", NumberOfItems64, gc_committed_bytes)
PERFCTR_COUNTER(GC_RESBYTES, "# Total reserved Bytes", "", NumberOfItems64, gc_reserved_bytes)
PERFCTR_COUNTER(GC_PINNED, "# of Pinned Objects", "", NumberOfItems32, gc_num_pinned)
PERFCTR_COUNTER(GC_SYNKB, "# of Sink Blocks in use", "", NumberOfItems32, gc_sync_blocks)
PERFCTR_CAT(LOADING, ".NET CLR Loading", "", MultiInstance, Mono, LOADING_CLASSES)
PERFCTR_COUNTER(LOADING_CLASSES, "Current Classes Loaded", "", NumberOfItems32, loader_classes)
PERFCTR_COUNTER(LOADING_TOTCLASSES, "Total Classes Loaded", "", NumberOfItems32, loader_total_classes)
PERFCTR_COUNTER(LOADING_CLASSESSEC, "Rate of Classes Loaded", "", RateOfCountsPerSecond32, loader_total_classes)
PERFCTR_COUNTER(LOADING_APPDOMAINS, "Current appdomains", "", NumberOfItems32, loader_appdomains)
PERFCTR_COUNTER(LOADING_TOTAPPDOMAINS, "Total Appdomains", "", NumberOfItems32, loader_total_appdomains)
PERFCTR_COUNTER(LOADING_APPDOMAINSEC, "Rate of appdomains", "", RateOfCountsPerSecond32, loader_total_appdomains)
PERFCTR_COUNTER(LOADING_ASSEMBLIES, "Current Assemblies", "", NumberOfItems32, loader_assemblies)
PERFCTR_COUNTER(LOADING_TOTASSEMBLIES, "Total Assemblies", "", NumberOfItems32, loader_total_assemblies)
PERFCTR_COUNTER(LOADING_ASSEMBLIESEC, "Rate of Assemblies", "", RateOfCountsPerSecond32, loader_total_assemblies)
PERFCTR_COUNTER(LOADING_FAILURES, "Total # of Load Failures", "", NumberOfItems32, loader_failures)
PERFCTR_COUNTER(LOADING_FAILURESSEC, "Rate of Load Failures", "", RateOfCountsPerSecond32, loader_failures)
PERFCTR_COUNTER(LOADING_BYTES, "Bytes in Loader Heap", "", NumberOfItems32, loader_bytes)
PERFCTR_COUNTER(LOADING_APPUNLOADED, "Total appdomains unloaded", "", NumberOfItems32, loader_appdomains_uloaded)
PERFCTR_COUNTER(LOADING_APPUNLOADEDSEC, "Rate of appdomains unloaded", "", RateOfCountsPerSecond32, loader_appdomains_uloaded)
PERFCTR_CAT(THREAD, ".NET CLR LocksAndThreads", "", MultiInstance, Mono, THREAD_CONTENTIONS)
PERFCTR_COUNTER(THREAD_CONTENTIONS, "Total # of Contentions", "", NumberOfItems32, thread_contentions)
PERFCTR_COUNTER(THREAD_CONTENTIONSSEC, "Contention Rate / sec", "", RateOfCountsPerSecond32, thread_contentions)
PERFCTR_COUNTER(THREAD_QUEUELEN, "Current Queue Length", "", NumberOfItems32, thread_queue_len)
PERFCTR_COUNTER(THREAD_QUEUELENP, "Queue Length Peak", "", NumberOfItems32, thread_queue_max)
PERFCTR_COUNTER(THREAD_QUEUELENSEC, "Queue Length / sec", "", RateOfCountsPerSecond32, thread_queue_max)
PERFCTR_COUNTER(THREAD_NUMLOG, "# of current logical Threads", "", NumberOfItems32, thread_num_logical)
PERFCTR_COUNTER(THREAD_NUMPHYS, "# of current physical Threads", "", NumberOfItems32, thread_num_physical)
PERFCTR_COUNTER(THREAD_NUMREC, "# of current recognized threads", "", NumberOfItems32, thread_cur_recognized)
PERFCTR_COUNTER(THREAD_TOTREC, "# of total recognized threads", "", NumberOfItems32, thread_num_recognized)
PERFCTR_COUNTER(THREAD_TOTRECSEC, "rate of recognized threads / sec", "", RateOfCountsPerSecond32, thread_num_recognized)
PERFCTR_CAT(INTEROP, ".NET CLR Interop", "", MultiInstance, Mono, INTEROP_NUMCCW)
PERFCTR_COUNTER(INTEROP_NUMCCW, "# of CCWs", "", NumberOfItems32, interop_num_ccw)
PERFCTR_COUNTER(INTEROP_STUBS, "# of Stubs", "", NumberOfItems32, interop_num_stubs)
PERFCTR_COUNTER(INTEROP_MARSH, "# of marshalling", "", NumberOfItems32, interop_num_marshals)
PERFCTR_CAT(SECURITY, ".NET CLR Security", "", MultiInstance, Mono, SECURITY_CHECKS)
PERFCTR_COUNTER(SECURITY_CHECKS, "Total Runtime Checks", "", NumberOfItems32, security_num_checks)
PERFCTR_COUNTER(SECURITY_LCHECKS, "# Link Time Checks", "", NumberOfItems32, security_num_link_checks)
PERFCTR_COUNTER(SECURITY_PERCTIME, "% Time in RT checks", "", RawFraction, security_time)
PERFCTR_COUNTER(SECURITY_SWDEPTH, "Stack Walk Depth", "", NumberOfItems32, security_depth)
PERFCTR_CAT(THREADPOOL, "Mono Threadpool", "", MultiInstance, Mono, THREADPOOL_WORKITEMS)
PERFCTR_COUNTER(THREADPOOL_WORKITEMS, "Work Items Added", "", NumberOfItems64, threadpool_workitems)
PERFCTR_COUNTER(THREADPOOL_WORKITEMS_PSEC, "Work Items Added/Sec", "", RateOfCountsPerSecond32, threadpool_workitems)
PERFCTR_COUNTER(THREADPOOL_IOWORKITEMS, "IO Work Items Added", "", NumberOfItems64, threadpool_ioworkitems)
PERFCTR_COUNTER(THREADPOOL_IOWORKITEMS_PSEC, "IO Work Items Added/Sec", "", RateOfCountsPerSecond32, threadpool_ioworkitems)
PERFCTR_COUNTER(THREADPOOL_THREADS, "# of Threads", "", NumberOfItems32, threadpool_threads)
PERFCTR_COUNTER(THREADPOOL_IOTHREADS, "# of IO Threads", "", NumberOfItems32, threadpool_iothreads)
PERFCTR_CAT(NETWORK, "Network Interface", "", MultiInstance, NetworkInterface, NETWORK_BYTESRECSEC)
PERFCTR_COUNTER(NETWORK_BYTESRECSEC, "Bytes Received/sec", "", RateOfCountsPerSecond64, unused)
PERFCTR_COUNTER(NETWORK_BYTESSENTSEC, "Bytes Sent/sec", "", RateOfCountsPerSecond64, unused)
PERFCTR_COUNTER(NETWORK_BYTESTOTALSEC, "Bytes Total/sec", "", RateOfCountsPerSecond64, unused)
| /**
* \file
* Define the system and runtime performance counters.
* Each category is defined with the macro:
* PERFCTR_CAT(catid, name, help, type, instances, first_counter_id)
* and after that follows the counters inside the category, defined by the macro:
* PERFCTR_COUNTER(counter_id, name, help, type, field)
 * field is the corresponding field inside MonoPerfCounters for the predefined counters.
 * Note that it is set to unused for counters that do not map to such a field.
*/
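/*
 * Illustration (not part of this file): a consumer defines the two macros to
 * expand each entry into whatever it needs and then textually includes this
 * file (the classic X-macro pattern). The sketch below is hypothetical -- the
 * macro bodies and the include name are assumptions, not the runtime's actual
 * definitions -- e.g. building an enum of ids:
 *
 *   #define PERFCTR_CAT(catid, name, help, type, instances, first_counter_id) \
 *       CATEGORY_ ## catid,
 *   #define PERFCTR_COUNTER(counter_id, name, help, type, field) \
 *       COUNTER_ ## counter_id,
 *   enum {
 *   #include "mono-perfcounters-def.h"
 *   };
 *   #undef PERFCTR_CAT
 *   #undef PERFCTR_COUNTER
 */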
PERFCTR_CAT(CPU, "Processor", "", MultiInstance, CPU, CPU_USER_TIME)
PERFCTR_COUNTER(CPU_USER_TIME, "% User Time", "", Timer100Ns, unused)
PERFCTR_COUNTER(CPU_PRIV_TIME, "% Privileged Time", "", Timer100Ns, unused)
PERFCTR_COUNTER(CPU_INTR_TIME, "% Interrupt Time", "", Timer100Ns, unused)
PERFCTR_COUNTER(CPU_DCP_TIME, "% DCP Time", "", Timer100Ns, unused)
PERFCTR_COUNTER(CPU_PROC_TIME, "% Processor Time", "", Timer100NsInverse, unused)
PERFCTR_CAT(PROC, "Process", "", MultiInstance, Process, PROC_USER_TIME)
PERFCTR_COUNTER(PROC_USER_TIME, "% User Time", "", Timer100Ns, unused)
PERFCTR_COUNTER(PROC_PRIV_TIME, "% Privileged Time", "", Timer100Ns, unused)
PERFCTR_COUNTER(PROC_PROC_TIME, "% Processor Time", "", Timer100Ns, unused)
PERFCTR_COUNTER(PROC_THREADS, "Thread Count", "", NumberOfItems64, unused)
PERFCTR_COUNTER(PROC_VBYTES, "Virtual Bytes", "", NumberOfItems64, unused)
PERFCTR_COUNTER(PROC_WSET, "Working Set", "", NumberOfItems64, unused)
PERFCTR_COUNTER(PROC_PBYTES, "Private Bytes", "", NumberOfItems64, unused)
/* sample runtime counter */
PERFCTR_CAT(MONO_MEM, "Mono Memory", "", SingleInstance, Mono, MEM_NUM_OBJECTS)
PERFCTR_COUNTER(MEM_NUM_OBJECTS, "Allocated Objects", "", NumberOfItems64, unused)
PERFCTR_COUNTER(MEM_PHYS_TOTAL, "Total Physical Memory", "Physical memory installed in the machine, in bytes", NumberOfItems64, unused)
PERFCTR_COUNTER(MEM_PHYS_AVAILABLE, "Available Physical Memory", "Physical memory available in the machine, in bytes", NumberOfItems64, unused)
PERFCTR_CAT(ASPNET, "ASP.NET", "", MultiInstance, Mono, ASPNET_REQ_Q)
PERFCTR_COUNTER(ASPNET_REQ_Q, "Requests Queued", "", NumberOfItems64, aspnet_requests_queued)
PERFCTR_COUNTER(ASPNET_REQ_TOTAL, "Requests Total", "", NumberOfItems32, aspnet_requests)
PERFCTR_COUNTER(ASPNET_REQ_PSEC, "Requests/Sec", "", RateOfCountsPerSecond32, aspnet_requests)
PERFCTR_CAT(JIT, ".NET CLR JIT", "", MultiInstance, Mono, JIT_BYTES)
PERFCTR_COUNTER(JIT_BYTES, "# of IL Bytes JITted", "", NumberOfItems32, jit_bytes)
PERFCTR_COUNTER(JIT_METHODS, "# of IL Methods JITted", "", NumberOfItems32, jit_methods)
PERFCTR_COUNTER(JIT_TIME, "% Time in JIT", "", RawFraction, jit_time)
PERFCTR_COUNTER(JIT_BYTES_PSEC, "IL Bytes Jitted/Sec", "", RateOfCountsPerSecond32, jit_bytes)
PERFCTR_COUNTER(JIT_FAILURES, "Standard Jit Failures", "", NumberOfItems32, jit_failures)
PERFCTR_CAT(EXC, ".NET CLR Exceptions", "", MultiInstance, Mono, EXC_THROWN)
PERFCTR_COUNTER(EXC_THROWN, "# of Exceps Thrown", "", NumberOfItems32, exceptions_thrown)
PERFCTR_COUNTER(EXC_THROWN_PSEC, "# of Exceps Thrown/Sec", "", RateOfCountsPerSecond32, exceptions_thrown)
PERFCTR_COUNTER(EXC_FILTERS_PSEC, "# of Filters/Sec", "", RateOfCountsPerSecond32, exceptions_filters)
PERFCTR_COUNTER(EXC_FINALLYS_PSEC, "# of Finallys/Sec", "", RateOfCountsPerSecond32, exceptions_finallys)
PERFCTR_COUNTER(EXC_CATCH_DEPTH, "Throw to Catch Depth/Sec", "", NumberOfItems32, exceptions_depth)
PERFCTR_CAT(GC, ".NET CLR Memory", "", MultiInstance, Mono, GC_GEN0)
PERFCTR_COUNTER(GC_GEN0, "# Gen 0 Collections", "", NumberOfItems32, gc_collections0)
PERFCTR_COUNTER(GC_GEN1, "# Gen 1 Collections", "", NumberOfItems32, gc_collections1)
PERFCTR_COUNTER(GC_GEN2, "# Gen 2 Collections", "", NumberOfItems32, gc_collections2)
PERFCTR_COUNTER(GC_PROM0, "Promoted Memory from Gen 0", "", NumberOfItems32, gc_promotions0)
PERFCTR_COUNTER(GC_PROM1, "Promoted Memory from Gen 1", "", NumberOfItems32, gc_promotions1)
PERFCTR_COUNTER(GC_PROM0SEC, "Gen 0 Promoted Bytes/Sec", "", RateOfCountsPerSecond32, gc_promotions0)
PERFCTR_COUNTER(GC_PROM1SEC, "Gen 1 Promoted Bytes/Sec", "", RateOfCountsPerSecond32, gc_promotions1)
PERFCTR_COUNTER(GC_PROMFIN, "Promoted Finalization-Memory from Gen 0", "", NumberOfItems32, gc_promotion_finalizers)
PERFCTR_COUNTER(GC_GEN0SIZE, "Gen 0 heap size", "", NumberOfItems64, gc_gen0size)
PERFCTR_COUNTER(GC_GEN1SIZE, "Gen 1 heap size", "", NumberOfItems64, gc_gen1size)
PERFCTR_COUNTER(GC_GEN2SIZE, "Gen 2 heap size", "", NumberOfItems64, gc_gen2size)
PERFCTR_COUNTER(GC_LOSIZE, "Large Object Heap size", "", NumberOfItems32, gc_lossize)
PERFCTR_COUNTER(GC_FINSURV, "Finalization Survivors", "", NumberOfItems32, gc_fin_survivors)
PERFCTR_COUNTER(GC_NHANDLES, "# GC Handles", "", NumberOfItems32, gc_num_handles)
PERFCTR_COUNTER(GC_BYTESSEC, "Allocated Bytes/sec", "", RateOfCountsPerSecond32, gc_allocated)
PERFCTR_COUNTER(GC_INDGC, "# Induced GC", "", NumberOfItems32, gc_induced)
PERFCTR_COUNTER(GC_PERCTIME, "% Time in GC", "", RawFraction, gc_time)
PERFCTR_COUNTER(GC_BYTES, "# Bytes in all Heaps", "", NumberOfItems64, gc_total_bytes)
PERFCTR_COUNTER(GC_COMMBYTES, "# Total committed Bytes", "", NumberOfItems64, gc_committed_bytes)
PERFCTR_COUNTER(GC_RESBYTES, "# Total reserved Bytes", "", NumberOfItems64, gc_reserved_bytes)
PERFCTR_COUNTER(GC_PINNED, "# of Pinned Objects", "", NumberOfItems32, gc_num_pinned)
PERFCTR_COUNTER(GC_SYNKB, "# of Sink Blocks in use", "", NumberOfItems32, gc_sync_blocks)
PERFCTR_CAT(LOADING, ".NET CLR Loading", "", MultiInstance, Mono, LOADING_CLASSES)
PERFCTR_COUNTER(LOADING_CLASSES, "Current Classes Loaded", "", NumberOfItems32, loader_classes)
PERFCTR_COUNTER(LOADING_TOTCLASSES, "Total Classes Loaded", "", NumberOfItems32, loader_total_classes)
PERFCTR_COUNTER(LOADING_CLASSESSEC, "Rate of Classes Loaded", "", RateOfCountsPerSecond32, loader_total_classes)
PERFCTR_COUNTER(LOADING_APPDOMAINS, "Current appdomains", "", NumberOfItems32, loader_appdomains)
PERFCTR_COUNTER(LOADING_TOTAPPDOMAINS, "Total Appdomains", "", NumberOfItems32, loader_total_appdomains)
PERFCTR_COUNTER(LOADING_APPDOMAINSEC, "Rate of appdomains", "", RateOfCountsPerSecond32, loader_total_appdomains)
PERFCTR_COUNTER(LOADING_ASSEMBLIES, "Current Assemblies", "", NumberOfItems32, loader_assemblies)
PERFCTR_COUNTER(LOADING_TOTASSEMBLIES, "Total Assemblies", "", NumberOfItems32, loader_total_assemblies)
PERFCTR_COUNTER(LOADING_ASSEMBLIESEC, "Rate of Assemblies", "", RateOfCountsPerSecond32, loader_total_assemblies)
PERFCTR_COUNTER(LOADING_FAILURES, "Total # of Load Failures", "", NumberOfItems32, loader_failures)
PERFCTR_COUNTER(LOADING_FAILURESSEC, "Rate of Load Failures", "", RateOfCountsPerSecond32, loader_failures)
PERFCTR_COUNTER(LOADING_BYTES, "Bytes in Loader Heap", "", NumberOfItems32, loader_bytes)
PERFCTR_COUNTER(LOADING_APPUNLOADED, "Total appdomains unloaded", "", NumberOfItems32, loader_appdomains_uloaded)
PERFCTR_COUNTER(LOADING_APPUNLOADEDSEC, "Rate of appdomains unloaded", "", RateOfCountsPerSecond32, loader_appdomains_uloaded)
PERFCTR_CAT(THREAD, ".NET CLR LocksAndThreads", "", MultiInstance, Mono, THREAD_CONTENTIONS)
PERFCTR_COUNTER(THREAD_CONTENTIONS, "Total # of Contentions", "", NumberOfItems32, thread_contentions)
PERFCTR_COUNTER(THREAD_CONTENTIONSSEC, "Contention Rate / sec", "", RateOfCountsPerSecond32, thread_contentions)
PERFCTR_COUNTER(THREAD_QUEUELEN, "Current Queue Length", "", NumberOfItems32, thread_queue_len)
PERFCTR_COUNTER(THREAD_QUEUELENP, "Queue Length Peak", "", NumberOfItems32, thread_queue_max)
PERFCTR_COUNTER(THREAD_QUEUELENSEC, "Queue Length / sec", "", RateOfCountsPerSecond32, thread_queue_max)
PERFCTR_COUNTER(THREAD_NUMLOG, "# of current logical Threads", "", NumberOfItems32, thread_num_logical)
PERFCTR_COUNTER(THREAD_NUMPHYS, "# of current physical Threads", "", NumberOfItems32, thread_num_physical)
PERFCTR_COUNTER(THREAD_NUMREC, "# of current recognized threads", "", NumberOfItems32, thread_cur_recognized)
PERFCTR_COUNTER(THREAD_TOTREC, "# of total recognized threads", "", NumberOfItems32, thread_num_recognized)
PERFCTR_COUNTER(THREAD_TOTRECSEC, "rate of recognized threads / sec", "", RateOfCountsPerSecond32, thread_num_recognized)
PERFCTR_CAT(INTEROP, ".NET CLR Interop", "", MultiInstance, Mono, INTEROP_NUMCCW)
PERFCTR_COUNTER(INTEROP_NUMCCW, "# of CCWs", "", NumberOfItems32, interop_num_ccw)
PERFCTR_COUNTER(INTEROP_STUBS, "# of Stubs", "", NumberOfItems32, interop_num_stubs)
PERFCTR_COUNTER(INTEROP_MARSH, "# of marshalling", "", NumberOfItems32, interop_num_marshals)
PERFCTR_CAT(SECURITY, ".NET CLR Security", "", MultiInstance, Mono, SECURITY_CHECKS)
PERFCTR_COUNTER(SECURITY_CHECKS, "Total Runtime Checks", "", NumberOfItems32, security_num_checks)
PERFCTR_COUNTER(SECURITY_LCHECKS, "# Link Time Checks", "", NumberOfItems32, security_num_link_checks)
PERFCTR_COUNTER(SECURITY_PERCTIME, "% Time in RT checks", "", RawFraction, security_time)
PERFCTR_COUNTER(SECURITY_SWDEPTH, "Stack Walk Depth", "", NumberOfItems32, security_depth)
PERFCTR_CAT(THREADPOOL, "Mono Threadpool", "", MultiInstance, Mono, THREADPOOL_WORKITEMS)
PERFCTR_COUNTER(THREADPOOL_WORKITEMS, "Work Items Added", "", NumberOfItems64, threadpool_workitems)
PERFCTR_COUNTER(THREADPOOL_WORKITEMS_PSEC, "Work Items Added/Sec", "", RateOfCountsPerSecond32, threadpool_workitems)
PERFCTR_COUNTER(THREADPOOL_IOWORKITEMS, "IO Work Items Added", "", NumberOfItems64, threadpool_ioworkitems)
PERFCTR_COUNTER(THREADPOOL_IOWORKITEMS_PSEC, "IO Work Items Added/Sec", "", RateOfCountsPerSecond32, threadpool_ioworkitems)
PERFCTR_COUNTER(THREADPOOL_THREADS, "# of Threads", "", NumberOfItems32, threadpool_threads)
PERFCTR_COUNTER(THREADPOOL_IOTHREADS, "# of IO Threads", "", NumberOfItems32, threadpool_iothreads)
PERFCTR_CAT(NETWORK, "Network Interface", "", MultiInstance, NetworkInterface, NETWORK_BYTESRECSEC)
PERFCTR_COUNTER(NETWORK_BYTESRECSEC, "Bytes Received/sec", "", RateOfCountsPerSecond64, unused)
PERFCTR_COUNTER(NETWORK_BYTESSENTSEC, "Bytes Sent/sec", "", RateOfCountsPerSecond64, unused)
PERFCTR_COUNTER(NETWORK_BYTESTOTALSEC, "Bytes Total/sec", "", RateOfCountsPerSecond64, unused)
| -1 |
dotnet/runtime | 65,926 | FastMod for EEHashTable (Faster virtual generics) | Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR` but that is a more complicated change and I think this one worth having too anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
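For context, the name refers to the "fast mod" trick: the division behind `%` used for bucket selection is replaced by a precomputed multiplier and a widening multiply. The sketch below is a minimal, hypothetical illustration of that general technique (Lemire-style fastmod) in C, not the actual EEHashTable change; the helper names are made up, and `unsigned __int128` is assumed to be available (64-bit GCC/Clang).

```c
#include <stdint.h>

/* Precomputed once when the table is sized: M = ceil(2^64 / d), for d > 0. */
static inline uint64_t fastmod_init(uint32_t d)
{
    return UINT64_MAX / d + 1;
}

/* Computes x % d with two multiplies instead of a hardware divide. */
static inline uint32_t fastmod_u32(uint32_t x, uint64_t M, uint32_t d)
{
    uint64_t lowbits = M * (uint64_t)x;                         /* M * x mod 2^64 */
    return (uint32_t)(((unsigned __int128)lowbits * d) >> 64);  /* floor(lowbits * d / 2^64) == x % d */
}

/* Hypothetical hash-table use: bucket = fastmod_u32(hash, M, num_buckets); */
```

The divisor (the bucket count) changes rarely, so precomputing `M` once per resize amortizes to essentially nothing, while every lookup avoids an integer division.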
| EgorBo | "2022-02-27T11:34:01Z" | "2022-03-01T19:43:37Z" | a5a7836f8193414fe64232d5c386b6402eb70e19 | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | FastMod for EEHashTable (Faster virtual generics). Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR` but that is a more complicated change and I think this one worth having too anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| ./src/mono/mono/metadata/mempool.h | /**
* \file
*/
#ifndef _MONO_MEMPOOL_H_
#define _MONO_MEMPOOL_H_
#include <mono/utils/mono-publib.h>
typedef struct _MonoMemPool MonoMemPool;
MONO_API MonoMemPool *
mono_mempool_new (void);
MONO_API MonoMemPool *
mono_mempool_new_size (int initial_size);
MONO_API void
mono_mempool_destroy (MonoMemPool *pool);
MONO_API void
mono_mempool_invalidate (MonoMemPool *pool);
MONO_API void
mono_mempool_stats (MonoMemPool *pool);
MONO_API void*
mono_mempool_alloc (MonoMemPool *pool, unsigned int size);
#define mono_mempool_alloc(pool, size) (g_cast (mono_mempool_alloc ((pool), (size))))
MONO_API void*
mono_mempool_alloc0 (MonoMemPool *pool, unsigned int size);
#define mono_mempool_alloc0(pool, size) (g_cast (mono_mempool_alloc0 ((pool), (size))))
MONO_API mono_bool
mono_mempool_contains_addr (MonoMemPool *pool, void* addr);
MONO_API char*
mono_mempool_strdup (MonoMemPool *pool, const char *s);
MONO_API uint32_t
mono_mempool_get_allocated (MonoMemPool *pool);
#endif
| /**
* \file
*/
#ifndef _MONO_MEMPOOL_H_
#define _MONO_MEMPOOL_H_
#include <mono/utils/mono-publib.h>
typedef struct _MonoMemPool MonoMemPool;
MONO_API MonoMemPool *
mono_mempool_new (void);
MONO_API MonoMemPool *
mono_mempool_new_size (int initial_size);
MONO_API void
mono_mempool_destroy (MonoMemPool *pool);
MONO_API void
mono_mempool_invalidate (MonoMemPool *pool);
MONO_API void
mono_mempool_stats (MonoMemPool *pool);
MONO_API void*
mono_mempool_alloc (MonoMemPool *pool, unsigned int size);
#define mono_mempool_alloc(pool, size) (g_cast (mono_mempool_alloc ((pool), (size))))
MONO_API void*
mono_mempool_alloc0 (MonoMemPool *pool, unsigned int size);
#define mono_mempool_alloc0(pool, size) (g_cast (mono_mempool_alloc0 ((pool), (size))))
MONO_API mono_bool
mono_mempool_contains_addr (MonoMemPool *pool, void* addr);
MONO_API char*
mono_mempool_strdup (MonoMemPool *pool, const char *s);
MONO_API uint32_t
mono_mempool_get_allocated (MonoMemPool *pool);
#endif
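/*
 * Usage sketch (illustrative, not part of the header), assuming the usual
 * pool-allocator contract suggested by the declarations above: objects are
 * not freed individually, destroying the pool releases them all at once.
 *
 *   MonoMemPool *pool = mono_mempool_new ();
 *   int *vals = (int *) mono_mempool_alloc0 (pool, 16 * sizeof (int));
 *   char *copy = mono_mempool_strdup (pool, "hello");
 *   ... use vals and copy; no per-object free ...
 *   mono_mempool_destroy (pool);
 */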
| -1 |
dotnet/runtime | 65,926 | FastMod for EEHashTable (Faster virtual generics) | Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR` but that is a more complicated change and I think this one worth having too anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| EgorBo | "2022-02-27T11:34:01Z" | "2022-03-01T19:43:37Z" | a5a7836f8193414fe64232d5c386b6402eb70e19 | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | FastMod for EEHashTable (Faster virtual generics). Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR` but that is a more complicated change and I think this one worth having too anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| ./src/native/external/brotli/enc/bit_cost_inc.h | /* NOLINT(build/header_guard) */
/* Copyright 2013 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* template parameters: FN */
#define HistogramType FN(Histogram)
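/*
 * Illustration (not part of this file): FN is the "template parameter"; the
 * includer defines it before textually including this file, stamping out one
 * concrete BrotliPopulationCost variant per histogram type. A hypothetical
 * includer could look like:
 *
 *   #define FN(X) X ## Literal
 *   #include "bit_cost_inc.h"
 *   #undef FN
 *
 * so that FN(BrotliPopulationCost) expands to BrotliPopulationCostLiteral.
 */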
double FN(BrotliPopulationCost)(const HistogramType* histogram) {
static const double kOneSymbolHistogramCost = 12;
static const double kTwoSymbolHistogramCost = 20;
static const double kThreeSymbolHistogramCost = 28;
static const double kFourSymbolHistogramCost = 37;
const size_t data_size = FN(HistogramDataSize)();
int count = 0;
size_t s[5];
double bits = 0.0;
size_t i;
if (histogram->total_count_ == 0) {
return kOneSymbolHistogramCost;
}
for (i = 0; i < data_size; ++i) {
if (histogram->data_[i] > 0) {
s[count] = i;
++count;
if (count > 4) break;
}
}
if (count == 1) {
return kOneSymbolHistogramCost;
}
if (count == 2) {
return (kTwoSymbolHistogramCost + (double)histogram->total_count_);
}
if (count == 3) {
const uint32_t histo0 = histogram->data_[s[0]];
const uint32_t histo1 = histogram->data_[s[1]];
const uint32_t histo2 = histogram->data_[s[2]];
const uint32_t histomax =
BROTLI_MAX(uint32_t, histo0, BROTLI_MAX(uint32_t, histo1, histo2));
return (kThreeSymbolHistogramCost +
2 * (histo0 + histo1 + histo2) - histomax);
}
if (count == 4) {
uint32_t histo[4];
uint32_t h23;
uint32_t histomax;
for (i = 0; i < 4; ++i) {
histo[i] = histogram->data_[s[i]];
}
/* Sort */
for (i = 0; i < 4; ++i) {
size_t j;
for (j = i + 1; j < 4; ++j) {
if (histo[j] > histo[i]) {
BROTLI_SWAP(uint32_t, histo, j, i);
}
}
}
h23 = histo[2] + histo[3];
histomax = BROTLI_MAX(uint32_t, h23, histo[0]);
return (kFourSymbolHistogramCost +
3 * h23 + 2 * (histo[0] + histo[1]) - histomax);
}
{
/* In this loop we compute the entropy of the histogram and simultaneously
build a simplified histogram of the code length codes where we use the
zero repeat code 17, but we don't use the non-zero repeat code 16. */
size_t max_depth = 1;
uint32_t depth_histo[BROTLI_CODE_LENGTH_CODES] = { 0 };
const double log2total = FastLog2(histogram->total_count_);
for (i = 0; i < data_size;) {
if (histogram->data_[i] > 0) {
/* Compute -log2(P(symbol)) = -log2(count(symbol)/total_count) =
= log2(total_count) - log2(count(symbol)) */
double log2p = log2total - FastLog2(histogram->data_[i]);
/* Approximate the bit depth by round(-log2(P(symbol))) */
size_t depth = (size_t)(log2p + 0.5);
bits += histogram->data_[i] * log2p;
if (depth > 15) {
depth = 15;
}
if (depth > max_depth) {
max_depth = depth;
}
++depth_histo[depth];
++i;
} else {
/* Compute the run length of zeros and add the appropriate number of 0
and 17 code length codes to the code length code histogram. */
uint32_t reps = 1;
size_t k;
for (k = i + 1; k < data_size && histogram->data_[k] == 0; ++k) {
++reps;
}
i += reps;
if (i == data_size) {
/* Don't add any cost for the last zero run, since these are encoded
only implicitly. */
break;
}
if (reps < 3) {
depth_histo[0] += reps;
} else {
reps -= 2;
while (reps > 0) {
++depth_histo[BROTLI_REPEAT_ZERO_CODE_LENGTH];
/* Add the 3 extra bits for the 17 code length code. */
bits += 3;
reps >>= 3;
}
}
}
}
/* Add the estimated encoding cost of the code length code histogram. */
bits += (double)(18 + 2 * max_depth);
/* Add the entropy of the code length code histogram. */
bits += BitsEntropy(depth_histo, BROTLI_CODE_LENGTH_CODES);
}
return bits;
}
#undef HistogramType
| /* NOLINT(build/header_guard) */
/* Copyright 2013 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* template parameters: FN */
#define HistogramType FN(Histogram)
double FN(BrotliPopulationCost)(const HistogramType* histogram) {
static const double kOneSymbolHistogramCost = 12;
static const double kTwoSymbolHistogramCost = 20;
static const double kThreeSymbolHistogramCost = 28;
static const double kFourSymbolHistogramCost = 37;
const size_t data_size = FN(HistogramDataSize)();
int count = 0;
size_t s[5];
double bits = 0.0;
size_t i;
if (histogram->total_count_ == 0) {
return kOneSymbolHistogramCost;
}
for (i = 0; i < data_size; ++i) {
if (histogram->data_[i] > 0) {
s[count] = i;
++count;
if (count > 4) break;
}
}
if (count == 1) {
return kOneSymbolHistogramCost;
}
if (count == 2) {
return (kTwoSymbolHistogramCost + (double)histogram->total_count_);
}
if (count == 3) {
const uint32_t histo0 = histogram->data_[s[0]];
const uint32_t histo1 = histogram->data_[s[1]];
const uint32_t histo2 = histogram->data_[s[2]];
const uint32_t histomax =
BROTLI_MAX(uint32_t, histo0, BROTLI_MAX(uint32_t, histo1, histo2));
return (kThreeSymbolHistogramCost +
2 * (histo0 + histo1 + histo2) - histomax);
}
if (count == 4) {
uint32_t histo[4];
uint32_t h23;
uint32_t histomax;
for (i = 0; i < 4; ++i) {
histo[i] = histogram->data_[s[i]];
}
/* Sort */
for (i = 0; i < 4; ++i) {
size_t j;
for (j = i + 1; j < 4; ++j) {
if (histo[j] > histo[i]) {
BROTLI_SWAP(uint32_t, histo, j, i);
}
}
}
h23 = histo[2] + histo[3];
histomax = BROTLI_MAX(uint32_t, h23, histo[0]);
return (kFourSymbolHistogramCost +
3 * h23 + 2 * (histo[0] + histo[1]) - histomax);
}
{
/* In this loop we compute the entropy of the histogram and simultaneously
build a simplified histogram of the code length codes where we use the
zero repeat code 17, but we don't use the non-zero repeat code 16. */
size_t max_depth = 1;
uint32_t depth_histo[BROTLI_CODE_LENGTH_CODES] = { 0 };
const double log2total = FastLog2(histogram->total_count_);
for (i = 0; i < data_size;) {
if (histogram->data_[i] > 0) {
/* Compute -log2(P(symbol)) = -log2(count(symbol)/total_count) =
= log2(total_count) - log2(count(symbol)) */
double log2p = log2total - FastLog2(histogram->data_[i]);
/* Approximate the bit depth by round(-log2(P(symbol))) */
size_t depth = (size_t)(log2p + 0.5);
bits += histogram->data_[i] * log2p;
if (depth > 15) {
depth = 15;
}
if (depth > max_depth) {
max_depth = depth;
}
++depth_histo[depth];
++i;
} else {
/* Compute the run length of zeros and add the appropriate number of 0
and 17 code length codes to the code length code histogram. */
uint32_t reps = 1;
size_t k;
for (k = i + 1; k < data_size && histogram->data_[k] == 0; ++k) {
++reps;
}
i += reps;
if (i == data_size) {
/* Don't add any cost for the last zero run, since these are encoded
only implicitly. */
break;
}
if (reps < 3) {
depth_histo[0] += reps;
} else {
reps -= 2;
while (reps > 0) {
++depth_histo[BROTLI_REPEAT_ZERO_CODE_LENGTH];
/* Add the 3 extra bits for the 17 code length code. */
bits += 3;
reps >>= 3;
}
}
}
}
/* Add the estimated encoding cost of the code length code histogram. */
bits += (double)(18 + 2 * max_depth);
/* Add the entropy of the code length code histogram. */
bits += BitsEntropy(depth_histo, BROTLI_CODE_LENGTH_CODES);
}
return bits;
}
#undef HistogramType
| -1 |
dotnet/runtime | 65,926 | FastMod for EEHashTable (Faster virtual generics) | Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR` but that is a more complicated change and I think this one worth having too anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| EgorBo | "2022-02-27T11:34:01Z" | "2022-03-01T19:43:37Z" | a5a7836f8193414fe64232d5c386b6402eb70e19 | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | FastMod for EEHashTable (Faster virtual generics). Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR` but that is a more complicated change and I think this one worth having too anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| ./src/native/external/zlib/gzguts.h | /* gzguts.h -- zlib internal header definitions for gz* operations
* Copyright (C) 2004, 2005, 2010, 2011, 2012, 2013, 2016 Mark Adler
* For conditions of distribution and use, see copyright notice in zlib.h
*/
#ifdef _LARGEFILE64_SOURCE
# ifndef _LARGEFILE_SOURCE
# define _LARGEFILE_SOURCE 1
# endif
# ifdef _FILE_OFFSET_BITS
# undef _FILE_OFFSET_BITS
# endif
#endif
#ifdef HAVE_HIDDEN
# define ZLIB_INTERNAL __attribute__((visibility ("hidden")))
#else
# define ZLIB_INTERNAL
#endif
#include <stdio.h>
#include "zlib.h"
#ifdef STDC
# include <string.h>
# include <stdlib.h>
# include <limits.h>
#endif
#ifndef _POSIX_SOURCE
# define _POSIX_SOURCE
#endif
#include <fcntl.h>
#ifdef _WIN32
# include <stddef.h>
#endif
#if defined(__TURBOC__) || defined(_MSC_VER) || defined(_WIN32)
# include <io.h>
#endif
#if defined(_WIN32) || defined(__CYGWIN__)
# define WIDECHAR
#endif
#ifdef WINAPI_FAMILY
# define open _open
# define read _read
# define write _write
# define close _close
#endif
#ifdef NO_DEFLATE /* for compatibility with old definition */
# define NO_GZCOMPRESS
#endif
#if defined(STDC99) || (defined(__TURBOC__) && __TURBOC__ >= 0x550)
# ifndef HAVE_VSNPRINTF
# define HAVE_VSNPRINTF
# endif
#endif
#if defined(__CYGWIN__)
# ifndef HAVE_VSNPRINTF
# define HAVE_VSNPRINTF
# endif
#endif
#if defined(MSDOS) && defined(__BORLANDC__) && (BORLANDC > 0x410)
# ifndef HAVE_VSNPRINTF
# define HAVE_VSNPRINTF
# endif
#endif
#ifndef HAVE_VSNPRINTF
# ifdef MSDOS
/* vsnprintf may exist on some MS-DOS compilers (DJGPP?),
but for now we just assume it doesn't. */
# define NO_vsnprintf
# endif
# ifdef __TURBOC__
# define NO_vsnprintf
# endif
# ifdef WIN32
/* In Win32, vsnprintf is available as the "non-ANSI" _vsnprintf. */
# if !defined(vsnprintf) && !defined(NO_vsnprintf)
# if !defined(_MSC_VER) || ( defined(_MSC_VER) && _MSC_VER < 1500 )
# define vsnprintf _vsnprintf
# endif
# endif
# endif
# ifdef __SASC
# define NO_vsnprintf
# endif
# ifdef VMS
# define NO_vsnprintf
# endif
# ifdef __OS400__
# define NO_vsnprintf
# endif
# ifdef __MVS__
# define NO_vsnprintf
# endif
#endif
/* unlike snprintf (which is required in C99), _snprintf does not guarantee
null termination of the result -- however this is only used in gzlib.c where
the result is assured to fit in the space provided */
#if defined(_MSC_VER) && _MSC_VER < 1900
# define snprintf _snprintf
#endif
#ifndef local
# define local static
#endif
/* since "static" is used to mean two completely different things in C, we
define "local" for the non-static meaning of "static", for readability
(compile with -Dlocal if your debugger can't find static symbols) */
/* gz* functions always use library allocation functions */
#ifndef STDC
extern voidp malloc OF((uInt size));
extern void free OF((voidpf ptr));
#endif
/* get errno and strerror definition */
#if defined UNDER_CE
# include <windows.h>
# define zstrerror() gz_strwinerror((DWORD)GetLastError())
#else
# ifndef NO_STRERROR
# include <errno.h>
# define zstrerror() strerror(errno)
# else
# define zstrerror() "stdio error (consult errno)"
# endif
#endif
/* provide prototypes for these when building zlib without LFS */
#if !defined(_LARGEFILE64_SOURCE) || _LFS64_LARGEFILE-0 == 0
ZEXTERN gzFile ZEXPORT gzopen64 OF((const char *, const char *));
ZEXTERN z_off64_t ZEXPORT gzseek64 OF((gzFile, z_off64_t, int));
ZEXTERN z_off64_t ZEXPORT gztell64 OF((gzFile));
ZEXTERN z_off64_t ZEXPORT gzoffset64 OF((gzFile));
#endif
/* default memLevel */
#if MAX_MEM_LEVEL >= 8
# define DEF_MEM_LEVEL 8
#else
# define DEF_MEM_LEVEL MAX_MEM_LEVEL
#endif
/* default i/o buffer size -- double this for output when reading (this and
twice this must be able to fit in an unsigned type) */
#define GZBUFSIZE 8192
/* gzip modes, also provide a little integrity check on the passed structure */
#define GZ_NONE 0
#define GZ_READ 7247
#define GZ_WRITE 31153
#define GZ_APPEND 1 /* mode set to GZ_WRITE after the file is opened */
/* values for gz_state how */
#define LOOK 0 /* look for a gzip header */
#define COPY 1 /* copy input directly */
#define GZIP 2 /* decompress a gzip stream */
/* internal gzip file state data structure */
typedef struct {
/* exposed contents for gzgetc() macro */
struct gzFile_s x; /* "x" for exposed */
/* x.have: number of bytes available at x.next */
/* x.next: next output data to deliver or write */
/* x.pos: current position in uncompressed data */
/* used for both reading and writing */
int mode; /* see gzip modes above */
int fd; /* file descriptor */
char *path; /* path or fd for error messages */
unsigned size; /* buffer size, zero if not allocated yet */
unsigned want; /* requested buffer size, default is GZBUFSIZE */
unsigned char *in; /* input buffer (double-sized when writing) */
unsigned char *out; /* output buffer (double-sized when reading) */
int direct; /* 0 if processing gzip, 1 if transparent */
/* just for reading */
int how; /* 0: get header, 1: copy, 2: decompress */
z_off64_t start; /* where the gzip data started, for rewinding */
int eof; /* true if end of input file reached */
int past; /* true if read requested past end */
/* just for writing */
int level; /* compression level */
int strategy; /* compression strategy */
/* seek request */
z_off64_t skip; /* amount to skip (already rewound if backwards) */
int seek; /* true if seek request pending */
/* error information */
int err; /* error code */
char *msg; /* error message */
/* zlib inflate or deflate stream */
z_stream strm; /* stream structure in-place (not a pointer) */
} gz_state;
typedef gz_state FAR *gz_statep;
/* shared functions */
void ZLIB_INTERNAL gz_error OF((gz_statep, int, const char *));
#if defined UNDER_CE
char ZLIB_INTERNAL *gz_strwinerror OF((DWORD error));
#endif
/* GT_OFF(x), where x is an unsigned value, is true if x > maximum z_off64_t
value -- needed when comparing unsigned to z_off64_t, which is signed
(possible z_off64_t types off_t, off64_t, and long are all signed) */
#ifdef INT_MAX
# define GT_OFF(x) (sizeof(int) == sizeof(z_off64_t) && (x) > INT_MAX)
#else
unsigned ZLIB_INTERNAL gz_intmax OF((void));
# define GT_OFF(x) (sizeof(int) == sizeof(z_off64_t) && (x) > gz_intmax())
#endif
| /* gzguts.h -- zlib internal header definitions for gz* operations
* Copyright (C) 2004, 2005, 2010, 2011, 2012, 2013, 2016 Mark Adler
* For conditions of distribution and use, see copyright notice in zlib.h
*/
#ifdef _LARGEFILE64_SOURCE
# ifndef _LARGEFILE_SOURCE
# define _LARGEFILE_SOURCE 1
# endif
# ifdef _FILE_OFFSET_BITS
# undef _FILE_OFFSET_BITS
# endif
#endif
#ifdef HAVE_HIDDEN
# define ZLIB_INTERNAL __attribute__((visibility ("hidden")))
#else
# define ZLIB_INTERNAL
#endif
#include <stdio.h>
#include "zlib.h"
#ifdef STDC
# include <string.h>
# include <stdlib.h>
# include <limits.h>
#endif
#ifndef _POSIX_SOURCE
# define _POSIX_SOURCE
#endif
#include <fcntl.h>
#ifdef _WIN32
# include <stddef.h>
#endif
#if defined(__TURBOC__) || defined(_MSC_VER) || defined(_WIN32)
# include <io.h>
#endif
#if defined(_WIN32) || defined(__CYGWIN__)
# define WIDECHAR
#endif
#ifdef WINAPI_FAMILY
# define open _open
# define read _read
# define write _write
# define close _close
#endif
#ifdef NO_DEFLATE /* for compatibility with old definition */
# define NO_GZCOMPRESS
#endif
#if defined(STDC99) || (defined(__TURBOC__) && __TURBOC__ >= 0x550)
# ifndef HAVE_VSNPRINTF
# define HAVE_VSNPRINTF
# endif
#endif
#if defined(__CYGWIN__)
# ifndef HAVE_VSNPRINTF
# define HAVE_VSNPRINTF
# endif
#endif
#if defined(MSDOS) && defined(__BORLANDC__) && (BORLANDC > 0x410)
# ifndef HAVE_VSNPRINTF
# define HAVE_VSNPRINTF
# endif
#endif
#ifndef HAVE_VSNPRINTF
# ifdef MSDOS
/* vsnprintf may exist on some MS-DOS compilers (DJGPP?),
but for now we just assume it doesn't. */
# define NO_vsnprintf
# endif
# ifdef __TURBOC__
# define NO_vsnprintf
# endif
# ifdef WIN32
/* In Win32, vsnprintf is available as the "non-ANSI" _vsnprintf. */
# if !defined(vsnprintf) && !defined(NO_vsnprintf)
# if !defined(_MSC_VER) || ( defined(_MSC_VER) && _MSC_VER < 1500 )
# define vsnprintf _vsnprintf
# endif
# endif
# endif
# ifdef __SASC
# define NO_vsnprintf
# endif
# ifdef VMS
# define NO_vsnprintf
# endif
# ifdef __OS400__
# define NO_vsnprintf
# endif
# ifdef __MVS__
# define NO_vsnprintf
# endif
#endif
/* unlike snprintf (which is required in C99), _snprintf does not guarantee
null termination of the result -- however this is only used in gzlib.c where
the result is assured to fit in the space provided */
#if defined(_MSC_VER) && _MSC_VER < 1900
# define snprintf _snprintf
#endif
#ifndef local
# define local static
#endif
/* since "static" is used to mean two completely different things in C, we
define "local" for the non-static meaning of "static", for readability
(compile with -Dlocal if your debugger can't find static symbols) */
/* gz* functions always use library allocation functions */
#ifndef STDC
extern voidp malloc OF((uInt size));
extern void free OF((voidpf ptr));
#endif
/* get errno and strerror definition */
#if defined UNDER_CE
# include <windows.h>
# define zstrerror() gz_strwinerror((DWORD)GetLastError())
#else
# ifndef NO_STRERROR
# include <errno.h>
# define zstrerror() strerror(errno)
# else
# define zstrerror() "stdio error (consult errno)"
# endif
#endif
/* provide prototypes for these when building zlib without LFS */
#if !defined(_LARGEFILE64_SOURCE) || _LFS64_LARGEFILE-0 == 0
ZEXTERN gzFile ZEXPORT gzopen64 OF((const char *, const char *));
ZEXTERN z_off64_t ZEXPORT gzseek64 OF((gzFile, z_off64_t, int));
ZEXTERN z_off64_t ZEXPORT gztell64 OF((gzFile));
ZEXTERN z_off64_t ZEXPORT gzoffset64 OF((gzFile));
#endif
/* default memLevel */
#if MAX_MEM_LEVEL >= 8
# define DEF_MEM_LEVEL 8
#else
# define DEF_MEM_LEVEL MAX_MEM_LEVEL
#endif
/* default i/o buffer size -- double this for output when reading (this and
twice this must be able to fit in an unsigned type) */
#define GZBUFSIZE 8192
/* gzip modes, also provide a little integrity check on the passed structure */
#define GZ_NONE 0
#define GZ_READ 7247
#define GZ_WRITE 31153
#define GZ_APPEND 1 /* mode set to GZ_WRITE after the file is opened */
/* values for gz_state how */
#define LOOK 0 /* look for a gzip header */
#define COPY 1 /* copy input directly */
#define GZIP 2 /* decompress a gzip stream */
/* internal gzip file state data structure */
typedef struct {
/* exposed contents for gzgetc() macro */
struct gzFile_s x; /* "x" for exposed */
/* x.have: number of bytes available at x.next */
/* x.next: next output data to deliver or write */
/* x.pos: current position in uncompressed data */
/* used for both reading and writing */
int mode; /* see gzip modes above */
int fd; /* file descriptor */
char *path; /* path or fd for error messages */
unsigned size; /* buffer size, zero if not allocated yet */
unsigned want; /* requested buffer size, default is GZBUFSIZE */
unsigned char *in; /* input buffer (double-sized when writing) */
unsigned char *out; /* output buffer (double-sized when reading) */
int direct; /* 0 if processing gzip, 1 if transparent */
/* just for reading */
int how; /* 0: get header, 1: copy, 2: decompress */
z_off64_t start; /* where the gzip data started, for rewinding */
int eof; /* true if end of input file reached */
int past; /* true if read requested past end */
/* just for writing */
int level; /* compression level */
int strategy; /* compression strategy */
/* seek request */
z_off64_t skip; /* amount to skip (already rewound if backwards) */
int seek; /* true if seek request pending */
/* error information */
int err; /* error code */
char *msg; /* error message */
/* zlib inflate or deflate stream */
z_stream strm; /* stream structure in-place (not a pointer) */
} gz_state;
typedef gz_state FAR *gz_statep;
/* shared functions */
void ZLIB_INTERNAL gz_error OF((gz_statep, int, const char *));
#if defined UNDER_CE
char ZLIB_INTERNAL *gz_strwinerror OF((DWORD error));
#endif
/* GT_OFF(x), where x is an unsigned value, is true if x > maximum z_off64_t
value -- needed when comparing unsigned to z_off64_t, which is signed
(possible z_off64_t types off_t, off64_t, and long are all signed) */
#ifdef INT_MAX
# define GT_OFF(x) (sizeof(int) == sizeof(z_off64_t) && (x) > INT_MAX)
#else
unsigned ZLIB_INTERNAL gz_intmax OF((void));
# define GT_OFF(x) (sizeof(int) == sizeof(z_off64_t) && (x) > gz_intmax())
#endif
| -1 |
dotnet/runtime | 65,926 | FastMod for EEHashTable (Faster virtual generics) | Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR` but that is a more complicated change and I think this one worth having too anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| EgorBo | "2022-02-27T11:34:01Z" | "2022-03-01T19:43:37Z" | a5a7836f8193414fe64232d5c386b6402eb70e19 | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | FastMod for EEHashTable (Faster virtual generics). Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR` but that is a more complicated change and I think this one worth having too anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| ./src/coreclr/pal/src/libunwind_mac/include/endian.h | // This is an incomplete & imprecise implementation of the
// standard file by the same name
#pragma once
#define __LITTLE_ENDIAN 1234
#define __BIG_ENDIAN 4321
#define __BYTE_ORDER __LITTLE_ENDIAN
| // This is an incomplete & imprecise implementation of the
// standard file by the same name
#pragma once
#define __LITTLE_ENDIAN 1234
#define __BIG_ENDIAN 4321
#define __BYTE_ORDER __LITTLE_ENDIAN
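/*
 * Usage sketch (illustrative): consumers of this shim typically only need a
 * compile-time byte-order test such as
 *
 *   #if __BYTE_ORDER == __LITTLE_ENDIAN
 *   ... little-endian code path ...
 *   #endif
 *
 * so the three macros above are enough for that purpose.
 */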
| -1 |
dotnet/runtime | 65,926 | FastMod for EEHashTable (Faster virtual generics) | Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR` but that is a more complicated change and I think this one worth having too anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| EgorBo | "2022-02-27T11:34:01Z" | "2022-03-01T19:43:37Z" | a5a7836f8193414fe64232d5c386b6402eb70e19 | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | FastMod for EEHashTable (Faster virtual generics). Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR` but that is a more complicated change and I think this one worth having too anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| ./src/coreclr/pal/inc/rt/intrin.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "palrt.h"
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "palrt.h"
| -1 |
dotnet/runtime | 65,926 | FastMod for EEHashTable (Faster virtual generics) | Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR` but that is a more complicated change and I think this one worth having too anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| EgorBo | "2022-02-27T11:34:01Z" | "2022-03-01T19:43:37Z" | a5a7836f8193414fe64232d5c386b6402eb70e19 | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | FastMod for EEHashTable (Faster virtual generics). Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR` but that is a more complicated change and I think this one worth having too anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| ./src/native/external/rapidjson/internal/pow10.h | // Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef RAPIDJSON_POW10_
#define RAPIDJSON_POW10_
#include "../rapidjson.h"
RAPIDJSON_NAMESPACE_BEGIN
namespace internal {
//! Computes integer powers of 10 in double (10.0^n).
/*! This function uses a lookup table for fast and accurate results.
    \param n non-negative exponent. Must be <= 308.
\return 10.0^n
*/
inline double Pow10(int n) {
static const double e[] = { // 1e-0...1e308: 309 * 8 bytes = 2472 bytes
1e+0,
1e+1, 1e+2, 1e+3, 1e+4, 1e+5, 1e+6, 1e+7, 1e+8, 1e+9, 1e+10, 1e+11, 1e+12, 1e+13, 1e+14, 1e+15, 1e+16, 1e+17, 1e+18, 1e+19, 1e+20,
1e+21, 1e+22, 1e+23, 1e+24, 1e+25, 1e+26, 1e+27, 1e+28, 1e+29, 1e+30, 1e+31, 1e+32, 1e+33, 1e+34, 1e+35, 1e+36, 1e+37, 1e+38, 1e+39, 1e+40,
1e+41, 1e+42, 1e+43, 1e+44, 1e+45, 1e+46, 1e+47, 1e+48, 1e+49, 1e+50, 1e+51, 1e+52, 1e+53, 1e+54, 1e+55, 1e+56, 1e+57, 1e+58, 1e+59, 1e+60,
1e+61, 1e+62, 1e+63, 1e+64, 1e+65, 1e+66, 1e+67, 1e+68, 1e+69, 1e+70, 1e+71, 1e+72, 1e+73, 1e+74, 1e+75, 1e+76, 1e+77, 1e+78, 1e+79, 1e+80,
1e+81, 1e+82, 1e+83, 1e+84, 1e+85, 1e+86, 1e+87, 1e+88, 1e+89, 1e+90, 1e+91, 1e+92, 1e+93, 1e+94, 1e+95, 1e+96, 1e+97, 1e+98, 1e+99, 1e+100,
1e+101,1e+102,1e+103,1e+104,1e+105,1e+106,1e+107,1e+108,1e+109,1e+110,1e+111,1e+112,1e+113,1e+114,1e+115,1e+116,1e+117,1e+118,1e+119,1e+120,
1e+121,1e+122,1e+123,1e+124,1e+125,1e+126,1e+127,1e+128,1e+129,1e+130,1e+131,1e+132,1e+133,1e+134,1e+135,1e+136,1e+137,1e+138,1e+139,1e+140,
1e+141,1e+142,1e+143,1e+144,1e+145,1e+146,1e+147,1e+148,1e+149,1e+150,1e+151,1e+152,1e+153,1e+154,1e+155,1e+156,1e+157,1e+158,1e+159,1e+160,
1e+161,1e+162,1e+163,1e+164,1e+165,1e+166,1e+167,1e+168,1e+169,1e+170,1e+171,1e+172,1e+173,1e+174,1e+175,1e+176,1e+177,1e+178,1e+179,1e+180,
1e+181,1e+182,1e+183,1e+184,1e+185,1e+186,1e+187,1e+188,1e+189,1e+190,1e+191,1e+192,1e+193,1e+194,1e+195,1e+196,1e+197,1e+198,1e+199,1e+200,
1e+201,1e+202,1e+203,1e+204,1e+205,1e+206,1e+207,1e+208,1e+209,1e+210,1e+211,1e+212,1e+213,1e+214,1e+215,1e+216,1e+217,1e+218,1e+219,1e+220,
1e+221,1e+222,1e+223,1e+224,1e+225,1e+226,1e+227,1e+228,1e+229,1e+230,1e+231,1e+232,1e+233,1e+234,1e+235,1e+236,1e+237,1e+238,1e+239,1e+240,
1e+241,1e+242,1e+243,1e+244,1e+245,1e+246,1e+247,1e+248,1e+249,1e+250,1e+251,1e+252,1e+253,1e+254,1e+255,1e+256,1e+257,1e+258,1e+259,1e+260,
1e+261,1e+262,1e+263,1e+264,1e+265,1e+266,1e+267,1e+268,1e+269,1e+270,1e+271,1e+272,1e+273,1e+274,1e+275,1e+276,1e+277,1e+278,1e+279,1e+280,
1e+281,1e+282,1e+283,1e+284,1e+285,1e+286,1e+287,1e+288,1e+289,1e+290,1e+291,1e+292,1e+293,1e+294,1e+295,1e+296,1e+297,1e+298,1e+299,1e+300,
1e+301,1e+302,1e+303,1e+304,1e+305,1e+306,1e+307,1e+308
};
RAPIDJSON_ASSERT(n >= 0 && n <= 308);
return e[n];
}
} // namespace internal
RAPIDJSON_NAMESPACE_END
#endif // RAPIDJSON_POW10_
| // Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef RAPIDJSON_POW10_
#define RAPIDJSON_POW10_
#include "../rapidjson.h"
RAPIDJSON_NAMESPACE_BEGIN
namespace internal {
//! Computes integer powers of 10 in double (10.0^n).
/*! This function uses a lookup table for fast and accurate results.
    \param n non-negative exponent. Must be <= 308.
\return 10.0^n
*/
inline double Pow10(int n) {
static const double e[] = { // 1e-0...1e308: 309 * 8 bytes = 2472 bytes
1e+0,
1e+1, 1e+2, 1e+3, 1e+4, 1e+5, 1e+6, 1e+7, 1e+8, 1e+9, 1e+10, 1e+11, 1e+12, 1e+13, 1e+14, 1e+15, 1e+16, 1e+17, 1e+18, 1e+19, 1e+20,
1e+21, 1e+22, 1e+23, 1e+24, 1e+25, 1e+26, 1e+27, 1e+28, 1e+29, 1e+30, 1e+31, 1e+32, 1e+33, 1e+34, 1e+35, 1e+36, 1e+37, 1e+38, 1e+39, 1e+40,
1e+41, 1e+42, 1e+43, 1e+44, 1e+45, 1e+46, 1e+47, 1e+48, 1e+49, 1e+50, 1e+51, 1e+52, 1e+53, 1e+54, 1e+55, 1e+56, 1e+57, 1e+58, 1e+59, 1e+60,
1e+61, 1e+62, 1e+63, 1e+64, 1e+65, 1e+66, 1e+67, 1e+68, 1e+69, 1e+70, 1e+71, 1e+72, 1e+73, 1e+74, 1e+75, 1e+76, 1e+77, 1e+78, 1e+79, 1e+80,
1e+81, 1e+82, 1e+83, 1e+84, 1e+85, 1e+86, 1e+87, 1e+88, 1e+89, 1e+90, 1e+91, 1e+92, 1e+93, 1e+94, 1e+95, 1e+96, 1e+97, 1e+98, 1e+99, 1e+100,
1e+101,1e+102,1e+103,1e+104,1e+105,1e+106,1e+107,1e+108,1e+109,1e+110,1e+111,1e+112,1e+113,1e+114,1e+115,1e+116,1e+117,1e+118,1e+119,1e+120,
1e+121,1e+122,1e+123,1e+124,1e+125,1e+126,1e+127,1e+128,1e+129,1e+130,1e+131,1e+132,1e+133,1e+134,1e+135,1e+136,1e+137,1e+138,1e+139,1e+140,
1e+141,1e+142,1e+143,1e+144,1e+145,1e+146,1e+147,1e+148,1e+149,1e+150,1e+151,1e+152,1e+153,1e+154,1e+155,1e+156,1e+157,1e+158,1e+159,1e+160,
1e+161,1e+162,1e+163,1e+164,1e+165,1e+166,1e+167,1e+168,1e+169,1e+170,1e+171,1e+172,1e+173,1e+174,1e+175,1e+176,1e+177,1e+178,1e+179,1e+180,
1e+181,1e+182,1e+183,1e+184,1e+185,1e+186,1e+187,1e+188,1e+189,1e+190,1e+191,1e+192,1e+193,1e+194,1e+195,1e+196,1e+197,1e+198,1e+199,1e+200,
1e+201,1e+202,1e+203,1e+204,1e+205,1e+206,1e+207,1e+208,1e+209,1e+210,1e+211,1e+212,1e+213,1e+214,1e+215,1e+216,1e+217,1e+218,1e+219,1e+220,
1e+221,1e+222,1e+223,1e+224,1e+225,1e+226,1e+227,1e+228,1e+229,1e+230,1e+231,1e+232,1e+233,1e+234,1e+235,1e+236,1e+237,1e+238,1e+239,1e+240,
1e+241,1e+242,1e+243,1e+244,1e+245,1e+246,1e+247,1e+248,1e+249,1e+250,1e+251,1e+252,1e+253,1e+254,1e+255,1e+256,1e+257,1e+258,1e+259,1e+260,
1e+261,1e+262,1e+263,1e+264,1e+265,1e+266,1e+267,1e+268,1e+269,1e+270,1e+271,1e+272,1e+273,1e+274,1e+275,1e+276,1e+277,1e+278,1e+279,1e+280,
1e+281,1e+282,1e+283,1e+284,1e+285,1e+286,1e+287,1e+288,1e+289,1e+290,1e+291,1e+292,1e+293,1e+294,1e+295,1e+296,1e+297,1e+298,1e+299,1e+300,
1e+301,1e+302,1e+303,1e+304,1e+305,1e+306,1e+307,1e+308
};
RAPIDJSON_ASSERT(n >= 0 && n <= 308);
return e[n];
}
} // namespace internal
RAPIDJSON_NAMESPACE_END
#endif // RAPIDJSON_POW10_
| -1 |
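Aside: the `Pow10` helper in the file above is a straight table lookup. A minimal, hypothetical usage sketch (the include path is assumed and the caller must keep `n` in the documented 0..308 range):

```cpp
#include <cstdio>
#include "rapidjson/internal/pow10.h" // assumed include path; adjust to your setup

int main()
{
    // Table lookup: exact double powers of ten, no pow() call or rounding drift.
    double scale = rapidjson::internal::Pow10(6); // 1e+6
    std::printf("%.1f\n", 12.5 * scale);          // prints 12500000.0
    return 0;
}
```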
dotnet/runtime | 65,926 | FastMod for EEHashTable (Faster virtual generics) | Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR` but that is a more complicated change and I think this one is worth having too anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| EgorBo | "2022-02-27T11:34:01Z" | "2022-03-01T19:43:37Z" | a5a7836f8193414fe64232d5c386b6402eb70e19 | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | FastMod for EEHashTable (Faster virtual generics). Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR` but that is a more complicated change and I think this one is worth having too anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| ./src/coreclr/pal/src/libunwind/src/oop/_OOP_internal.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#ifndef _OOP_internal_h
#define _OOP_internal_h
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include "libunwind_i.h"
#include "dwarf-eh.h"
#include "dwarf_i.h"
#endif /* _OOP_internal_h */
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#ifndef _OOP_internal_h
#define _OOP_internal_h
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include "libunwind_i.h"
#include "dwarf-eh.h"
#include "dwarf_i.h"
#endif /* _OOP_internal_h */
| -1 |
dotnet/runtime | 65,926 | FastMod for EEHashTable (Faster virtual generics) | Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR` but that is a more complicated change and I think this one is worth having too anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| EgorBo | "2022-02-27T11:34:01Z" | "2022-03-01T19:43:37Z" | a5a7836f8193414fe64232d5c386b6402eb70e19 | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | FastMod for EEHashTable (Faster virtual generics). Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR` but that is a more complicated change and I think this one is worth having too anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| ./src/coreclr/pal/tests/palsuite/threading/DuplicateHandle/test11/myexitcode.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================
**
** Source: duplicatehandle/test11/myexitcode.h
**
** Purpose: Define an exit code constant.
**
**
**=========================================================*/
#define TEST_EXIT_CODE 31
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================
**
** Source: duplicatehandle/test11/myexitcode.h
**
** Purpose: Define an exit code constant.
**
**
**=========================================================*/
#define TEST_EXIT_CODE 31
| -1 |
dotnet/runtime | 65,926 | FastMod for EEHashTable (Faster virtual generics) | Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR` but that is a more complicated change and I think this one is worth having too anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| EgorBo | "2022-02-27T11:34:01Z" | "2022-03-01T19:43:37Z" | a5a7836f8193414fe64232d5c386b6402eb70e19 | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | FastMod for EEHashTable (Faster virtual generics). Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR` but that is a more complicated change and I think this one is worth having too anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| ./src/coreclr/pal/src/libunwind/include/tdep-x86/dwarf-config.h | /* libunwind - a platform-independent unwind library
Copyright (c) 2003 Hewlett-Packard Development Company, L.P.
Contributed by David Mosberger-Tang <davidm@hpl.hp.com>
This file is part of libunwind.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
#ifndef dwarf_config_h
#define dwarf_config_h
/* This matches the value used by GCC (see
gcc/config/i386.h:DWARF_FRAME_REGISTERS), which leaves plenty of
room for expansion. */
#define DWARF_NUM_PRESERVED_REGS 17
#define DWARF_REGNUM_MAP_LENGTH 19
/* Return TRUE if the ADDR_SPACE uses big-endian byte-order. */
#define dwarf_is_big_endian(addr_space) 0
/* Convert a pointer to a dwarf_cursor structure to a pointer to
unw_cursor_t. */
#define dwarf_to_cursor(c) ((unw_cursor_t *) (c))
typedef struct dwarf_loc
{
unw_word_t val;
#ifndef UNW_LOCAL_ONLY
unw_word_t type; /* see X86_LOC_TYPE_* macros. */
#endif
}
dwarf_loc_t;
#endif /* dwarf_config_h */
| /* libunwind - a platform-independent unwind library
Copyright (c) 2003 Hewlett-Packard Development Company, L.P.
Contributed by David Mosberger-Tang <davidm@hpl.hp.com>
This file is part of libunwind.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
#ifndef dwarf_config_h
#define dwarf_config_h
/* This matches the value used by GCC (see
gcc/config/i386.h:DWARF_FRAME_REGISTERS), which leaves plenty of
room for expansion. */
#define DWARF_NUM_PRESERVED_REGS 17
#define DWARF_REGNUM_MAP_LENGTH 19
/* Return TRUE if the ADDR_SPACE uses big-endian byte-order. */
#define dwarf_is_big_endian(addr_space) 0
/* Convert a pointer to a dwarf_cursor structure to a pointer to
unw_cursor_t. */
#define dwarf_to_cursor(c) ((unw_cursor_t *) (c))
typedef struct dwarf_loc
{
unw_word_t val;
#ifndef UNW_LOCAL_ONLY
unw_word_t type; /* see X86_LOC_TYPE_* macros. */
#endif
}
dwarf_loc_t;
#endif /* dwarf_config_h */
| -1 |
dotnet/runtime | 65,926 | FastMod for EEHashTable (Faster virtual generics) | Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR` but that is a more complicated change and I think this one is worth having too anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| EgorBo | "2022-02-27T11:34:01Z" | "2022-03-01T19:43:37Z" | a5a7836f8193414fe64232d5c386b6402eb70e19 | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | FastMod for EEHashTable (Faster virtual generics). Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR` but that is a more complicated change and I think this one is worth having too anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| ./src/coreclr/tools/superpmi/mcs/verbasmdump.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//----------------------------------------------------------
// verbASMDump.h - verb that
//----------------------------------------------------------
#ifndef _verbASMDump
#define _verbASMDump
class verbASMDump
{
public:
static int DoWork(const char* nameOfInput1, const char* nameOfOutput, int indexCount, const int* indexes);
};
#endif
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//----------------------------------------------------------
// verbASMDump.h - verb that
//----------------------------------------------------------
#ifndef _verbASMDump
#define _verbASMDump
class verbASMDump
{
public:
static int DoWork(const char* nameOfInput1, const char* nameOfOutput, int indexCount, const int* indexes);
};
#endif
| -1 |
dotnet/runtime | 65,926 | FastMod for EEHashTable (Faster virtual generics) | Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR` but that is a more complicated change and I think this one is worth having too anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| EgorBo | "2022-02-27T11:34:01Z" | "2022-03-01T19:43:37Z" | a5a7836f8193414fe64232d5c386b6402eb70e19 | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | FastMod for EEHashTable (Faster virtual generics). Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR` but that is a more complicated change and I think this one is worth having too anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| ./src/coreclr/jit/sm.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// State machine header used ONLY in the JIT.
//
#ifndef __sm_h__
#define __sm_h__
#include "smcommon.h"
extern const SMState* gp_SMStates;
extern const JumpTableCell* gp_SMJumpTableCells;
extern const short* gp_StateWeights;
class CodeSeqSM // Represents a particular run of the state machine
                // For example, it maintains the array of counts for the terminated states.
                // These counts should be stored per method for them to be correct
                // in a multithreaded environment.
{
public:
Compiler* pComp;
const SMState* States;
const JumpTableCell* JumpTableCells;
const short* StateWeights; // Weight for each state. Including non-terminate states.
SM_STATE_ID curState;
int NativeSize; // This is a signed integer!
void Start(Compiler* comp);
void Reset();
void End();
void Run(SM_OPCODE opcode DEBUGARG(int level));
SM_STATE_ID GetDestState(SM_STATE_ID srcState, SM_OPCODE opcode);
// Matched a termination state
inline void TermStateMatch(SM_STATE_ID stateID DEBUGARG(bool verbose))
{
assert(States[stateID].term);
#ifdef DEBUG
#ifndef SMGEN_COMPILE
if (verbose)
{
printf("weight=%3d : state %3d [ %s ]\n", StateWeights[stateID], stateID, StateDesc(stateID));
}
#endif // SMGEN_COMPILE
#endif // DEBUG
NativeSize += StateWeights[stateID];
}
// Given an SM opcode retrieve the weight for this single opcode state.
// For example, ID for single opcode state SM_NOSHOW is 2.
inline short GetWeightForOpcode(SM_OPCODE opcode)
{
SM_STATE_ID stateID = ((SM_STATE_ID)opcode) + SM_STATE_ID_START + 1;
return StateWeights[stateID];
}
#ifdef DEBUG
const char* StateDesc(SM_STATE_ID stateID);
#endif
static SM_OPCODE MapToSMOpcode(OPCODE opcode);
};
#endif /* __sm_h__ */
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// State machine header used ONLY in the JIT.
//
#ifndef __sm_h__
#define __sm_h__
#include "smcommon.h"
extern const SMState* gp_SMStates;
extern const JumpTableCell* gp_SMJumpTableCells;
extern const short* gp_StateWeights;
class CodeSeqSM // Represents a particular run of the state machine
                // For example, it maintains the array of counts for the terminated states.
                // These counts should be stored per method for them to be correct
                // in a multithreaded environment.
{
public:
Compiler* pComp;
const SMState* States;
const JumpTableCell* JumpTableCells;
const short* StateWeights; // Weight for each state. Including non-terminate states.
SM_STATE_ID curState;
int NativeSize; // This is a signed integer!
void Start(Compiler* comp);
void Reset();
void End();
void Run(SM_OPCODE opcode DEBUGARG(int level));
SM_STATE_ID GetDestState(SM_STATE_ID srcState, SM_OPCODE opcode);
// Matched a termination state
inline void TermStateMatch(SM_STATE_ID stateID DEBUGARG(bool verbose))
{
assert(States[stateID].term);
#ifdef DEBUG
#ifndef SMGEN_COMPILE
if (verbose)
{
printf("weight=%3d : state %3d [ %s ]\n", StateWeights[stateID], stateID, StateDesc(stateID));
}
#endif // SMGEN_COMPILE
#endif // DEBUG
NativeSize += StateWeights[stateID];
}
// Given an SM opcode retrieve the weight for this single opcode state.
// For example, ID for single opcode state SM_NOSHOW is 2.
inline short GetWeightForOpcode(SM_OPCODE opcode)
{
SM_STATE_ID stateID = ((SM_STATE_ID)opcode) + SM_STATE_ID_START + 1;
return StateWeights[stateID];
}
#ifdef DEBUG
const char* StateDesc(SM_STATE_ID stateID);
#endif
static SM_OPCODE MapToSMOpcode(OPCODE opcode);
};
#endif /* __sm_h__ */
| -1 |
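Aside: the `CodeSeqSM` comments in the header above describe two simple mechanisms: a matched terminal state adds its weight to a running native-size estimate (`TermStateMatch`), and a single-opcode state's ID is just the opcode offset by `SM_STATE_ID_START + 1` (`GetWeightForOpcode`). The toy sketch below mimics only the accumulation, with invented weights and state IDs; it does not reproduce the real tables.

```cpp
#include <cstdio>

// Toy model of the accumulation performed by CodeSeqSM::TermStateMatch:
// every matched terminal state contributes its weight to NativeSize.
static int EstimateNativeSize(const short* stateWeights,
                              const int* matchedStates, int count)
{
    int nativeSize = 0;
    for (int i = 0; i < count; ++i)
        nativeSize += stateWeights[matchedStates[i]];
    return nativeSize;
}

int main()
{
    short weights[] = { 0, 0, 3, 5, 2 };  // hypothetical per-state weights
    int matched[] = { 2, 4, 3 };          // hypothetical matched terminal states
    std::printf("estimated native size: %d\n",
                EstimateNativeSize(weights, matched, 3));
    return 0;
}
```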
dotnet/runtime | 65,926 | FastMod for EEHashTable (Faster virtual generics) | Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR` but that is a more complicated change and I think this one is worth having too anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| EgorBo | "2022-02-27T11:34:01Z" | "2022-03-01T19:43:37Z" | a5a7836f8193414fe64232d5c386b6402eb70e19 | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | FastMod for EEHashTable (Faster virtual generics). Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR` but that is a more complicated change and I think this one is worth having too anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| ./src/coreclr/vm/arm/virtualcallstubcpu.hpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// VirtualCallStubCpu.hpp
//
#ifndef _VIRTUAL_CALL_STUB_ARM_H
#define _VIRTUAL_CALL_STUB_ARM_H
#ifdef DECLARE_DATA
#include "asmconstants.h"
#endif
//#define STUB_LOGGING
#include <pshpack1.h> // Since we are placing code, we want byte packing of the structs
#define USES_LOOKUP_STUBS 1
/*********************************************************************************************
Stubs that contain code are all part of larger structs called Holders. There is a
Holder for each kind of stub, i.e. XXXStub is contained within XXXHolder. Holders are
essentially an implementation trick that allowed rearranging the code sequences more
easily while trying out different alternatives, and for dealing with any alignment
issues in a way that was mostly immune to the actual code sequences. These Holders
should be revisited when the stub code sequences are fixed, since in many cases they
add extra space to a stub that is not really needed.
Stubs are placed in cache and hash tables. Since unaligned access of data in memory
is very slow, the keys used in those tables should be aligned. The things used as keys
typically also occur in the generated code, e.g. a token as an immediate part of an instruction.
For now, to avoid alignment computations as different code strategies are tried out, the key
fields are all in the Holders. Eventually, many of these fields should be dropped, and the instruction
streams aligned so that the immediate fields fall on aligned boundaries.
*/
#if USES_LOOKUP_STUBS
struct LookupStub;
struct LookupHolder;
/*LookupStub**************************************************************************************
Virtual and interface call sites are initially set up to point at LookupStubs.
This is because the runtime type of the <this> pointer is not yet known,
so the target cannot be resolved. Note: if the jit is able to determine the runtime type
of the <this> pointer, it should be generating a direct call not a virtual or interface call.
This stub pushes a lookup token onto the stack to identify the sought after method, and then
jumps into the EE (VirtualCallStubManager::ResolveWorkerStub) to effectuate the lookup and
transfer of control to the appropriate target method implementation, perhaps patching of the call site
along the way to point to a more appropriate stub. Hence callsites that point to LookupStubs
get quickly changed to point to another kind of stub.
*/
struct LookupStub
{
inline PCODE entryPoint() { LIMITED_METHOD_CONTRACT; return (PCODE)&_entryPoint[0] + THUMB_CODE; }
inline size_t token() { LIMITED_METHOD_CONTRACT; return _token; }
inline size_t size() { LIMITED_METHOD_CONTRACT; return sizeof(LookupStub); }
private:
friend struct LookupHolder;
const static int entryPointLen = 4;
WORD _entryPoint[entryPointLen];
PCODE _resolveWorkerTarget; // xx xx xx xx target address
size_t _token; // xx xx xx xx 32-bit constant
};
/* LookupHolders are the containers for LookupStubs, they provide for any alignment of
stubs as necessary. In the case of LookupStubs, alignment is necessary since
LookupStubs are placed in a hash table keyed by token. */
struct LookupHolder
{
static void InitializeStatic() { LIMITED_METHOD_CONTRACT; }
void Initialize(LookupHolder* pLookupHolderRX, PCODE resolveWorkerTarget, size_t dispatchToken);
LookupStub* stub() { LIMITED_METHOD_CONTRACT; return &_stub; }
static LookupHolder* FromLookupEntry(PCODE lookupEntry);
private:
friend struct LookupStub;
LookupStub _stub;
};
#endif // USES_LOOKUP_STUBS
struct DispatchStub;
struct DispatchHolder;
/*DispatchStub**************************************************************************************
Monomorphic and mostly monomorphic call sites eventually point to DispatchStubs.
A dispatch stub has an expected type (expectedMT), target address (target) and fail address (failure).
If the calling frame's <this> is in fact of the expected type, then
control is transferred to the target address, the method implementation. If not,
then control is transferred to the fail address, a fail stub (see below) where a polymorphic
lookup is done to find the correct address to go to.
implementation note: Order, choice of instructions, and branch directions
should be carefully tuned since it can have an inordinate effect on performance. Particular
attention needs to be paid to the effects on the BTB and branch prediction, both in the small
and in the large, i.e. it needs to run well in the face of BTB overflow--using static predictions.
Note that since this stub is only used for mostly monomorphic callsites (ones that are not get patched
to something else), the conditional jump "jne failure" is mostly not taken, and hence it is important
that branch prediction statically predicts this, which means it must be a forward jump. The alternative
is to reverse the order of the jumps and make sure that the resulting conditional jump "je implTarget"
is statically predicted as taken, i.e. a backward jump. The current choice was taken since it was easier
to control the placement of the stubs than control the placement of the jitted code and the stubs. */
struct DispatchStub
{
inline PCODE entryPoint() { LIMITED_METHOD_CONTRACT; return (PCODE)(&_entryPoint[0]) + THUMB_CODE; }
inline size_t expectedMT() { LIMITED_METHOD_CONTRACT; return _expectedMT; }
inline PCODE implTarget() { LIMITED_METHOD_CONTRACT; return _implTarget; }
inline TADDR implTargetSlot(EntryPointSlots::SlotType *slotTypeRef) const
{
LIMITED_METHOD_CONTRACT;
_ASSERTE(slotTypeRef != nullptr);
*slotTypeRef = EntryPointSlots::SlotType_Executable;
return (TADDR)&_implTarget;
}
inline PCODE failTarget() { LIMITED_METHOD_CONTRACT; return _failTarget; }
inline size_t size() { LIMITED_METHOD_CONTRACT; return sizeof(DispatchStub); }
private:
friend struct DispatchHolder;
const static int entryPointLen = 12;
WORD _entryPoint[entryPointLen];
size_t _expectedMT;
PCODE _failTarget;
PCODE _implTarget;
};
/* DispatchHolders are the containers for DispatchStubs, they provide for any alignment of
stubs as necessary. DispatchStubs are placed in a hashtable and in a cache. The keys for both
are the pair expectedMT and token. Efficiency of the of the hash table is not a big issue,
since lookups in it are fairly rare. Efficiency of the cache is paramount since it is accessed frequently
o(see ResolveStub below). Currently we are storing both of these fields in the DispatchHolder to simplify
alignment issues. If inlineMT in the stub itself was aligned, then it could be the expectedMT field.
While the token field can be logically gotten by following the failure target to the failEntryPoint
of the ResolveStub and then to the token over there, for perf reasons of cache access, it is duplicated here.
This allows us to use DispatchStubs in the cache. The alternative is to provide some other immutable struct
for the cache composed of the triplet (expectedMT, token, target) and some sort of reclamation scheme when
they are thrown out of the cache via overwrites (since concurrency will make the obvious approaches invalid).
*/
/* @workaround for ee resolution - Since the EE does not currently have a resolver function that
does what we want, see notes in implementation of VirtualCallStubManager::Resolver, we are
using dispatch stubs to simulate what we want. That means that inlineTarget, which should be immutable
is in fact written. Hence we have moved target out into the holder and aligned it so we can
atomically update it. When we get a resolver function that does what we want, we can drop this field,
and live with just the inlineTarget field in the stub itself, since immutability will hold.*/
struct DispatchHolder
{
static void InitializeStatic()
{
LIMITED_METHOD_CONTRACT;
// Check that _implTarget is aligned in the DispatchHolder for backpatching
static_assert_no_msg(((offsetof(DispatchHolder, _stub) + offsetof(DispatchStub, _implTarget)) % sizeof(void *)) == 0);
}
void Initialize(DispatchHolder* pDispatchHolderRX, PCODE implTarget, PCODE failTarget, size_t expectedMT);
DispatchStub* stub() { LIMITED_METHOD_CONTRACT; return &_stub; }
static DispatchHolder* FromDispatchEntry(PCODE dispatchEntry);
private:
//force expectedMT to be aligned since used as key in hash tables.
DispatchStub _stub;
};
struct ResolveStub;
struct ResolveHolder;
/*ResolveStub**************************************************************************************
Polymorphic call sites and monomorphic calls that fail end up in a ResolverStub. There is only
one resolver stub built for any given token, even though there may be many call sites that
use that token and many distinct <this> types that are used in the calling call frames. A resolver stub
actually has two entry points, one for polymorphic call sites and one for dispatch stubs that fail on their
expectedMT test. There is a third part of the resolver stub that enters the ee when a decision should
be made about changing the callsite. Therefore, we have defined the resolver stub as three distinct pieces,
even though they are actually allocated as a single contiguous block of memory. These pieces are:
A ResolveStub has two entry points:
FailEntry - where the dispatch stub goes if the expected MT test fails. This piece of the stub does
a check to see how often we are actually failing. If failures are frequent, control transfers to the
patch piece to cause the call site to be changed from a mostly monomorphic callsite
(calls dispatch stub) to a polymorphic callsite (calls resolve stub). If failures are rare, control
transfers to the resolve piece (see ResolveStub). The failEntryPoint decrements a counter
every time it is entered. The ee at various times will add a large chunk to the counter.
ResolveEntry - does a lookup in a cache by hashing the actual type of the calling frame's
<this> and the token identifying the (contract,method) pair desired. If found, control is transferred
to the method implementation. If not found in the cache, the token is pushed and the ee is entered via
the ResolveWorkerStub to do a full lookup and eventual transfer to the correct method implementation. Since
there is a different resolve stub for every token, the token can be inlined and the token can be pre-hashed.
The effectiveness of this approach is highly sensitive to the effectiveness of the hashing algorithm used,
as well as its speed. It turns out it is very important to make the hash function sensitive to all
of the bits of the method table, as method tables are laid out in memory in a very non-random way. Before
making any changes to the code sequences here, it is very important to measure and tune them as perf
can vary greatly, in unexpected ways, with seeming minor changes.
Implementation note - Order, choice of instructions, and branch directions
should be carefully tuned since it can have an inordinate effect on performance. Particular
attention needs to be paid to the effects on the BTB and branch prediction, both in the small
and in the large, i.e. it needs to run well in the face of BTB overflow--using static predictions.
Note that this stub is called in highly polymorphic cases, but the cache should have been sized
and the hash function chosen to maximize the cache hit case. Hence the cmp/jcc instructions should
mostly be going down the cache hit route, and it is important that this be statically predicted as so.
Hence the 3 jcc instrs need to be forward jumps. As structured, there is only one jmp/jcc that typically
gets put in the BTB since all the others typically fall straight thru. Minimizing potential BTB entries
is important. */
struct ResolveStub
{
inline PCODE failEntryPoint() { LIMITED_METHOD_CONTRACT; return (PCODE)(&_failEntryPoint[0]) + THUMB_CODE; }
inline PCODE resolveEntryPoint() { LIMITED_METHOD_CONTRACT; return (PCODE)(&_resolveEntryPoint[0]) + THUMB_CODE; }
inline PCODE slowEntryPoint() { LIMITED_METHOD_CONTRACT; return (PCODE)(&_slowEntryPoint[0]) + THUMB_CODE; }
inline INT32* pCounter() { LIMITED_METHOD_CONTRACT; return _pCounter; }
inline UINT32 hashedToken() { LIMITED_METHOD_CONTRACT; return _hashedToken >> LOG2_PTRSIZE; }
inline size_t cacheAddress() { LIMITED_METHOD_CONTRACT; return _cacheAddress; }
inline size_t token() { LIMITED_METHOD_CONTRACT; return _token; }
inline size_t size() { LIMITED_METHOD_CONTRACT; return sizeof(ResolveStub); }
private:
friend struct ResolveHolder;
const static int resolveEntryPointLen = 32;
const static int slowEntryPointLen = 4;
const static int failEntryPointLen = 14;
WORD _resolveEntryPoint[resolveEntryPointLen];
WORD _slowEntryPoint[slowEntryPointLen];
WORD _failEntryPoint[failEntryPointLen];
INT32* _pCounter;
UINT32 _hashedToken;
size_t _cacheAddress; // lookupCache
size_t _token;
size_t _tokenSlow;
PCODE _resolveWorkerTarget;
UINT32 _cacheMask;
};
/* ResolveHolders are the containers for ResolveStubs, They provide
for any alignment of the stubs as necessary. The stubs are placed in a hash table keyed by
the token for which they are built. Efficiency of access requires that this token be aligned.
For now, we have copied that field into the ResolveHolder itself, if the resolve stub is arranged such that
any of its inlined tokens (non-prehashed) is aligned, then the token field in the ResolveHolder
is not needed. */
struct ResolveHolder
{
static void InitializeStatic() { LIMITED_METHOD_CONTRACT; }
void Initialize(ResolveHolder* pResolveHolderRX,
PCODE resolveWorkerTarget, PCODE patcherTarget,
size_t dispatchToken, UINT32 hashedToken,
void * cacheAddr, INT32 * counterAddr);
ResolveStub* stub() { LIMITED_METHOD_CONTRACT; return &_stub; }
static ResolveHolder* FromFailEntry(PCODE failEntry);
static ResolveHolder* FromResolveEntry(PCODE resolveEntry);
private:
ResolveStub _stub;
};
/*VTableCallStub**************************************************************************************
These are jump stubs that perform a vtable-base virtual call. These stubs assume that an object is placed
in the first argument register (this pointer). From there, the stub extracts the MethodTable pointer, followed by the
vtable pointer, and finally jumps to the target method at a given slot in the vtable.
*/
struct VTableCallStub
{
friend struct VTableCallHolder;
inline size_t size()
{
LIMITED_METHOD_CONTRACT;
BYTE* pStubCode = (BYTE *)this;
size_t cbSize = 4; // First ldr instruction
// If we never save r0 to the red zone, we have the short version of the stub
if (*(UINT32*)(&pStubCode[cbSize]) != 0x0c04f84d)
{
return
4 + // ldr r12,[r0]
4 + // ldr r12,[r12+offset]
4 + // ldr r12,[r12+offset]
2 + // bx r12
4; // Slot value (data storage, not a real instruction)
}
cbSize += 4; // Saving r0 into red zone
cbSize += (*(WORD*)(&pStubCode[cbSize]) == 0xf8dc ? 4 : 12); // Loading of vtable into r12
        cbSize += (*(WORD*)(&pStubCode[cbSize]) == 0xf8dc ? 4 : 12); // Loading of target address into r12
return cbSize + 6 /* Restore r0, bx*/ + 4 /* Slot value */;
}
inline PCODE entryPoint() const { LIMITED_METHOD_CONTRACT; return (PCODE)&_entryPoint[0] + THUMB_CODE; }
inline size_t token()
{
LIMITED_METHOD_CONTRACT;
DWORD slot = *(DWORD*)(reinterpret_cast<BYTE*>(this) + size() - 4);
return DispatchToken::CreateDispatchToken(slot).To_SIZE_T();
}
private:
BYTE _entryPoint[0]; // Dynamically sized stub. See Initialize() for more details.
};
/* VTableCallHolders are the containers for VTableCallStubs, they provide for any alignment of
stubs as necessary. */
struct VTableCallHolder
{
void Initialize(unsigned slot);
VTableCallStub* stub() { LIMITED_METHOD_CONTRACT; return reinterpret_cast<VTableCallStub *>(this); }
static size_t GetHolderSize(unsigned slot)
{
STATIC_CONTRACT_WRAPPER;
unsigned offsetOfIndirection = MethodTable::GetVtableOffset() + MethodTable::GetIndexOfVtableIndirection(slot) * TARGET_POINTER_SIZE;
unsigned offsetAfterIndirection = MethodTable::GetIndexAfterVtableIndirection(slot) * TARGET_POINTER_SIZE;
int indirectionsSize = (offsetOfIndirection > 0xFFF ? 12 : 4) + (offsetAfterIndirection > 0xFFF ? 12 : 4);
if (offsetOfIndirection > 0xFFF || offsetAfterIndirection > 0xFFF)
indirectionsSize += 8; // Save/restore r0 using red zone
return 6 + indirectionsSize + 4;
}
static VTableCallHolder* FromVTableCallEntry(PCODE entry)
{
LIMITED_METHOD_CONTRACT;
return (VTableCallHolder*)(entry & ~THUMB_CODE);
}
private:
// VTableCallStub follows here. It is dynamically sized on allocation because it could
// use short/long instruction sizes for the mov/jmp, depending on the slot value.
};
#include <poppack.h>
#ifdef DECLARE_DATA
#ifndef DACCESS_COMPILE
#ifdef STUB_LOGGING
extern size_t g_lookup_inline_counter;
extern size_t g_mono_call_counter;
extern size_t g_mono_miss_counter;
extern size_t g_poly_call_counter;
extern size_t g_poly_miss_counter;
#endif
TADDR StubDispatchFrame_MethodFrameVPtr;
LookupHolder* LookupHolder::FromLookupEntry(PCODE lookupEntry)
{
lookupEntry = lookupEntry & ~THUMB_CODE;
return (LookupHolder*) ( lookupEntry - offsetof(LookupHolder, _stub) - offsetof(LookupStub, _entryPoint) );
}
/* Template used to generate the stub. We generate a stub by allocating a block of
memory and copy the template over it and just update the specific fields that need
to be changed.
*/
DispatchStub dispatchInit;
DispatchHolder* DispatchHolder::FromDispatchEntry(PCODE dispatchEntry)
{
LIMITED_METHOD_CONTRACT;
dispatchEntry = dispatchEntry & ~THUMB_CODE;
DispatchHolder* dispatchHolder = (DispatchHolder*) ( dispatchEntry - offsetof(DispatchHolder, _stub) - offsetof(DispatchStub, _entryPoint) );
// _ASSERTE(dispatchHolder->_stub._entryPoint[0] == dispatchInit._entryPoint[0]);
return dispatchHolder;
}
/* Template used to generate the stub. We generate a stub by allocating a block of
memory and copy the template over it and just update the specific fields that need
to be changed.
*/
ResolveStub resolveInit;
ResolveHolder* ResolveHolder::FromFailEntry(PCODE failEntry)
{
LIMITED_METHOD_CONTRACT;
failEntry = failEntry & ~THUMB_CODE;
ResolveHolder* resolveHolder = (ResolveHolder*) ( failEntry - offsetof(ResolveHolder, _stub) - offsetof(ResolveStub, _failEntryPoint) );
// _ASSERTE(resolveHolder->_stub._resolveEntryPoint[0] == resolveInit._resolveEntryPoint[0]);
return resolveHolder;
}
ResolveHolder* ResolveHolder::FromResolveEntry(PCODE resolveEntry)
{
LIMITED_METHOD_CONTRACT;
resolveEntry = resolveEntry & ~THUMB_CODE;
ResolveHolder* resolveHolder = (ResolveHolder*) ( resolveEntry - offsetof(ResolveHolder, _stub) - offsetof(ResolveStub, _resolveEntryPoint) );
// _ASSERTE(resolveHolder->_stub._resolveEntryPoint[0] == resolveInit._resolveEntryPoint[0]);
return resolveHolder;
}
void MovRegImm(BYTE* p, int reg, TADDR imm);
void VTableCallHolder::Initialize(unsigned slot)
{
unsigned offsetOfIndirection = MethodTable::GetVtableOffset() + MethodTable::GetIndexOfVtableIndirection(slot) * TARGET_POINTER_SIZE;
unsigned offsetAfterIndirection = MethodTable::GetIndexAfterVtableIndirection(slot) * TARGET_POINTER_SIZE;
VTableCallStub* pStub = stub();
BYTE* p = (BYTE*)(pStub->entryPoint() & ~THUMB_CODE);
// ldr r12,[r0] : r12 = MethodTable pointer
*(UINT32*)p = 0xc000f8d0; p += 4;
if (offsetOfIndirection > 0xFFF || offsetAfterIndirection > 0xFFF)
{
// str r0, [sp, #-4]. Save r0 in the red zone
*(UINT32*)p = 0x0c04f84d; p += 4;
}
if (offsetOfIndirection > 0xFFF)
{
// mov r0, offsetOfIndirection
MovRegImm(p, 0, offsetOfIndirection); p += 8;
// ldr r12, [r12, r0]
*(UINT32*)p = 0xc000f85c; p += 4;
}
else
{
// ldr r12, [r12 + offset]
*(WORD *)p = 0xf8dc; p += 2;
*(WORD *)p = (WORD)(offsetOfIndirection | 0xc000); p += 2;
}
if (offsetAfterIndirection > 0xFFF)
{
// mov r0, offsetAfterIndirection
MovRegImm(p, 0, offsetAfterIndirection); p += 8;
// ldr r12, [r12, r0]
*(UINT32*)p = 0xc000f85c; p += 4;
}
else
{
// ldr r12, [r12 + offset]
*(WORD *)p = 0xf8dc; p += 2;
*(WORD *)p = (WORD)(offsetAfterIndirection | 0xc000); p += 2;
}
if (offsetOfIndirection > 0xFFF || offsetAfterIndirection > 0xFFF)
{
// ldr r0, [sp, #-4]. Restore r0 from the red zone.
*(UINT32*)p = 0x0c04f85d; p += 4;
}
// bx r12
*(UINT16*)p = 0x4760; p += 2;
// Store the slot value here for convenience. Not a real instruction (unreachable anyways)
*(UINT32*)p = slot; p += 4;
_ASSERT(p == (BYTE*)(stub()->entryPoint() & ~THUMB_CODE) + VTableCallHolder::GetHolderSize(slot));
_ASSERT(stub()->size() == VTableCallHolder::GetHolderSize(slot));
}
#endif // DACCESS_COMPILE
VirtualCallStubManager::StubKind VirtualCallStubManager::predictStubKind(PCODE stubStartAddress)
{
SUPPORTS_DAC;
#ifdef DACCESS_COMPILE
return SK_BREAKPOINT; // Dac always uses the slower lookup
#else
StubKind stubKind = SK_UNKNOWN;
TADDR pInstr = PCODEToPINSTR(stubStartAddress);
EX_TRY
{
// If stubStartAddress is completely bogus, then this might AV,
// so we protect it with SEH. An AV here is OK.
AVInRuntimeImplOkayHolder AVOkay;
WORD firstWord = *((WORD*) pInstr);
if (*((UINT32*)pInstr) == 0xc000f8d0)
{
// Confirm the third word belongs to the vtable stub pattern
WORD thirdWord = ((WORD*)pInstr)[2];
if (thirdWord == 0xf84d /* Part of str r0, [sp, #-4] */ ||
thirdWord == 0xf8dc /* Part of ldr r12, [r12 + offset] */)
stubKind = SK_VTABLECALL;
}
if (stubKind == SK_UNKNOWN)
{
// Assuming that RESOLVE_STUB_FIRST_WORD & DISPATCH_STUB_FIRST_WORD have the same value
if (firstWord == DISPATCH_STUB_FIRST_WORD)
{
WORD thirdWord = ((WORD*)pInstr)[2];
if (thirdWord == 0xf84d)
{
stubKind = SK_DISPATCH;
}
else if (thirdWord == 0xb460)
{
stubKind = SK_RESOLVE;
}
}
else if (firstWord == 0xf8df)
{
stubKind = SK_LOOKUP;
}
}
}
EX_CATCH
{
stubKind = SK_UNKNOWN;
}
EX_END_CATCH(SwallowAllExceptions);
return stubKind;
#endif // DACCESS_COMPILE
}
#endif //DECLARE_DATA
#endif // _VIRTUAL_CALL_STUB_ARM_H
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// VirtualCallStubCpu.hpp
//
#ifndef _VIRTUAL_CALL_STUB_ARM_H
#define _VIRTUAL_CALL_STUB_ARM_H
#ifdef DECLARE_DATA
#include "asmconstants.h"
#endif
//#define STUB_LOGGING
#include <pshpack1.h> // Since we are placing code, we want byte packing of the structs
#define USES_LOOKUP_STUBS 1
/*********************************************************************************************
Stubs that contain code are all part of larger structs called Holders. There is a
Holder for each kind of stub, i.e. XXXStub is contained within XXXHolder. Holders are
essentially an implementation trick that allowed rearranging the code sequences more
easily while trying out different alternatives, and for dealing with any alignment
issues in a way that was mostly immune to the actual code sequences. These Holders
should be revisited when the stub code sequences are fixed, since in many cases they
add extra space to a stub that is not really needed.
Stubs are placed in cache and hash tables. Since unaligned access of data in memory
is very slow, the keys used in those tables should be aligned. The things used as keys
typically also occur in the generated code, e.g. a token as an immediate part of an instruction.
For now, to avoid alignment computations as different code strategies are tried out, the key
fields are all in the Holders. Eventually, many of these fields should be dropped, and the instruction
streams aligned so that the immediate fields fall on aligned boundaries.
*/
#if USES_LOOKUP_STUBS
struct LookupStub;
struct LookupHolder;
/*LookupStub**************************************************************************************
Virtual and interface call sites are initially setup to point at LookupStubs.
This is because the runtime type of the <this> pointer is not yet known,
so the target cannot be resolved. Note: if the jit is able to determine the runtime type
of the <this> pointer, it should be generating a direct call not a virtual or interface call.
This stub pushes a lookup token onto the stack to identify the sought after method, and then
jumps into the EE (VirtualCallStubManager::ResolveWorkerStub) to effectuate the lookup and
transfer of control to the appropriate target method implementation, perhaps patching of the call site
along the way to point to a more appropriate stub. Hence callsites that point to LookupStubs
get quickly changed to point to another kind of stub.
*/
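/* A rough, illustrative sketch of what the stub body boils down to (pseudocode only;
   the concrete Thumb-2 words live in _entryPoint below and are filled in by
   LookupHolder::Initialize):

       push  _token                 ; identify the sought-after (contract, method) pair
       jump  _resolveWorkerTarget   ; enter the EE (VirtualCallStubManager::ResolveWorkerStub)
*/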
struct LookupStub
{
inline PCODE entryPoint() { LIMITED_METHOD_CONTRACT; return (PCODE)&_entryPoint[0] + THUMB_CODE; }
inline size_t token() { LIMITED_METHOD_CONTRACT; return _token; }
inline size_t size() { LIMITED_METHOD_CONTRACT; return sizeof(LookupStub); }
private:
friend struct LookupHolder;
const static int entryPointLen = 4;
WORD _entryPoint[entryPointLen];
PCODE _resolveWorkerTarget; // xx xx xx xx target address
size_t _token; // xx xx xx xx 32-bit constant
};
/* LookupHolders are the containers for LookupStubs, they provide for any alignment of
stubs as necessary. In the case of LookupStubs, alignment is necessary since
LookupStubs are placed in a hash table keyed by token. */
struct LookupHolder
{
static void InitializeStatic() { LIMITED_METHOD_CONTRACT; }
void Initialize(LookupHolder* pLookupHolderRX, PCODE resolveWorkerTarget, size_t dispatchToken);
LookupStub* stub() { LIMITED_METHOD_CONTRACT; return &_stub; }
static LookupHolder* FromLookupEntry(PCODE lookupEntry);
private:
friend struct LookupStub;
LookupStub _stub;
};
#endif // USES_LOOKUP_STUBS
struct DispatchStub;
struct DispatchHolder;
/*DispatchStub**************************************************************************************
Monomorphic and mostly monomorphic call sites eventually point to DispatchStubs.
A dispatch stub has an expected type (expectedMT), target address (target) and fail address (failure).
If the <this> in the calling frame is in fact of the expected type, then
control is transferred to the target address, the method implementation. If not,
then control is transferred to the fail address, a fail stub (see below) where a polymorphic
lookup is done to find the correct address to go to.
implementation note: Order, choice of instructions, and branch directions
should be carefully tuned since it can have an inordinate effect on performance. Particular
attention needs to be paid to the effects on the BTB and branch prediction, both in the small
and in the large, i.e. it needs to run well in the face of BTB overflow--using static predictions.
Note that since this stub is only used for mostly monomorphic callsites (ones that are not, get patched
to something else), the conditional jump "jne failure" is mostly not taken, and hence it is important
that branch prediction statically predicts this, which means it must be a forward jump. The alternative
is to reverse the order of the jumps and make sure that the resulting conditional jump "je implTarget"
is statically predicted as taken, i.e. a backward jump. The current choice was taken since it was easier
to control the placement of the stubs than control the placement of the jitted code and the stubs. */
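/* As a C-level sketch (illustrative only; the real body is hand-encoded Thumb-2 written
   out by DispatchHolder::Initialize):

       if (*(size_t*)this_arg == _expectedMT)   // compare the object's MethodTable
           goto _implTarget;                    // expected case: jump straight to the method body
       else
           goto _failTarget;                    // miss: fall into the ResolveStub's fail entry point
*/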
struct DispatchStub
{
inline PCODE entryPoint() { LIMITED_METHOD_CONTRACT; return (PCODE)(&_entryPoint[0]) + THUMB_CODE; }
inline size_t expectedMT() { LIMITED_METHOD_CONTRACT; return _expectedMT; }
inline PCODE implTarget() { LIMITED_METHOD_CONTRACT; return _implTarget; }
inline TADDR implTargetSlot(EntryPointSlots::SlotType *slotTypeRef) const
{
LIMITED_METHOD_CONTRACT;
_ASSERTE(slotTypeRef != nullptr);
*slotTypeRef = EntryPointSlots::SlotType_Executable;
return (TADDR)&_implTarget;
}
inline PCODE failTarget() { LIMITED_METHOD_CONTRACT; return _failTarget; }
inline size_t size() { LIMITED_METHOD_CONTRACT; return sizeof(DispatchStub); }
private:
friend struct DispatchHolder;
const static int entryPointLen = 12;
WORD _entryPoint[entryPointLen];
size_t _expectedMT;
PCODE _failTarget;
PCODE _implTarget;
};
/* DispatchHolders are the containers for DispatchStubs, they provide for any alignment of
stubs as necessary. DispatchStubs are placed in a hashtable and in a cache. The keys for both
are the pair expectedMT and token. Efficiency of the hash table is not a big issue,
since lookups in it are fairly rare. Efficiency of the cache is paramount since it is accessed frequently
(see ResolveStub below). Currently we are storing both of these fields in the DispatchHolder to simplify
alignment issues. If inlineMT in the stub itself was aligned, then it could be the expectedMT field.
While the token field can be logically gotten by following the failure target to the failEntryPoint
of the ResolveStub and then to the token over there, for perf reasons of cache access, it is duplicated here.
This allows us to use DispatchStubs in the cache. The alternative is to provide some other immutable struct
for the cache composed of the triplet (expectedMT, token, target) and some sort of reclamation scheme when
they are thrown out of the cache via overwrites (since concurrency will make the obvious approaches invalid).
*/
/* @workaround for ee resolution - Since the EE does not currently have a resolver function that
does what we want, see notes in implementation of VirtualCallStubManager::Resolver, we are
using dispatch stubs to simulate what we want. That means that inlineTarget, which should be immutable
is in fact written. Hence we have moved target out into the holder and aligned it so we can
atomically update it. When we get a resolver function that does what we want, we can drop this field,
and live with just the inlineTarget field in the stub itself, since immutability will hold.*/
struct DispatchHolder
{
static void InitializeStatic()
{
LIMITED_METHOD_CONTRACT;
// Check that _implTarget is aligned in the DispatchHolder for backpatching
static_assert_no_msg(((offsetof(DispatchHolder, _stub) + offsetof(DispatchStub, _implTarget)) % sizeof(void *)) == 0);
}
void Initialize(DispatchHolder* pDispatchHolderRX, PCODE implTarget, PCODE failTarget, size_t expectedMT);
DispatchStub* stub() { LIMITED_METHOD_CONTRACT; return &_stub; }
static DispatchHolder* FromDispatchEntry(PCODE dispatchEntry);
private:
//force expectedMT to be aligned since used as key in hash tables.
DispatchStub _stub;
};
struct ResolveStub;
struct ResolveHolder;
/*ResolveStub**************************************************************************************
Polymorphic call sites and monomorphic calls that fail end up in a ResolveStub. There is only
one resolver stub built for any given token, even though there may be many call sites that
use that token and many distinct <this> types that are used in the calling call frames. A resolver stub
actually has two entry points, one for polymorphic call sites and one for dispatch stubs that fail on their
expectedMT test. There is a third part of the resolver stub that enters the ee when a decision should
be made about changing the callsite. Therefore, we have defined the resolver stub as three distinct pieces,
even though they are actually allocated as a single contiguous block of memory. These pieces are:
A ResolveStub has two entry points:
FailEntry - where the dispatch stub goes if the expected MT test fails. This piece of the stub does
a check to see how often we are actually failing. If failures are frequent, control transfers to the
patch piece to cause the call site to be changed from a mostly monomorphic callsite
(calls dispatch stub) to a polymorphic callsite (calls resolve stub). If failures are rare, control
transfers to the resolve piece (see ResolveStub). The failEntryPoint decrements a counter
every time it is entered. The ee at various times will add a large chunk to the counter.
ResolveEntry - does a lookup in a cache by hashing the actual type of the calling frame's
<this> and the token identifying the (contract,method) pair desired. If found, control is transferred
to the method implementation. If not found in the cache, the token is pushed and the ee is entered via
the ResolveWorkerStub to do a full lookup and eventual transfer to the correct method implementation. Since
there is a different resolve stub for every token, the token can be inlined and the token can be pre-hashed.
The effectiveness of this approach is highly sensitive to the effectiveness of the hashing algorithm used,
as well as its speed. It turns out it is very important to make the hash function sensitive to all
of the bits of the method table, as method tables are laid out in memory in a very non-random way. Before
making any changes to the code sequences here, it is very important to measure and tune them as perf
can vary greatly, in unexpected ways, with seeming minor changes.
Implementation note - Order, choice of instructions, and branch directions
should be carefully tuned since it can have an inordinate effect on performance. Particular
attention needs to be paid to the effects on the BTB and branch prediction, both in the small
and in the large, i.e. it needs to run well in the face of BTB overflow--using static predictions.
Note that this stub is called in highly polymorphic cases, but the cache should have been sized
and the hash function chosen to maximize the cache hit case. Hence the cmp/jcc instructions should
mostly be going down the cache hit route, and it is important that this be statically predicted as so.
Hence the 3 jcc instrs need to be forward jumps. As structured, there is only one jmp/jcc that typically
gets put in the BTB since all the others typically fall straight thru. Minimizing potential BTB entries
is important. */
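/* Conceptual flow of the resolve entry point (illustrative pseudocode only; names and the
   exact cache-probe sequence are approximate, the real thing is hand-encoded in the stub):

       mt   = *(MethodTable**)this_arg;
       slot = (hash(mt) ^ _hashedToken) & _cacheMask;   // the token part is pre-hashed
       e    = entry at _cacheAddress[slot];
       if (e matches (mt, _token))
           goto e->target;                              // cache hit: the fall-through path
       goto _slowEntryPoint;                            // miss: push _token and enter the EE
*/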
struct ResolveStub
{
inline PCODE failEntryPoint() { LIMITED_METHOD_CONTRACT; return (PCODE)(&_failEntryPoint[0]) + THUMB_CODE; }
inline PCODE resolveEntryPoint() { LIMITED_METHOD_CONTRACT; return (PCODE)(&_resolveEntryPoint[0]) + THUMB_CODE; }
inline PCODE slowEntryPoint() { LIMITED_METHOD_CONTRACT; return (PCODE)(&_slowEntryPoint[0]) + THUMB_CODE; }
inline INT32* pCounter() { LIMITED_METHOD_CONTRACT; return _pCounter; }
inline UINT32 hashedToken() { LIMITED_METHOD_CONTRACT; return _hashedToken >> LOG2_PTRSIZE; }
inline size_t cacheAddress() { LIMITED_METHOD_CONTRACT; return _cacheAddress; }
inline size_t token() { LIMITED_METHOD_CONTRACT; return _token; }
inline size_t size() { LIMITED_METHOD_CONTRACT; return sizeof(ResolveStub); }
private:
friend struct ResolveHolder;
const static int resolveEntryPointLen = 32;
const static int slowEntryPointLen = 4;
const static int failEntryPointLen = 14;
WORD _resolveEntryPoint[resolveEntryPointLen];
WORD _slowEntryPoint[slowEntryPointLen];
WORD _failEntryPoint[failEntryPointLen];
INT32* _pCounter;
UINT32 _hashedToken;
size_t _cacheAddress; // lookupCache
size_t _token;
size_t _tokenSlow;
PCODE _resolveWorkerTarget;
UINT32 _cacheMask;
};
/* ResolveHolders are the containers for ResolveStubs; they provide
for any alignment of the stubs as necessary. The stubs are placed in a hash table keyed by
the token for which they are built. Efficiency of access requires that this token be aligned.
For now, we have copied that field into the ResolveHolder itself, if the resolve stub is arranged such that
any of its inlined tokens (non-prehashed) is aligned, then the token field in the ResolveHolder
is not needed. */
struct ResolveHolder
{
static void InitializeStatic() { LIMITED_METHOD_CONTRACT; }
void Initialize(ResolveHolder* pResolveHolderRX,
PCODE resolveWorkerTarget, PCODE patcherTarget,
size_t dispatchToken, UINT32 hashedToken,
void * cacheAddr, INT32 * counterAddr);
ResolveStub* stub() { LIMITED_METHOD_CONTRACT; return &_stub; }
static ResolveHolder* FromFailEntry(PCODE failEntry);
static ResolveHolder* FromResolveEntry(PCODE resolveEntry);
private:
ResolveStub _stub;
};
/*VTableCallStub**************************************************************************************
These are jump stubs that perform a vtable-based virtual call. These stubs assume that an object is placed
in the first argument register (this pointer). From there, the stub extracts the MethodTable pointer, followed by the
vtable pointer, and finally jumps to the target method at a given slot in the vtable.
*/
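/* The generated stub is morally equivalent to the following C++ (see Initialize() below
   for the actual Thumb-2 encoding and the long/short forms):

       MethodTable* pMT   = *(MethodTable**)this_arg;                     // ldr r12, [r0]
       TADDR        chunk = *(TADDR*)((BYTE*)pMT + offsetOfIndirection);  // ldr r12, [r12 + #off1]
       PCODE        pc    = *(PCODE*)(chunk + offsetAfterIndirection);    // ldr r12, [r12 + #off2]
       goto pc;                                                           // bx  r12
*/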
struct VTableCallStub
{
friend struct VTableCallHolder;
inline size_t size()
{
LIMITED_METHOD_CONTRACT;
BYTE* pStubCode = (BYTE *)this;
size_t cbSize = 4; // First ldr instruction
// If we never save r0 to the red zone, we have the short version of the stub
if (*(UINT32*)(&pStubCode[cbSize]) != 0x0c04f84d)
{
return
4 + // ldr r12,[r0]
4 + // ldr r12,[r12+offset]
4 + // ldr r12,[r12+offset]
2 + // bx r12
4; // Slot value (data storage, not a real instruction)
}
cbSize += 4; // Saving r0 into red zone
cbSize += (*(WORD*)(&pStubCode[cbSize]) == 0xf8dc ? 4 : 12); // Loading of vtable into r12
cbSize += (*(WORD*)(&pStubCode[cbSize]) == 0xf8dc ? 4 : 12); // Loading of target address into r12
return cbSize + 6 /* Restore r0, bx*/ + 4 /* Slot value */;
}
inline PCODE entryPoint() const { LIMITED_METHOD_CONTRACT; return (PCODE)&_entryPoint[0] + THUMB_CODE; }
inline size_t token()
{
LIMITED_METHOD_CONTRACT;
DWORD slot = *(DWORD*)(reinterpret_cast<BYTE*>(this) + size() - 4);
return DispatchToken::CreateDispatchToken(slot).To_SIZE_T();
}
private:
BYTE _entryPoint[0]; // Dynamically sized stub. See Initialize() for more details.
};
/* VTableCallHolders are the containers for VTableCallStubs, they provide for any alignment of
stubs as necessary. */
struct VTableCallHolder
{
void Initialize(unsigned slot);
VTableCallStub* stub() { LIMITED_METHOD_CONTRACT; return reinterpret_cast<VTableCallStub *>(this); }
static size_t GetHolderSize(unsigned slot)
{
STATIC_CONTRACT_WRAPPER;
unsigned offsetOfIndirection = MethodTable::GetVtableOffset() + MethodTable::GetIndexOfVtableIndirection(slot) * TARGET_POINTER_SIZE;
unsigned offsetAfterIndirection = MethodTable::GetIndexAfterVtableIndirection(slot) * TARGET_POINTER_SIZE;
int indirectionsSize = (offsetOfIndirection > 0xFFF ? 12 : 4) + (offsetAfterIndirection > 0xFFF ? 12 : 4);
if (offsetOfIndirection > 0xFFF || offsetAfterIndirection > 0xFFF)
indirectionsSize += 8; // Save/restore r0 using red zone
return 6 + indirectionsSize + 4;
}
static VTableCallHolder* FromVTableCallEntry(PCODE entry)
{
LIMITED_METHOD_CONTRACT;
return (VTableCallHolder*)(entry & ~THUMB_CODE);
}
private:
// VTableCallStub follows here. It is dynamically sized on allocation because it could
// use short/long instruction sizes for the mov/jmp, depending on the slot value.
};
#include <poppack.h>
#ifdef DECLARE_DATA
#ifndef DACCESS_COMPILE
#ifdef STUB_LOGGING
extern size_t g_lookup_inline_counter;
extern size_t g_mono_call_counter;
extern size_t g_mono_miss_counter;
extern size_t g_poly_call_counter;
extern size_t g_poly_miss_counter;
#endif
TADDR StubDispatchFrame_MethodFrameVPtr;
LookupHolder* LookupHolder::FromLookupEntry(PCODE lookupEntry)
{
lookupEntry = lookupEntry & ~THUMB_CODE;
return (LookupHolder*) ( lookupEntry - offsetof(LookupHolder, _stub) - offsetof(LookupStub, _entryPoint) );
}
/* Template used to generate the stub. We generate a stub by allocating a block of
memory, copying the template over it, and updating just the specific fields that need
to be changed.
*/
DispatchStub dispatchInit;
DispatchHolder* DispatchHolder::FromDispatchEntry(PCODE dispatchEntry)
{
LIMITED_METHOD_CONTRACT;
dispatchEntry = dispatchEntry & ~THUMB_CODE;
DispatchHolder* dispatchHolder = (DispatchHolder*) ( dispatchEntry - offsetof(DispatchHolder, _stub) - offsetof(DispatchStub, _entryPoint) );
// _ASSERTE(dispatchHolder->_stub._entryPoint[0] == dispatchInit._entryPoint[0]);
return dispatchHolder;
}
/* Template used to generate the stub. We generate a stub by allocating a block of
memory, copying the template over it, and updating just the specific fields that need
to be changed.
*/
ResolveStub resolveInit;
ResolveHolder* ResolveHolder::FromFailEntry(PCODE failEntry)
{
LIMITED_METHOD_CONTRACT;
failEntry = failEntry & ~THUMB_CODE;
ResolveHolder* resolveHolder = (ResolveHolder*) ( failEntry - offsetof(ResolveHolder, _stub) - offsetof(ResolveStub, _failEntryPoint) );
// _ASSERTE(resolveHolder->_stub._resolveEntryPoint[0] == resolveInit._resolveEntryPoint[0]);
return resolveHolder;
}
ResolveHolder* ResolveHolder::FromResolveEntry(PCODE resolveEntry)
{
LIMITED_METHOD_CONTRACT;
resolveEntry = resolveEntry & ~THUMB_CODE;
ResolveHolder* resolveHolder = (ResolveHolder*) ( resolveEntry - offsetof(ResolveHolder, _stub) - offsetof(ResolveStub, _resolveEntryPoint) );
// _ASSERTE(resolveHolder->_stub._resolveEntryPoint[0] == resolveInit._resolveEntryPoint[0]);
return resolveHolder;
}
void MovRegImm(BYTE* p, int reg, TADDR imm);
void VTableCallHolder::Initialize(unsigned slot)
{
unsigned offsetOfIndirection = MethodTable::GetVtableOffset() + MethodTable::GetIndexOfVtableIndirection(slot) * TARGET_POINTER_SIZE;
unsigned offsetAfterIndirection = MethodTable::GetIndexAfterVtableIndirection(slot) * TARGET_POINTER_SIZE;
VTableCallStub* pStub = stub();
BYTE* p = (BYTE*)(pStub->entryPoint() & ~THUMB_CODE);
// ldr r12,[r0] : r12 = MethodTable pointer
*(UINT32*)p = 0xc000f8d0; p += 4;
if (offsetOfIndirection > 0xFFF || offsetAfterIndirection > 0xFFF)
{
// str r0, [sp, #-4]. Save r0 in the red zone
*(UINT32*)p = 0x0c04f84d; p += 4;
}
if (offsetOfIndirection > 0xFFF)
{
// mov r0, offsetOfIndirection
MovRegImm(p, 0, offsetOfIndirection); p += 8;
// ldr r12, [r12, r0]
*(UINT32*)p = 0xc000f85c; p += 4;
}
else
{
// ldr r12, [r12 + offset]
*(WORD *)p = 0xf8dc; p += 2;
*(WORD *)p = (WORD)(offsetOfIndirection | 0xc000); p += 2;
}
if (offsetAfterIndirection > 0xFFF)
{
// mov r0, offsetAfterIndirection
MovRegImm(p, 0, offsetAfterIndirection); p += 8;
// ldr r12, [r12, r0]
*(UINT32*)p = 0xc000f85c; p += 4;
}
else
{
// ldr r12, [r12 + offset]
*(WORD *)p = 0xf8dc; p += 2;
*(WORD *)p = (WORD)(offsetAfterIndirection | 0xc000); p += 2;
}
if (offsetOfIndirection > 0xFFF || offsetAfterIndirection > 0xFFF)
{
// ldr r0, [sp, #-4]. Restore r0 from the red zone.
*(UINT32*)p = 0x0c04f85d; p += 4;
}
// bx r12
*(UINT16*)p = 0x4760; p += 2;
// Store the slot value here for convenience. Not a real instruction (unreachable anyways)
*(UINT32*)p = slot; p += 4;
_ASSERT(p == (BYTE*)(stub()->entryPoint() & ~THUMB_CODE) + VTableCallHolder::GetHolderSize(slot));
_ASSERT(stub()->size() == VTableCallHolder::GetHolderSize(slot));
}
#endif // DACCESS_COMPILE
VirtualCallStubManager::StubKind VirtualCallStubManager::predictStubKind(PCODE stubStartAddress)
{
SUPPORTS_DAC;
#ifdef DACCESS_COMPILE
return SK_BREAKPOINT; // Dac always uses the slower lookup
#else
StubKind stubKind = SK_UNKNOWN;
TADDR pInstr = PCODEToPINSTR(stubStartAddress);
EX_TRY
{
// If stubStartAddress is completely bogus, then this might AV,
// so we protect it with SEH. An AV here is OK.
AVInRuntimeImplOkayHolder AVOkay;
WORD firstWord = *((WORD*) pInstr);
if (*((UINT32*)pInstr) == 0xc000f8d0)
{
// Confirm the third word belongs to the vtable stub pattern
WORD thirdWord = ((WORD*)pInstr)[2];
if (thirdWord == 0xf84d /* Part of str r0, [sp, #-4] */ ||
thirdWord == 0xf8dc /* Part of ldr r12, [r12 + offset] */)
stubKind = SK_VTABLECALL;
}
if (stubKind == SK_UNKNOWN)
{
// Assuming that RESOLVE_STUB_FIRST_WORD & DISPATCH_STUB_FIRST_WORD have the same value
if (firstWord == DISPATCH_STUB_FIRST_WORD)
{
WORD thirdWord = ((WORD*)pInstr)[2];
if (thirdWord == 0xf84d)
{
stubKind = SK_DISPATCH;
}
else if (thirdWord == 0xb460)
{
stubKind = SK_RESOLVE;
}
}
else if (firstWord == 0xf8df)
{
stubKind = SK_LOOKUP;
}
}
}
EX_CATCH
{
stubKind = SK_UNKNOWN;
}
EX_END_CATCH(SwallowAllExceptions);
return stubKind;
#endif // DACCESS_COMPILE
}
#endif //DECLARE_DATA
#endif // _VIRTUAL_CALL_STUB_ARM_H
| -1 |
dotnet/runtime | 65,926 | FastMod for EEHashTable (Faster virtual generics) | Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR` but that is a more complicated change and I think this one is worth having too anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| EgorBo | "2022-02-27T11:34:01Z" | "2022-03-01T19:43:37Z" | a5a7836f8193414fe64232d5c386b6402eb70e19 | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | FastMod for EEHashTable (Faster virtual generics). Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR` but that is a more complicated change and I think this one is worth having too anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
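For reference, a minimal sketch of the "fast mod" trick this kind of change relies on (Lemire-style multiply-shift; the names below are illustrative, not the actual EEHashTable members):

```cpp
#include <cstdint>

// Precomputed once per table size: multiplier = ceil(2^64 / divisor).
static uint64_t ComputeFastModMultiplier(uint32_t divisor)
{
    return UINT64_MAX / divisor + 1;
}

// hash % divisor without an integer-division instruction (requires divisor > 1).
// Uses the GCC/Clang __uint128_t extension; an MSVC build would use _umul128 instead.
static uint32_t FastMod(uint32_t hash, uint32_t divisor, uint64_t multiplier)
{
    uint64_t lowbits = multiplier * hash;                        // low 64 bits of multiplier * hash
    return (uint32_t)(((__uint128_t)lowbits * divisor) >> 64);   // high 64 bits of lowbits * divisor
}
```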
| ./src/mono/mono/metadata/object-offsets.h |
/**
\file
This is a parameterized header. It's supposed/ok to be included multiple times.
Input defines: (those to be defined by the includer file)
Required:
DECL_OFFSET(struct,field)
DECL_OFFSET2(struct,field,offset)
DECL_ALIGN2(name,alignment)
Optional:
USE_CROSS_COMPILE_OFFSETS - if defined, force the cross compiler offsets to be used, otherwise
they will only be used if MONO_CROSS_COMPILE is defined
DISABLE_METADATA_OFFSETS - Disable the definition of offsets for structures defined in metadata/.
DISABLE_JIT_OFFSETS - Disable the definition of offsets for structures defined in mini/.
The last two are needed because metadata shouldn't include JIT offsets since the structures
are not defined, while the JIT shouldn't include metadata offsets, since some of them
are GC specific, and the JIT needs to remain GC agnostic.
Output defines:
HAS_CROSS_COMPILER_OFFSETS - if set, it means we found some cross offsets; it doesn't mean we'll use them.
USED_CROSS_COMPILER_OFFSETS - if set, it means we used the cross offsets
Environment defines (from config.h and CFLAGS):
MONO_GENERATING_OFFSETS - Set by an offsets generating tool to disable the usage of any (possibly non-existing) generated header.
MONO_OFFSETS_FILE - Name of the header file containing the offsets to be used.
*/
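/* Illustrative (hypothetical) includer, to make the parameterization concrete: a consumer
   could turn every declaration into an enum constant computed locally, e.g.

       #define DECL_OFFSET(s,f)      MONO_OFFSET_ ## s ## _ ## f = (int) offsetof (s, f),
       #define DECL_OFFSET2(s,f,o)   MONO_OFFSET_ ## s ## _ ## f = o,
       #define DECL_ALIGN2(n,a)      MONO_ALIGN_ ## n = a,
       #define DECL_SIZE(t)          MONO_SIZEOF_ ## t = sizeof (t),
       #define DECL_SIZE2(t,s)       MONO_SIZEOF_ ## t = s,
       enum {
       #include "object-offsets.h"
       };

   while an offsets-generating tool would instead define the macros to print each value,
   producing the header that is later consumed via MONO_OFFSETS_FILE. */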
#undef HAS_CROSS_COMPILER_OFFSETS
#undef USED_CROSS_COMPILER_OFFSETS
#if !defined (MONO_GENERATING_OFFSETS) && defined (MONO_OFFSETS_FILE)
#include MONO_OFFSETS_FILE
#endif
#ifndef USED_CROSS_COMPILER_OFFSETS
DECL_SIZE(gint8)
DECL_SIZE(gint16)
DECL_SIZE(gint32)
DECL_SIZE(gint64)
DECL_SIZE(float)
DECL_SIZE(double)
DECL_SIZE(gpointer)
// Offsets for structures defined in metadata/
#ifndef DISABLE_METADATA_OFFSETS
DECL_OFFSET(MonoObject, vtable)
DECL_OFFSET(MonoObject, synchronisation)
DECL_OFFSET(MonoClass, interface_bitmap)
DECL_OFFSET(MonoClass, _byval_arg)
DECL_OFFSET(MonoClass, cast_class)
DECL_OFFSET(MonoClass, element_class)
DECL_OFFSET(MonoClass, idepth)
DECL_OFFSET(MonoClass, instance_size)
DECL_OFFSET(MonoClass, interface_id)
DECL_OFFSET(MonoClass, max_interface_id)
DECL_OFFSET(MonoClass, parent)
DECL_OFFSET(MonoClass, rank)
DECL_OFFSET(MonoClass, sizes)
DECL_OFFSET(MonoClass, supertypes)
DECL_OFFSET(MonoClass, class_kind)
DECL_OFFSET(MonoVTable, klass)
DECL_OFFSET(MonoVTable, max_interface_id)
DECL_OFFSET(MonoVTable, interface_bitmap)
DECL_OFFSET(MonoVTable, vtable)
DECL_OFFSET(MonoVTable, rank)
DECL_OFFSET(MonoVTable, initialized)
DECL_OFFSET(MonoVTable, flags)
DECL_OFFSET(MonoVTable, type)
DECL_OFFSET(MonoVTable, runtime_generic_context)
DECL_OFFSET(MonoDomain, stack_overflow_ex)
DECL_OFFSET(MonoDelegate, target)
DECL_OFFSET(MonoDelegate, method_ptr)
DECL_OFFSET(MonoDelegate, invoke_impl)
DECL_OFFSET(MonoDelegate, method)
DECL_OFFSET(MonoDelegate, method_code)
DECL_OFFSET(MonoDelegate, method_is_virtual)
DECL_OFFSET(MonoDelegate, extra_arg)
DECL_OFFSET(MonoInternalThread, tid)
DECL_OFFSET(MonoInternalThread, small_id)
DECL_OFFSET(MonoInternalThread, static_data)
DECL_OFFSET(MonoInternalThread, last)
DECL_OFFSET(MonoMulticastDelegate, delegates)
DECL_OFFSET(MonoTransparentProxy, rp)
DECL_OFFSET(MonoTransparentProxy, remote_class)
DECL_OFFSET(MonoTransparentProxy, custom_type_info)
DECL_OFFSET(MonoRealProxy, target_domain_id)
DECL_OFFSET(MonoRealProxy, context)
DECL_OFFSET(MonoRealProxy, unwrapped_server)
DECL_OFFSET(MonoRemoteClass, proxy_class)
DECL_OFFSET(MonoArray, vector)
DECL_OFFSET(MonoArray, max_length)
DECL_OFFSET(MonoArray, bounds)
DECL_OFFSET(MonoArrayBounds, lower_bound)
DECL_OFFSET(MonoArrayBounds, length)
DECL_OFFSET(MonoSafeHandle, handle)
DECL_OFFSET(MonoHandleRef, handle)
DECL_OFFSET(MonoComInteropProxy, com_object)
DECL_OFFSET(MonoString, length)
DECL_OFFSET(MonoString, chars)
DECL_OFFSET(MonoException, message)
DECL_OFFSET(MonoException, caught_in_unmanaged)
DECL_OFFSET(MonoTypedRef, type)
DECL_OFFSET(MonoTypedRef, klass)
DECL_OFFSET(MonoTypedRef, value)
//Internal structs
DECL_OFFSET(MonoThreadsSync, status)
DECL_OFFSET(MonoThreadsSync, nest)
DECL_OFFSET(MonoProfilerCallContext, method)
DECL_OFFSET(MonoProfilerCallContext, return_value)
DECL_OFFSET(MonoProfilerCallContext, args)
#ifdef HAVE_SGEN_GC
DECL_OFFSET(SgenClientThreadInfo, in_critical_region)
DECL_OFFSET(SgenThreadInfo, tlab_next)
DECL_OFFSET(SgenThreadInfo, tlab_temp_end)
#endif
#endif //DISABLE_METADATA_OFFSETS
// Offsets for structures defined in mini/
#ifndef DISABLE_JIT_OFFSETS
DECL_SIZE(MonoMethodRuntimeGenericContext)
DECL_SIZE(MonoLMF)
DECL_SIZE(MonoLMFExt)
DECL_SIZE(MonoTypedRef)
DECL_SIZE(CallContext)
DECL_SIZE(MonoContext)
DECL_OFFSET(MonoLMF, previous_lmf)
DECL_OFFSET(MonoLMFExt, kind)
DECL_OFFSET(MonoLMFExt, il_state)
DECL_OFFSET(MonoMethodILState, method)
DECL_OFFSET(MonoMethodILState, il_offset)
DECL_OFFSET(MonoMethodILState, data)
DECL_OFFSET(MonoMethodRuntimeGenericContext, class_vtable)
DECL_OFFSET(MonoJitTlsData, lmf)
DECL_OFFSET(MonoJitTlsData, class_cast_from)
DECL_OFFSET(MonoJitTlsData, class_cast_to)
#ifdef TARGET_WIN32
DECL_OFFSET(MonoJitTlsData, stack_restore_ctx)
#endif
DECL_OFFSET(MonoGSharedVtMethodRuntimeInfo, locals_size)
DECL_OFFSET(MonoGSharedVtMethodRuntimeInfo, entries) //XXX more to fix here
DECL_OFFSET(MonoDelegateTrampInfo, method)
DECL_OFFSET(MonoDelegateTrampInfo, invoke_impl)
DECL_OFFSET(MonoDelegateTrampInfo, method_ptr)
// Architecture-specific offsets
// -----------------------------
#if defined(TARGET_WASM)
DECL_OFFSET(MonoContext, wasm_ip)
DECL_OFFSET(MonoContext, wasm_bp)
DECL_OFFSET(MonoContext, wasm_sp)
DECL_OFFSET(MonoContext, llvm_exc_reg)
DECL_OFFSET(MonoLMF, lmf_addr)
DECL_OFFSET(MonoLMF, method)
#elif defined(TARGET_X86)
DECL_OFFSET(MonoContext, eax)
DECL_OFFSET(MonoContext, ebx)
DECL_OFFSET(MonoContext, ecx)
DECL_OFFSET(MonoContext, edx)
DECL_OFFSET(MonoContext, edi)
DECL_OFFSET(MonoContext, esi)
DECL_OFFSET(MonoContext, esp)
DECL_OFFSET(MonoContext, ebp)
DECL_OFFSET(MonoContext, eip)
DECL_OFFSET(MonoLMF, method)
DECL_OFFSET(MonoLMF, lmf_addr)
DECL_OFFSET(MonoLMF, esp)
DECL_OFFSET(MonoLMF, ebx)
DECL_OFFSET(MonoLMF, edi)
DECL_OFFSET(MonoLMF, esi)
DECL_OFFSET(MonoLMF, ebp)
DECL_OFFSET(MonoLMF, eip)
#elif defined(TARGET_AMD64)
DECL_OFFSET(MonoContext, gregs)
DECL_OFFSET(MonoContext, fregs)
DECL_OFFSET(MonoLMF, rsp)
DECL_OFFSET(MonoLMF, rbp)
DECL_OFFSET(DynCallArgs, res)
DECL_OFFSET(DynCallArgs, fregs)
DECL_OFFSET(DynCallArgs, has_fp)
DECL_OFFSET(DynCallArgs, nstack_args)
DECL_OFFSET(DynCallArgs, regs)
DECL_OFFSET(MonoLMFTramp, ctx)
DECL_OFFSET(MonoLMFTramp, lmf_addr)
#elif defined(TARGET_ARM)
DECL_OFFSET(MonoLMF, sp)
DECL_OFFSET(MonoLMF, fp)
DECL_OFFSET(MonoLMF, ip)
DECL_OFFSET(MonoLMF, iregs)
DECL_OFFSET(MonoLMF, fregs)
DECL_OFFSET(DynCallArgs, fpregs)
DECL_OFFSET(DynCallArgs, has_fpregs)
DECL_OFFSET(DynCallArgs, regs)
DECL_OFFSET(DynCallArgs, n_stackargs)
DECL_OFFSET(SeqPointInfo, ss_tramp_addr)
#elif defined(TARGET_ARM64)
DECL_OFFSET(MonoLMF, pc)
DECL_OFFSET(MonoLMF, gregs)
DECL_OFFSET(DynCallArgs, regs)
DECL_OFFSET(DynCallArgs, fpregs)
DECL_OFFSET(DynCallArgs, n_stackargs)
DECL_OFFSET(DynCallArgs, n_fpargs)
DECL_OFFSET(DynCallArgs, n_fpret)
#elif defined(TARGET_S390X)
DECL_OFFSET(MonoLMF, pregs)
DECL_OFFSET(MonoLMF, lmf_addr)
DECL_OFFSET(MonoLMF, method)
DECL_OFFSET(MonoLMF, ebp)
DECL_OFFSET(MonoLMF, eip)
DECL_OFFSET(MonoLMF, gregs)
DECL_OFFSET(MonoLMF, fregs)
#elif defined(TARGET_RISCV)
DECL_OFFSET(MonoContext, gregs)
DECL_OFFSET(MonoContext, fregs)
#endif
// Shared architecture offsets
// ----------------------------
#if defined(TARGET_ARM) || defined(TARGET_ARM64)
DECL_OFFSET (MonoContext, pc)
DECL_OFFSET (MonoContext, regs)
DECL_OFFSET (MonoContext, fregs)
DECL_OFFSET(MonoLMF, lmf_addr)
DECL_OFFSET(DynCallArgs, res)
DECL_OFFSET(DynCallArgs, res2)
#endif
#if defined(TARGET_ARM)
DECL_OFFSET(MonoLMF, method)
DECL_OFFSET(GSharedVtCallInfo, stack_usage)
DECL_OFFSET(GSharedVtCallInfo, vret_arg_reg)
DECL_OFFSET(GSharedVtCallInfo, ret_marshal)
DECL_OFFSET(GSharedVtCallInfo, vret_slot)
DECL_OFFSET(GSharedVtCallInfo, gsharedvt_in)
DECL_OFFSET(SeqPointInfo, ss_trigger_page)
#endif
#if defined(TARGET_ARM64)
DECL_OFFSET (MonoContext, has_fregs)
DECL_OFFSET(GSharedVtCallInfo, stack_usage)
DECL_OFFSET(GSharedVtCallInfo, gsharedvt_in)
DECL_OFFSET(GSharedVtCallInfo, ret_marshal)
DECL_OFFSET(GSharedVtCallInfo, vret_slot)
#endif
#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
DECL_OFFSET(SeqPointInfo, ss_tramp_addr)
#endif
#if defined(TARGET_AMD64) || defined(TARGET_ARM) || defined(TARGET_ARM64)
DECL_OFFSET(SeqPointInfo, bp_addrs)
DECL_OFFSET(CallContext, gregs)
DECL_OFFSET(CallContext, fregs)
DECL_OFFSET(CallContext, stack_size)
DECL_OFFSET(CallContext, stack)
#endif
#if defined(TARGET_X86)
DECL_OFFSET(CallContext, eax)
DECL_OFFSET(CallContext, edx)
DECL_OFFSET(CallContext, fret)
DECL_OFFSET(CallContext, stack_size)
DECL_OFFSET(CallContext, stack)
#endif
#if defined(TARGET_X86)
DECL_OFFSET(GSharedVtCallInfo, stack_usage)
DECL_OFFSET(GSharedVtCallInfo, vret_slot)
DECL_OFFSET(GSharedVtCallInfo, vret_arg_slot)
DECL_OFFSET(GSharedVtCallInfo, ret_marshal)
DECL_OFFSET(GSharedVtCallInfo, gsharedvt_in)
#endif
#if defined(TARGET_AMD64)
DECL_OFFSET(GSharedVtCallInfo, ret_marshal)
DECL_OFFSET(GSharedVtCallInfo, vret_arg_reg)
DECL_OFFSET(GSharedVtCallInfo, vret_slot)
DECL_OFFSET(GSharedVtCallInfo, stack_usage)
DECL_OFFSET(GSharedVtCallInfo, gsharedvt_in)
#endif
DECL_OFFSET(MonoFtnDesc, arg)
DECL_OFFSET(MonoFtnDesc, addr)
#endif //DISABLE_JIT_OFFSETS
#endif //USED_CROSS_COMPILER_OFFSETS
#undef DECL_OFFSET
#undef DECL_OFFSET2
#undef DECL_ALIGN2
#undef DECL_SIZE
#undef DECL_SIZE2
#undef USE_CROSS_COMPILE_OFFSETS
|
/**
\file
This is a parameterized header. It's supposed/ok to be included multiple times.
Input defines: (those to be defined by the includer file)
Required:
DECL_OFFSET(struct,field)
DECL_OFFSET2(struct,field,offset)
DECL_ALIGN2(name,alignment)
Optional:
USE_CROSS_COMPILE_OFFSETS - if defined, force the cross compiler offsets to be used, otherwise
they will only be used if MONO_CROSS_COMPILE is defined
DISABLE_METADATA_OFFSETS - Disable the definition of offsets for structures defined in metadata/.
DISABLE_JIT_OFFSETS - Disable the definition of offsets for structures defined in mini/.
The last two are needed because metadata shouldn't include JIT offsets since the structures
are not defined, while the JIT shouldn't include metadata offsets, since some of them
are GC specific, and the JIT needs to remain GC agnostic.
Output defines:
HAS_CROSS_COMPILER_OFFSETS - if set, it means we found some cross offsets; it doesn't mean we'll use them.
USED_CROSS_COMPILER_OFFSETS - if set, it means we used the cross offsets
Environment defines (from config.h and CFLAGS):
MONO_GENERATING_OFFSETS - Set by an offsets generating tool to disable the usage of any (possibly non-existing) generated header.
MONO_OFFSETS_FILE - Name of the header file containing the offsets to be used.
*/
#undef HAS_CROSS_COMPILER_OFFSETS
#undef USED_CROSS_COMPILER_OFFSETS
#if !defined (MONO_GENERATING_OFFSETS) && defined (MONO_OFFSETS_FILE)
#include MONO_OFFSETS_FILE
#endif
#ifndef USED_CROSS_COMPILER_OFFSETS
DECL_SIZE(gint8)
DECL_SIZE(gint16)
DECL_SIZE(gint32)
DECL_SIZE(gint64)
DECL_SIZE(float)
DECL_SIZE(double)
DECL_SIZE(gpointer)
// Offsets for structures defined in metadata/
#ifndef DISABLE_METADATA_OFFSETS
DECL_OFFSET(MonoObject, vtable)
DECL_OFFSET(MonoObject, synchronisation)
DECL_OFFSET(MonoClass, interface_bitmap)
DECL_OFFSET(MonoClass, _byval_arg)
DECL_OFFSET(MonoClass, cast_class)
DECL_OFFSET(MonoClass, element_class)
DECL_OFFSET(MonoClass, idepth)
DECL_OFFSET(MonoClass, instance_size)
DECL_OFFSET(MonoClass, interface_id)
DECL_OFFSET(MonoClass, max_interface_id)
DECL_OFFSET(MonoClass, parent)
DECL_OFFSET(MonoClass, rank)
DECL_OFFSET(MonoClass, sizes)
DECL_OFFSET(MonoClass, supertypes)
DECL_OFFSET(MonoClass, class_kind)
DECL_OFFSET(MonoVTable, klass)
DECL_OFFSET(MonoVTable, max_interface_id)
DECL_OFFSET(MonoVTable, interface_bitmap)
DECL_OFFSET(MonoVTable, vtable)
DECL_OFFSET(MonoVTable, rank)
DECL_OFFSET(MonoVTable, initialized)
DECL_OFFSET(MonoVTable, flags)
DECL_OFFSET(MonoVTable, type)
DECL_OFFSET(MonoVTable, runtime_generic_context)
DECL_OFFSET(MonoDomain, stack_overflow_ex)
DECL_OFFSET(MonoDelegate, target)
DECL_OFFSET(MonoDelegate, method_ptr)
DECL_OFFSET(MonoDelegate, invoke_impl)
DECL_OFFSET(MonoDelegate, method)
DECL_OFFSET(MonoDelegate, method_code)
DECL_OFFSET(MonoDelegate, method_is_virtual)
DECL_OFFSET(MonoDelegate, extra_arg)
DECL_OFFSET(MonoInternalThread, tid)
DECL_OFFSET(MonoInternalThread, small_id)
DECL_OFFSET(MonoInternalThread, static_data)
DECL_OFFSET(MonoInternalThread, last)
DECL_OFFSET(MonoMulticastDelegate, delegates)
DECL_OFFSET(MonoTransparentProxy, rp)
DECL_OFFSET(MonoTransparentProxy, remote_class)
DECL_OFFSET(MonoTransparentProxy, custom_type_info)
DECL_OFFSET(MonoRealProxy, target_domain_id)
DECL_OFFSET(MonoRealProxy, context)
DECL_OFFSET(MonoRealProxy, unwrapped_server)
DECL_OFFSET(MonoRemoteClass, proxy_class)
DECL_OFFSET(MonoArray, vector)
DECL_OFFSET(MonoArray, max_length)
DECL_OFFSET(MonoArray, bounds)
DECL_OFFSET(MonoArrayBounds, lower_bound)
DECL_OFFSET(MonoArrayBounds, length)
DECL_OFFSET(MonoSafeHandle, handle)
DECL_OFFSET(MonoHandleRef, handle)
DECL_OFFSET(MonoComInteropProxy, com_object)
DECL_OFFSET(MonoString, length)
DECL_OFFSET(MonoString, chars)
DECL_OFFSET(MonoException, message)
DECL_OFFSET(MonoException, caught_in_unmanaged)
DECL_OFFSET(MonoTypedRef, type)
DECL_OFFSET(MonoTypedRef, klass)
DECL_OFFSET(MonoTypedRef, value)
//Internal structs
DECL_OFFSET(MonoThreadsSync, status)
DECL_OFFSET(MonoThreadsSync, nest)
DECL_OFFSET(MonoProfilerCallContext, method)
DECL_OFFSET(MonoProfilerCallContext, return_value)
DECL_OFFSET(MonoProfilerCallContext, args)
#ifdef HAVE_SGEN_GC
DECL_OFFSET(SgenClientThreadInfo, in_critical_region)
DECL_OFFSET(SgenThreadInfo, tlab_next)
DECL_OFFSET(SgenThreadInfo, tlab_temp_end)
#endif
#endif //DISABLE_METADATA_OFFSETS
// Offsets for structures defined in mini/
#ifndef DISABLE_JIT_OFFSETS
DECL_SIZE(MonoMethodRuntimeGenericContext)
DECL_SIZE(MonoLMF)
DECL_SIZE(MonoLMFExt)
DECL_SIZE(MonoTypedRef)
DECL_SIZE(CallContext)
DECL_SIZE(MonoContext)
DECL_OFFSET(MonoLMF, previous_lmf)
DECL_OFFSET(MonoLMFExt, kind)
DECL_OFFSET(MonoLMFExt, il_state)
DECL_OFFSET(MonoMethodILState, method)
DECL_OFFSET(MonoMethodILState, il_offset)
DECL_OFFSET(MonoMethodILState, data)
DECL_OFFSET(MonoMethodRuntimeGenericContext, class_vtable)
DECL_OFFSET(MonoJitTlsData, lmf)
DECL_OFFSET(MonoJitTlsData, class_cast_from)
DECL_OFFSET(MonoJitTlsData, class_cast_to)
#ifdef TARGET_WIN32
DECL_OFFSET(MonoJitTlsData, stack_restore_ctx)
#endif
DECL_OFFSET(MonoGSharedVtMethodRuntimeInfo, locals_size)
DECL_OFFSET(MonoGSharedVtMethodRuntimeInfo, entries) //XXX more to fix here
DECL_OFFSET(MonoDelegateTrampInfo, method)
DECL_OFFSET(MonoDelegateTrampInfo, invoke_impl)
DECL_OFFSET(MonoDelegateTrampInfo, method_ptr)
// Architecture-specific offsets
// -----------------------------
#if defined(TARGET_WASM)
DECL_OFFSET(MonoContext, wasm_ip)
DECL_OFFSET(MonoContext, wasm_bp)
DECL_OFFSET(MonoContext, wasm_sp)
DECL_OFFSET(MonoContext, llvm_exc_reg)
DECL_OFFSET(MonoLMF, lmf_addr)
DECL_OFFSET(MonoLMF, method)
#elif defined(TARGET_X86)
DECL_OFFSET(MonoContext, eax)
DECL_OFFSET(MonoContext, ebx)
DECL_OFFSET(MonoContext, ecx)
DECL_OFFSET(MonoContext, edx)
DECL_OFFSET(MonoContext, edi)
DECL_OFFSET(MonoContext, esi)
DECL_OFFSET(MonoContext, esp)
DECL_OFFSET(MonoContext, ebp)
DECL_OFFSET(MonoContext, eip)
DECL_OFFSET(MonoLMF, method)
DECL_OFFSET(MonoLMF, lmf_addr)
DECL_OFFSET(MonoLMF, esp)
DECL_OFFSET(MonoLMF, ebx)
DECL_OFFSET(MonoLMF, edi)
DECL_OFFSET(MonoLMF, esi)
DECL_OFFSET(MonoLMF, ebp)
DECL_OFFSET(MonoLMF, eip)
#elif defined(TARGET_AMD64)
DECL_OFFSET(MonoContext, gregs)
DECL_OFFSET(MonoContext, fregs)
DECL_OFFSET(MonoLMF, rsp)
DECL_OFFSET(MonoLMF, rbp)
DECL_OFFSET(DynCallArgs, res)
DECL_OFFSET(DynCallArgs, fregs)
DECL_OFFSET(DynCallArgs, has_fp)
DECL_OFFSET(DynCallArgs, nstack_args)
DECL_OFFSET(DynCallArgs, regs)
DECL_OFFSET(MonoLMFTramp, ctx)
DECL_OFFSET(MonoLMFTramp, lmf_addr)
#elif defined(TARGET_ARM)
DECL_OFFSET(MonoLMF, sp)
DECL_OFFSET(MonoLMF, fp)
DECL_OFFSET(MonoLMF, ip)
DECL_OFFSET(MonoLMF, iregs)
DECL_OFFSET(MonoLMF, fregs)
DECL_OFFSET(DynCallArgs, fpregs)
DECL_OFFSET(DynCallArgs, has_fpregs)
DECL_OFFSET(DynCallArgs, regs)
DECL_OFFSET(DynCallArgs, n_stackargs)
DECL_OFFSET(SeqPointInfo, ss_tramp_addr)
#elif defined(TARGET_ARM64)
DECL_OFFSET(MonoLMF, pc)
DECL_OFFSET(MonoLMF, gregs)
DECL_OFFSET(DynCallArgs, regs)
DECL_OFFSET(DynCallArgs, fpregs)
DECL_OFFSET(DynCallArgs, n_stackargs)
DECL_OFFSET(DynCallArgs, n_fpargs)
DECL_OFFSET(DynCallArgs, n_fpret)
#elif defined(TARGET_S390X)
DECL_OFFSET(MonoLMF, pregs)
DECL_OFFSET(MonoLMF, lmf_addr)
DECL_OFFSET(MonoLMF, method)
DECL_OFFSET(MonoLMF, ebp)
DECL_OFFSET(MonoLMF, eip)
DECL_OFFSET(MonoLMF, gregs)
DECL_OFFSET(MonoLMF, fregs)
#elif defined(TARGET_RISCV)
DECL_OFFSET(MonoContext, gregs)
DECL_OFFSET(MonoContext, fregs)
#endif
// Shared architecture offsets
// ----------------------------
#if defined(TARGET_ARM) || defined(TARGET_ARM64)
DECL_OFFSET (MonoContext, pc)
DECL_OFFSET (MonoContext, regs)
DECL_OFFSET (MonoContext, fregs)
DECL_OFFSET(MonoLMF, lmf_addr)
DECL_OFFSET(DynCallArgs, res)
DECL_OFFSET(DynCallArgs, res2)
#endif
#if defined(TARGET_ARM)
DECL_OFFSET(MonoLMF, method)
DECL_OFFSET(GSharedVtCallInfo, stack_usage)
DECL_OFFSET(GSharedVtCallInfo, vret_arg_reg)
DECL_OFFSET(GSharedVtCallInfo, ret_marshal)
DECL_OFFSET(GSharedVtCallInfo, vret_slot)
DECL_OFFSET(GSharedVtCallInfo, gsharedvt_in)
DECL_OFFSET(SeqPointInfo, ss_trigger_page)
#endif
#if defined(TARGET_ARM64)
DECL_OFFSET (MonoContext, has_fregs)
DECL_OFFSET(GSharedVtCallInfo, stack_usage)
DECL_OFFSET(GSharedVtCallInfo, gsharedvt_in)
DECL_OFFSET(GSharedVtCallInfo, ret_marshal)
DECL_OFFSET(GSharedVtCallInfo, vret_slot)
#endif
#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
DECL_OFFSET(SeqPointInfo, ss_tramp_addr)
#endif
#if defined(TARGET_AMD64) || defined(TARGET_ARM) || defined(TARGET_ARM64)
DECL_OFFSET(SeqPointInfo, bp_addrs)
DECL_OFFSET(CallContext, gregs)
DECL_OFFSET(CallContext, fregs)
DECL_OFFSET(CallContext, stack_size)
DECL_OFFSET(CallContext, stack)
#endif
#if defined(TARGET_X86)
DECL_OFFSET(CallContext, eax)
DECL_OFFSET(CallContext, edx)
DECL_OFFSET(CallContext, fret)
DECL_OFFSET(CallContext, stack_size)
DECL_OFFSET(CallContext, stack)
#endif
#if defined(TARGET_X86)
DECL_OFFSET(GSharedVtCallInfo, stack_usage)
DECL_OFFSET(GSharedVtCallInfo, vret_slot)
DECL_OFFSET(GSharedVtCallInfo, vret_arg_slot)
DECL_OFFSET(GSharedVtCallInfo, ret_marshal)
DECL_OFFSET(GSharedVtCallInfo, gsharedvt_in)
#endif
#if defined(TARGET_AMD64)
DECL_OFFSET(GSharedVtCallInfo, ret_marshal)
DECL_OFFSET(GSharedVtCallInfo, vret_arg_reg)
DECL_OFFSET(GSharedVtCallInfo, vret_slot)
DECL_OFFSET(GSharedVtCallInfo, stack_usage)
DECL_OFFSET(GSharedVtCallInfo, gsharedvt_in)
#endif
DECL_OFFSET(MonoFtnDesc, arg)
DECL_OFFSET(MonoFtnDesc, addr)
#endif //DISABLE_JIT_OFFSETS
#endif //USED_CROSS_COMPILER_OFFSETS
#undef DECL_OFFSET
#undef DECL_OFFSET2
#undef DECL_ALIGN2
#undef DECL_SIZE
#undef DECL_SIZE2
#undef USE_CROSS_COMPILE_OFFSETS
| -1 |
dotnet/runtime | 65,926 | FastMod for EEHashTable (Faster virtual generics) | Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR` but that is a more complicated change and I think this one is worth having too anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| EgorBo | "2022-02-27T11:34:01Z" | "2022-03-01T19:43:37Z" | a5a7836f8193414fe64232d5c386b6402eb70e19 | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | FastMod for EEHashTable (Faster virtual generics). Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR` but that is a more complicated change and I think this one is worth having too anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| ./src/mono/mono/tests/static-fields-nonconst.il | .assembly extern mscorlib {}
.assembly test {}
.module test.exe
.class public EntryPoint extends [mscorlib]System.Object {
.field public static int32 m_test = int32(2)
.method public static int32 Main() cil managed {
.entrypoint
ldsfld int32 EntryPoint::m_test
brtrue fail
ldc.i4 0
ret
fail:
ldc.i4 1
ret
}
}
| .assembly extern mscorlib {}
.assembly test {}
.module test.exe
.class public EntryPoint extends [mscorlib]System.Object {
.field public static int32 m_test = int32(2)
.method public static int32 Main() cil managed {
.entrypoint
ldsfld int32 EntryPoint::m_test
brtrue fail
ldc.i4 0
ret
fail:
ldc.i4 1
ret
}
}
| -1 |
dotnet/runtime | 65,926 | FastMod for EEHashTable (Faster virtual generics) | Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR` but that is a more complicated change and I think this one is worth having too anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| EgorBo | "2022-02-27T11:34:01Z" | "2022-03-01T19:43:37Z" | a5a7836f8193414fe64232d5c386b6402eb70e19 | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | FastMod for EEHashTable (Faster virtual generics). Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR` but that is a more complicated change and I think this one is worth having too anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| ./src/coreclr/pal/tests/palsuite/samples/test1/test.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================================
**
** Source: test.c
**
** Purpose: This test is an example of the basic structure of a PAL test
** suite test case.
**
**
**==========================================================================*/
#include <palsuite.h>
PALTEST(samples_test1_paltest_samples_test1, "samples/test1/paltest_samples_test1")
{
/* Initialize the PAL.
*/
if(0 != PAL_Initialize(argc, argv))
{
return FAIL;
}
Trace("\nTest #1...\n");
#ifdef WIN32
Trace("\nWe are testing under Win32 environment.\n");
#else
Trace("\nWe are testing under Non-Win32 environment.\n");
#endif
Trace("\nThis test has passed.\n");
/* Shutdown the PAL.
*/
PAL_Terminate();
return PASS;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================================
**
** Source: test.c
**
** Purpose: This test is an example of the basic structure of a PAL test
** suite test case.
**
**
**==========================================================================*/
#include <palsuite.h>
PALTEST(samples_test1_paltest_samples_test1, "samples/test1/paltest_samples_test1")
{
/* Initialize the PAL.
*/
if(0 != PAL_Initialize(argc, argv))
{
return FAIL;
}
Trace("\nTest #1...\n");
#ifdef WIN32
Trace("\nWe are testing under Win32 environment.\n");
#else
Trace("\nWe are testing under Non-Win32 environment.\n");
#endif
Trace("\nThis test has passed.\n");
/* Shutdown the PAL.
*/
PAL_Terminate();
return PASS;
}
| -1 |
dotnet/runtime | 65,926 | FastMod for EEHashTable (Faster virtual generics) | Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR` but that is a more complicated change and I think this one is worth having too anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| EgorBo | "2022-02-27T11:34:01Z" | "2022-03-01T19:43:37Z" | a5a7836f8193414fe64232d5c386b6402eb70e19 | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | FastMod for EEHashTable (Faster virtual generics). Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR` but that is a more complicated change and I think this one is worth having too anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| ./src/tests/JIT/IL_Conformance/Old/Conformance_Base/ldarg_i4.ilproj | <Project Sdk="Microsoft.NET.Sdk.IL">
<PropertyGroup>
<OutputType>Exe</OutputType>
<RestorePackages>true</RestorePackages>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<DebugType>PdbOnly</DebugType>
<Optimize>True</Optimize>
</PropertyGroup>
<ItemGroup>
<Compile Include="ldarg_i4.il" />
</ItemGroup>
</Project>
| <Project Sdk="Microsoft.NET.Sdk.IL">
<PropertyGroup>
<OutputType>Exe</OutputType>
<RestorePackages>true</RestorePackages>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<DebugType>PdbOnly</DebugType>
<Optimize>True</Optimize>
</PropertyGroup>
<ItemGroup>
<Compile Include="ldarg_i4.il" />
</ItemGroup>
</Project>
| -1 |
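For context, the "FastMod" in the PR title refers to the standard multiply-based replacement for a hardware modulo when mapping a hash code to a bucket index (the same idea `Dictionary<TKey,TValue>` uses with a precomputed multiplier). Below is a minimal, hedged sketch of that trick; the names and shapes are illustrative only and are not the actual EEHashTable change.

```csharp
using System;

// Minimal sketch of the "fastmod" idea: replace `hash % bucketCount` with two
// multiplies using a precomputed magic multiplier. Illustrative only; this is
// not the runtime's EEHashTable code.
static class FastModSketch
{
    // Precompute once whenever the bucket count changes (bucketCount > 0, 32-bit).
    public static ulong GetMultiplier(uint bucketCount) => ulong.MaxValue / bucketCount + 1;

    // Equivalent to value % bucketCount for 32-bit inputs, without a hardware divide.
    public static uint FastMod(uint value, uint bucketCount, ulong multiplier)
    {
        ulong lowbits = multiplier * value;                     // low 64 bits of multiplier * value
        ulong high = Math.BigMul(lowbits, bucketCount, out _);  // high 64 bits of lowbits * bucketCount
        return (uint)high;                                      // == value % bucketCount
    }
}
```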
dotnet/runtime | 65,926 | FastMod for EEHashTable (Faster virtual generics) | Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR`, but that is a more complicated change and I think this one is worth having anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| EgorBo | "2022-02-27T11:34:01Z" | "2022-03-01T19:43:37Z" | a5a7836f8193414fe64232d5c386b6402eb70e19 | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | FastMod for EEHashTable (Faster virtual generics). Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR`, but that is a more complicated change and I think this one is worth having anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| ./src/tests/JIT/CodeGenBringUpTests/Lt1_r.csproj | <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<DebugType>PdbOnly</DebugType>
<Optimize>False</Optimize>
</PropertyGroup>
<ItemGroup>
<Compile Include="Lt1.cs" />
</ItemGroup>
</Project>
| <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<DebugType>PdbOnly</DebugType>
<Optimize>False</Optimize>
</PropertyGroup>
<ItemGroup>
<Compile Include="Lt1.cs" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime | 65,926 | FastMod for EEHashTable (Faster virtual generics) | Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR`, but that is a more complicated change and I think this one is worth having anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| EgorBo | "2022-02-27T11:34:01Z" | "2022-03-01T19:43:37Z" | a5a7836f8193414fe64232d5c386b6402eb70e19 | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | FastMod for EEHashTable (Faster virtual generics). Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR`, but that is a more complicated change and I think this one is worth having anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| ./src/tests/JIT/IL_Conformance/Old/Base/ldind_stind.il | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
.assembly extern legacy library mscorlib {}
.assembly ldind_stind{}
.class public ldind_stind {
.field public static int8 I1
.field public static int16 I2
.field public static int32 I4
.field public static int64 I8
.field public static float32 R4
.field public static float64 R8
.field public static native int I
.method public static int32 main(class [mscorlib]System.String[]) {
.entrypoint
.maxstack 10
ldsflda int8 ldind_stind::I1
ldc.i4 0x00000011
stind.i1
ldsflda int16 ldind_stind::I2
ldc.i4 0x00002222
stind.i2
ldsflda int32 ldind_stind::I4
ldc.i4 0x44444444
stind.i4
ldsflda int64 ldind_stind::I8
ldc.i8 0x8888888888888888
stind.i8
ldsflda float32 ldind_stind::R4
ldc.r4 float32(0x3F800000)
stind.r4
ldsflda float64 ldind_stind::R8
ldc.r8 float64(0x3FF0000000000000)
stind.r8
ldsflda native int ldind_stind::I
ldsflda native int ldind_stind::I
stind.i
ldsflda int8 ldind_stind::I1
ldind.i1
ldc.i4 0x00000011
ceq
brfalse FAIL
ldsflda int16 ldind_stind::I2
ldind.i2
ldc.i4 0x00002222
ceq
brfalse FAIL
ldsflda int32 ldind_stind::I4
ldind.i4
ldc.i4 0x44444444
ceq
brfalse FAIL
ldsflda int64 ldind_stind::I8
ldind.i8
ldc.i8 0x8888888888888888
ceq
brfalse FAIL
ldsflda float32 ldind_stind::R4
ldind.r4
ldc.r4 float32(0x3F800000)
ceq
brfalse FAIL
ldsflda float64 ldind_stind::R8
ldind.r8
ldc.r8 float64(0x3FF0000000000000)
ceq
brfalse FAIL
ldsflda native int ldind_stind::I
ldind.i
conv.i4
ldsfld native int ldind_stind::I
conv.i4
ceq
brfalse FAIL
PASS:
ldc.i4 100
ret
FAIL:
ldc.i4 0x0
ret
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
.assembly extern legacy library mscorlib {}
.assembly ldind_stind{}
.class public ldind_stind {
.field public static int8 I1
.field public static int16 I2
.field public static int32 I4
.field public static int64 I8
.field public static float32 R4
.field public static float64 R8
.field public static native int I
.method public static int32 main(class [mscorlib]System.String[]) {
.entrypoint
.maxstack 10
ldsflda int8 ldind_stind::I1
ldc.i4 0x00000011
stind.i1
ldsflda int16 ldind_stind::I2
ldc.i4 0x00002222
stind.i2
ldsflda int32 ldind_stind::I4
ldc.i4 0x44444444
stind.i4
ldsflda int64 ldind_stind::I8
ldc.i8 0x8888888888888888
stind.i8
ldsflda float32 ldind_stind::R4
ldc.r4 float32(0x3F800000)
stind.r4
ldsflda float64 ldind_stind::R8
ldc.r8 float64(0x3FF0000000000000)
stind.r8
ldsflda native int ldind_stind::I
ldsflda native int ldind_stind::I
stind.i
ldsflda int8 ldind_stind::I1
ldind.i1
ldc.i4 0x00000011
ceq
brfalse FAIL
ldsflda int16 ldind_stind::I2
ldind.i2
ldc.i4 0x00002222
ceq
brfalse FAIL
ldsflda int32 ldind_stind::I4
ldind.i4
ldc.i4 0x44444444
ceq
brfalse FAIL
ldsflda int64 ldind_stind::I8
ldind.i8
ldc.i8 0x8888888888888888
ceq
brfalse FAIL
ldsflda float32 ldind_stind::R4
ldind.r4
ldc.r4 float32(0x3F800000)
ceq
brfalse FAIL
ldsflda float64 ldind_stind::R8
ldind.r8
ldc.r8 float64(0x3FF0000000000000)
ceq
brfalse FAIL
ldsflda native int ldind_stind::I
ldind.i
conv.i4
ldsfld native int ldind_stind::I
conv.i4
ceq
brfalse FAIL
PASS:
ldc.i4 100
ret
FAIL:
ldc.i4 0x0
ret
}
}
| -1 |
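As a side note, a C# analogue of the ldind/stind pattern exercised above can make the IL easier to follow: stind.* stores a value through an address, ldind.* loads it back, and ceq/brfalse check the round trip. The sketch below is illustrative only and mirrors just the int32 leg of the test.

```csharp
// Hedged C# analogue of the IL test's int32 leg: take an address, store through it
// (stind.i4), load it back (ldind.i4), and compare (ceq). Returns 100 on success,
// matching the IL test's PASS convention. Requires /unsafe to compile.
class LdindStindAnalogue
{
    static unsafe int Main()
    {
        int i4 = 0;
        int* p = &i4;                         // roughly ldsflda: take the address of a location
        *p = 0x44444444;                      // stind.i4: store through the address
        return (*p == 0x44444444) ? 100 : 0;  // ldind.i4 + ceq + brfalse
    }
}
```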
dotnet/runtime | 65,926 | FastMod for EEHashTable (Faster virtual generics) | Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR`, but that is a more complicated change and I think this one is worth having anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| EgorBo | "2022-02-27T11:34:01Z" | "2022-03-01T19:43:37Z" | a5a7836f8193414fe64232d5c386b6402eb70e19 | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | FastMod for EEHashTable (Faster virtual generics). Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR`, but that is a more complicated change and I think this one is worth having anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| ./src/libraries/System.IO.MemoryMappedFiles/src/System/IO/MemoryMappedFiles/MemoryMappedView.Unix.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using Microsoft.Win32.SafeHandles;
using System.Diagnostics;
namespace System.IO.MemoryMappedFiles
{
internal sealed partial class MemoryMappedView
{
public static MemoryMappedView CreateView(
SafeMemoryMappedFileHandle memMappedFileHandle, MemoryMappedFileAccess access,
long requestedOffset, long requestedSize)
{
if (requestedOffset > memMappedFileHandle._capacity)
{
throw new ArgumentOutOfRangeException("offset");
}
if (requestedSize > MaxProcessAddressSpace)
{
throw new IOException(SR.ArgumentOutOfRange_CapacityLargerThanLogicalAddressSpaceNotAllowed);
}
if (requestedOffset + requestedSize > memMappedFileHandle._capacity)
{
throw new UnauthorizedAccessException();
}
if (memMappedFileHandle.IsClosed)
{
throw new ObjectDisposedException(nameof(MemoryMappedFile));
}
if (requestedSize == MemoryMappedFile.DefaultSize)
{
requestedSize = memMappedFileHandle._capacity - requestedOffset;
}
// mmap can only create views that start at a multiple of the page size. As on Windows,
// we hide this restriction from the user by creating larger views than the user requested and hiding the parts
// that the user did not request. extraMemNeeded is the amount of extra memory we allocate before the start of the
// requested view. (mmap may round up the actual length such that it is also page-aligned; we hide that by using
// the right size and not extending the size to be page-aligned.)
ulong nativeSize;
long extraMemNeeded, nativeOffset;
long pageSize = Interop.Sys.SysConf(Interop.Sys.SysConfName._SC_PAGESIZE);
Debug.Assert(pageSize > 0);
ValidateSizeAndOffset(
requestedSize, requestedOffset, pageSize,
out nativeSize, out extraMemNeeded, out nativeOffset);
// Determine whether to create the pages as private or as shared; the former is used for copy-on-write.
Interop.Sys.MemoryMappedFlags flags =
(memMappedFileHandle._access == MemoryMappedFileAccess.CopyOnWrite || access == MemoryMappedFileAccess.CopyOnWrite) ?
Interop.Sys.MemoryMappedFlags.MAP_PRIVATE :
Interop.Sys.MemoryMappedFlags.MAP_SHARED;
// If we have a file handle, get the file descriptor from it. If the handle is null,
// we'll use an anonymous backing store for the map.
SafeFileHandle fd;
if (memMappedFileHandle._fileStreamHandle != null)
{
// Get the file descriptor from the SafeFileHandle
fd = memMappedFileHandle._fileStreamHandle;
Debug.Assert(!fd.IsInvalid);
}
else
{
fd = new SafeFileHandle(new IntPtr(-1), false);
flags |= Interop.Sys.MemoryMappedFlags.MAP_ANONYMOUS;
}
// Nothing to do for options.DelayAllocatePages, since we're only creating the map
// with mmap when creating the view.
// Verify that the requested view permissions don't exceed the map's permissions
Interop.Sys.MemoryMappedProtections viewProtForVerification = GetProtections(access, forVerification: true);
Interop.Sys.MemoryMappedProtections mapProtForVerification = GetProtections(memMappedFileHandle._access, forVerification: true);
if ((viewProtForVerification & mapProtForVerification) != viewProtForVerification)
{
throw new UnauthorizedAccessException();
}
// viewProtections is strictly less than mapProtections, so use viewProtections for actually creating the map.
Interop.Sys.MemoryMappedProtections viewProtForCreation = GetProtections(access, forVerification: false);
// Create the map
IntPtr addr = IntPtr.Zero;
if (nativeSize > 0)
{
addr = Interop.Sys.MMap(
IntPtr.Zero, // don't specify an address; let the system choose one
nativeSize, // specify the rounded-size we computed so as to page align; size + extraMemNeeded
viewProtForCreation,
flags,
fd, // mmap adds a ref count to the fd, so there's no need to dup it.
nativeOffset); // specify the rounded-offset we computed so as to page align; offset - extraMemNeeded
}
else
{
// There are some corner cases where the .NET API allows the requested size to be zero, e.g. the caller is
// creating a map at the end of the capacity. We can't pass 0 to mmap, as that'll fail with EINVAL, nor can
// we create a map that extends beyond the end of the underlying file, as that'll fail on some platforms at the
// time of the map's creation. Instead, since there's no data to be read/written, it doesn't actually matter
// what backs the view, so we just create an anonymous mapping.
addr = Interop.Sys.MMap(
IntPtr.Zero,
1, // any length that's greater than zero will suffice
viewProtForCreation,
flags | Interop.Sys.MemoryMappedFlags.MAP_ANONYMOUS,
new SafeFileHandle(new IntPtr(-1), false), // ignore the actual fd even if there was one
0);
requestedSize = 0;
extraMemNeeded = 0;
}
if (addr == IntPtr.Zero) // note that shim uses null pointer, not non-null MAP_FAILED sentinel
{
throw Interop.GetExceptionForIoErrno(Interop.Sys.GetLastErrorInfo());
}
// Based on the HandleInheritability, try to prevent the memory-mapped region
// from being inherited by a forked process
if (memMappedFileHandle._inheritability == HandleInheritability.None)
{
DisableForkingIfPossible(addr, nativeSize);
}
// Create and return the view handle
var viewHandle = new SafeMemoryMappedViewHandle(addr, ownsHandle: true);
viewHandle.Initialize((ulong)nativeSize);
return new MemoryMappedView(
viewHandle,
extraMemNeeded, // the view points to offset - extraMemNeeded, so we need to shift back by extraMemNeeded
requestedSize, // only allow access to the actual size requested
access);
}
public unsafe void Flush(UIntPtr capacity)
{
if (capacity == UIntPtr.Zero)
return;
byte* ptr = null;
try
{
_viewHandle.AcquirePointer(ref ptr);
int result = Interop.Sys.MSync(
(IntPtr)ptr, (ulong)capacity,
Interop.Sys.MemoryMappedSyncFlags.MS_SYNC | Interop.Sys.MemoryMappedSyncFlags.MS_INVALIDATE);
if (result < 0)
{
throw Interop.GetExceptionForIoErrno(Interop.Sys.GetLastErrorInfo());
}
}
finally
{
if (ptr != null)
{
_viewHandle.ReleasePointer();
}
}
}
/// <summary>Attempt to prevent the specified pages from being copied into forked processes.</summary>
/// <param name="addr">The starting address.</param>
/// <param name="length">The length.</param>
private static void DisableForkingIfPossible(IntPtr addr, ulong length)
{
if (length > 0)
{
Interop.Sys.MAdvise(addr, length, Interop.Sys.MemoryAdvice.MADV_DONTFORK);
// Intentionally ignore error code -- it's just a hint and it's not supported on all systems.
}
}
/// <summary>
/// The Windows implementation limits maps to the size of the logical address space.
/// We use the same value here.
/// </summary>
private const long MaxProcessAddressSpace = 8192L * 1000 * 1000 * 1000;
/// <summary>Maps a MemoryMappedFileAccess to the associated MemoryMappedProtections.</summary>
internal static Interop.Sys.MemoryMappedProtections GetProtections(
MemoryMappedFileAccess access, bool forVerification)
{
switch (access)
{
default:
case MemoryMappedFileAccess.Read:
return Interop.Sys.MemoryMappedProtections.PROT_READ;
case MemoryMappedFileAccess.Write:
return Interop.Sys.MemoryMappedProtections.PROT_WRITE;
case MemoryMappedFileAccess.ReadWrite:
return
Interop.Sys.MemoryMappedProtections.PROT_READ |
Interop.Sys.MemoryMappedProtections.PROT_WRITE;
case MemoryMappedFileAccess.ReadExecute:
return
Interop.Sys.MemoryMappedProtections.PROT_READ |
Interop.Sys.MemoryMappedProtections.PROT_EXEC;
case MemoryMappedFileAccess.ReadWriteExecute:
return
Interop.Sys.MemoryMappedProtections.PROT_READ |
Interop.Sys.MemoryMappedProtections.PROT_WRITE |
Interop.Sys.MemoryMappedProtections.PROT_EXEC;
case MemoryMappedFileAccess.CopyOnWrite:
return forVerification ?
Interop.Sys.MemoryMappedProtections.PROT_READ :
Interop.Sys.MemoryMappedProtections.PROT_READ | Interop.Sys.MemoryMappedProtections.PROT_WRITE;
}
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using Microsoft.Win32.SafeHandles;
using System.Diagnostics;
namespace System.IO.MemoryMappedFiles
{
internal sealed partial class MemoryMappedView
{
public static MemoryMappedView CreateView(
SafeMemoryMappedFileHandle memMappedFileHandle, MemoryMappedFileAccess access,
long requestedOffset, long requestedSize)
{
if (requestedOffset > memMappedFileHandle._capacity)
{
throw new ArgumentOutOfRangeException("offset");
}
if (requestedSize > MaxProcessAddressSpace)
{
throw new IOException(SR.ArgumentOutOfRange_CapacityLargerThanLogicalAddressSpaceNotAllowed);
}
if (requestedOffset + requestedSize > memMappedFileHandle._capacity)
{
throw new UnauthorizedAccessException();
}
if (memMappedFileHandle.IsClosed)
{
throw new ObjectDisposedException(nameof(MemoryMappedFile));
}
if (requestedSize == MemoryMappedFile.DefaultSize)
{
requestedSize = memMappedFileHandle._capacity - requestedOffset;
}
// mmap can only create views that start at a multiple of the page size. As on Windows,
// we hide this restriction from the user by creating larger views than the user requested and hiding the parts
// that the user did not request. extraMemNeeded is the amount of extra memory we allocate before the start of the
// requested view. (mmap may round up the actual length such that it is also page-aligned; we hide that by using
// the right size and not extending the size to be page-aligned.)
ulong nativeSize;
long extraMemNeeded, nativeOffset;
long pageSize = Interop.Sys.SysConf(Interop.Sys.SysConfName._SC_PAGESIZE);
Debug.Assert(pageSize > 0);
ValidateSizeAndOffset(
requestedSize, requestedOffset, pageSize,
out nativeSize, out extraMemNeeded, out nativeOffset);
// Determine whether to create the pages as private or as shared; the former is used for copy-on-write.
Interop.Sys.MemoryMappedFlags flags =
(memMappedFileHandle._access == MemoryMappedFileAccess.CopyOnWrite || access == MemoryMappedFileAccess.CopyOnWrite) ?
Interop.Sys.MemoryMappedFlags.MAP_PRIVATE :
Interop.Sys.MemoryMappedFlags.MAP_SHARED;
// If we have a file handle, get the file descriptor from it. If the handle is null,
// we'll use an anonymous backing store for the map.
SafeFileHandle fd;
if (memMappedFileHandle._fileStreamHandle != null)
{
// Get the file descriptor from the SafeFileHandle
fd = memMappedFileHandle._fileStreamHandle;
Debug.Assert(!fd.IsInvalid);
}
else
{
fd = new SafeFileHandle(new IntPtr(-1), false);
flags |= Interop.Sys.MemoryMappedFlags.MAP_ANONYMOUS;
}
// Nothing to do for options.DelayAllocatePages, since we're only creating the map
// with mmap when creating the view.
// Verify that the requested view permissions don't exceed the map's permissions
Interop.Sys.MemoryMappedProtections viewProtForVerification = GetProtections(access, forVerification: true);
Interop.Sys.MemoryMappedProtections mapProtForVerification = GetProtections(memMappedFileHandle._access, forVerification: true);
if ((viewProtForVerification & mapProtForVerification) != viewProtForVerification)
{
throw new UnauthorizedAccessException();
}
// viewProtections is strictly less than mapProtections, so use viewProtections for actually creating the map.
Interop.Sys.MemoryMappedProtections viewProtForCreation = GetProtections(access, forVerification: false);
// Create the map
IntPtr addr = IntPtr.Zero;
if (nativeSize > 0)
{
addr = Interop.Sys.MMap(
IntPtr.Zero, // don't specify an address; let the system choose one
nativeSize, // specify the rounded-size we computed so as to page align; size + extraMemNeeded
viewProtForCreation,
flags,
fd, // mmap adds a ref count to the fd, so there's no need to dup it.
nativeOffset); // specify the rounded-offset we computed so as to page align; offset - extraMemNeeded
}
else
{
// There are some corner cases where the .NET API allows the requested size to be zero, e.g. the caller is
// creating a map at the end of the capacity. We can't pass 0 to mmap, as that'll fail with EINVAL, nor can
// we create a map that extends beyond the end of the underlying file, as that'll fail on some platforms at the
// time of the map's creation. Instead, since there's no data to be read/written, it doesn't actually matter
// what backs the view, so we just create an anonymous mapping.
addr = Interop.Sys.MMap(
IntPtr.Zero,
1, // any length that's greater than zero will suffice
viewProtForCreation,
flags | Interop.Sys.MemoryMappedFlags.MAP_ANONYMOUS,
new SafeFileHandle(new IntPtr(-1), false), // ignore the actual fd even if there was one
0);
requestedSize = 0;
extraMemNeeded = 0;
}
if (addr == IntPtr.Zero) // note that shim uses null pointer, not non-null MAP_FAILED sentinel
{
throw Interop.GetExceptionForIoErrno(Interop.Sys.GetLastErrorInfo());
}
// Based on the HandleInheritability, try to prevent the memory-mapped region
// from being inherited by a forked process
if (memMappedFileHandle._inheritability == HandleInheritability.None)
{
DisableForkingIfPossible(addr, nativeSize);
}
// Create and return the view handle
var viewHandle = new SafeMemoryMappedViewHandle(addr, ownsHandle: true);
viewHandle.Initialize((ulong)nativeSize);
return new MemoryMappedView(
viewHandle,
extraMemNeeded, // the view points to offset - extraMemNeeded, so we need to shift back by extraMemNeeded
requestedSize, // only allow access to the actual size requested
access);
}
public unsafe void Flush(UIntPtr capacity)
{
if (capacity == UIntPtr.Zero)
return;
byte* ptr = null;
try
{
_viewHandle.AcquirePointer(ref ptr);
int result = Interop.Sys.MSync(
(IntPtr)ptr, (ulong)capacity,
Interop.Sys.MemoryMappedSyncFlags.MS_SYNC | Interop.Sys.MemoryMappedSyncFlags.MS_INVALIDATE);
if (result < 0)
{
throw Interop.GetExceptionForIoErrno(Interop.Sys.GetLastErrorInfo());
}
}
finally
{
if (ptr != null)
{
_viewHandle.ReleasePointer();
}
}
}
/// <summary>Attempt to prevent the specified pages from being copied into forked processes.</summary>
/// <param name="addr">The starting address.</param>
/// <param name="length">The length.</param>
private static void DisableForkingIfPossible(IntPtr addr, ulong length)
{
if (length > 0)
{
Interop.Sys.MAdvise(addr, length, Interop.Sys.MemoryAdvice.MADV_DONTFORK);
// Intentionally ignore error code -- it's just a hint and it's not supported on all systems.
}
}
/// <summary>
/// The Windows implementation limits maps to the size of the logical address space.
/// We use the same value here.
/// </summary>
private const long MaxProcessAddressSpace = 8192L * 1000 * 1000 * 1000;
/// <summary>Maps a MemoryMappedFileAccess to the associated MemoryMappedProtections.</summary>
internal static Interop.Sys.MemoryMappedProtections GetProtections(
MemoryMappedFileAccess access, bool forVerification)
{
switch (access)
{
default:
case MemoryMappedFileAccess.Read:
return Interop.Sys.MemoryMappedProtections.PROT_READ;
case MemoryMappedFileAccess.Write:
return Interop.Sys.MemoryMappedProtections.PROT_WRITE;
case MemoryMappedFileAccess.ReadWrite:
return
Interop.Sys.MemoryMappedProtections.PROT_READ |
Interop.Sys.MemoryMappedProtections.PROT_WRITE;
case MemoryMappedFileAccess.ReadExecute:
return
Interop.Sys.MemoryMappedProtections.PROT_READ |
Interop.Sys.MemoryMappedProtections.PROT_EXEC;
case MemoryMappedFileAccess.ReadWriteExecute:
return
Interop.Sys.MemoryMappedProtections.PROT_READ |
Interop.Sys.MemoryMappedProtections.PROT_WRITE |
Interop.Sys.MemoryMappedProtections.PROT_EXEC;
case MemoryMappedFileAccess.CopyOnWrite:
return forVerification ?
Interop.Sys.MemoryMappedProtections.PROT_READ :
Interop.Sys.MemoryMappedProtections.PROT_READ | Interop.Sys.MemoryMappedProtections.PROT_WRITE;
}
}
}
}
| -1 |
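The page-alignment comments in CreateView above become clearer with the arithmetic written out. ValidateSizeAndOffset itself is not shown in this file, so the helper below is only a plausible sketch of the rounding those comments describe (offset rounded down to a page boundary, the slack tracked as extraMemNeeded, and the size grown by the same amount).

```csharp
// Plausible sketch (not the actual ValidateSizeAndOffset) of the rounding described
// by the CreateView comments: mmap offsets must be page-aligned, so the native
// mapping starts extraMemNeeded bytes before the requested offset and is that much larger.
static void SketchValidateSizeAndOffset(
    long requestedSize, long requestedOffset, long pageSize,
    out ulong nativeSize, out long extraMemNeeded, out long nativeOffset)
{
    extraMemNeeded = requestedOffset % pageSize;           // slack before the requested start
    nativeOffset = requestedOffset - extraMemNeeded;       // page-aligned offset passed to mmap
    nativeSize = (ulong)(requestedSize + extraMemNeeded);  // "size + extraMemNeeded" from the mmap call
}
```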
dotnet/runtime | 65,926 | FastMod for EEHashTable (Faster virtual generics) | Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR`, but that is a more complicated change and I think this one is worth having anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| EgorBo | "2022-02-27T11:34:01Z" | "2022-03-01T19:43:37Z" | a5a7836f8193414fe64232d5c386b6402eb70e19 | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | FastMod for EEHashTable (Faster virtual generics). Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR`, but that is a more complicated change and I think this one is worth having anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| ./src/tests/JIT/HardwareIntrinsics/General/Vector256/EqualsAny.Byte.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
namespace JIT.HardwareIntrinsics.General
{
public static partial class Program
{
private static void EqualsAnyByte()
{
var test = new VectorBooleanBinaryOpTest__EqualsAnyByte();
// Validates basic functionality works, using Unsafe.Read
test.RunBasicScenario_UnsafeRead();
// Validates calling via reflection works, using Unsafe.Read
test.RunReflectionScenario_UnsafeRead();
// Validates passing a static member works
test.RunClsVarScenario();
// Validates passing a local works, using Unsafe.Read
test.RunLclVarScenario_UnsafeRead();
// Validates passing the field of a local class works
test.RunClassLclFldScenario();
// Validates passing an instance member of a class works
test.RunClassFldScenario();
// Validates passing the field of a local struct works
test.RunStructLclFldScenario();
// Validates passing an instance member of a struct works
test.RunStructFldScenario();
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class VectorBooleanBinaryOpTest__EqualsAnyByte
{
private struct DataTable
{
private byte[] inArray1;
private byte[] inArray2;
private GCHandle inHandle1;
private GCHandle inHandle2;
private ulong alignment;
public DataTable(Byte[] inArray1, Byte[] inArray2, int alignment)
{
int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Byte>();
int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Byte>();
if ((alignment != 32 && alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2)
{
throw new ArgumentException("Invalid value of alignment");
}
this.inArray1 = new byte[alignment * 2];
this.inArray2 = new byte[alignment * 2];
this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned);
this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned);
this.alignment = (ulong)alignment;
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Byte, byte>(ref inArray1[0]), (uint)sizeOfinArray1);
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Byte, byte>(ref inArray2[0]), (uint)sizeOfinArray2);
}
public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment);
public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment);
public void Dispose()
{
inHandle1.Free();
inHandle2.Free();
}
private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
{
return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
}
}
private struct TestStruct
{
public Vector256<Byte> _fld1;
public Vector256<Byte> _fld2;
public static TestStruct Create()
{
var testStruct = new TestStruct();
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Byte>, byte>(ref testStruct._fld1), ref Unsafe.As<Byte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<Byte>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetByte(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Byte>, byte>(ref testStruct._fld2), ref Unsafe.As<Byte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector256<Byte>>());
return testStruct;
}
public void RunStructFldScenario(VectorBooleanBinaryOpTest__EqualsAnyByte testClass)
{
var result = Vector256.EqualsAny(_fld1, _fld2);
testClass.ValidateResult(_fld1, _fld2, result);
}
}
private static readonly int LargestVectorSize = 32;
private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector256<Byte>>() / sizeof(Byte);
private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector256<Byte>>() / sizeof(Byte);
private static Byte[] _data1 = new Byte[Op1ElementCount];
private static Byte[] _data2 = new Byte[Op2ElementCount];
private static Vector256<Byte> _clsVar1;
private static Vector256<Byte> _clsVar2;
private Vector256<Byte> _fld1;
private Vector256<Byte> _fld2;
private DataTable _dataTable;
static VectorBooleanBinaryOpTest__EqualsAnyByte()
{
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Byte>, byte>(ref _clsVar1), ref Unsafe.As<Byte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<Byte>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetByte(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Byte>, byte>(ref _clsVar2), ref Unsafe.As<Byte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector256<Byte>>());
}
public VectorBooleanBinaryOpTest__EqualsAnyByte()
{
Succeeded = true;
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Byte>, byte>(ref _fld1), ref Unsafe.As<Byte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<Byte>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetByte(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Byte>, byte>(ref _fld2), ref Unsafe.As<Byte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector256<Byte>>());
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); }
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetByte(); }
_dataTable = new DataTable(_data1, _data2, LargestVectorSize);
}
public bool Succeeded { get; set; }
public void RunBasicScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
var result = Vector256.EqualsAny(
Unsafe.Read<Vector256<Byte>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector256<Byte>>(_dataTable.inArray2Ptr)
);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, result);
}
public void RunReflectionScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
var method = typeof(Vector256).GetMethod(nameof(Vector256.EqualsAny), new Type[] {
typeof(Vector256<Byte>),
typeof(Vector256<Byte>)
});
if (method is null)
{
method = typeof(Vector256).GetMethod(nameof(Vector256.EqualsAny), 1, new Type[] {
typeof(Vector256<>).MakeGenericType(Type.MakeGenericMethodParameter(0)),
typeof(Vector256<>).MakeGenericType(Type.MakeGenericMethodParameter(0))
});
}
if (method.IsGenericMethodDefinition)
{
method = method.MakeGenericMethod(typeof(Byte));
}
var result = method.Invoke(null, new object[] {
Unsafe.Read<Vector256<Byte>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector256<Byte>>(_dataTable.inArray2Ptr)
});
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, (bool)(result));
}
public void RunClsVarScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
var result = Vector256.EqualsAny(
_clsVar1,
_clsVar2
);
ValidateResult(_clsVar1, _clsVar2, result);
}
public void RunLclVarScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
var op1 = Unsafe.Read<Vector256<Byte>>(_dataTable.inArray1Ptr);
var op2 = Unsafe.Read<Vector256<Byte>>(_dataTable.inArray2Ptr);
var result = Vector256.EqualsAny(op1, op2);
ValidateResult(op1, op2, result);
}
public void RunClassLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
var test = new VectorBooleanBinaryOpTest__EqualsAnyByte();
var result = Vector256.EqualsAny(test._fld1, test._fld2);
ValidateResult(test._fld1, test._fld2, result);
}
public void RunClassFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
var result = Vector256.EqualsAny(_fld1, _fld2);
ValidateResult(_fld1, _fld2, result);
}
public void RunStructLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
var test = TestStruct.Create();
var result = Vector256.EqualsAny(test._fld1, test._fld2);
ValidateResult(test._fld1, test._fld2, result);
}
public void RunStructFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
var test = TestStruct.Create();
test.RunStructFldScenario(this);
}
private void ValidateResult(Vector256<Byte> op1, Vector256<Byte> op2, bool result, [CallerMemberName] string method = "")
{
Byte[] inArray1 = new Byte[Op1ElementCount];
Byte[] inArray2 = new Byte[Op2ElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<Byte, byte>(ref inArray1[0]), op1);
Unsafe.WriteUnaligned(ref Unsafe.As<Byte, byte>(ref inArray2[0]), op2);
ValidateResult(inArray1, inArray2, result, method);
}
private void ValidateResult(void* op1, void* op2, bool result, [CallerMemberName] string method = "")
{
Byte[] inArray1 = new Byte[Op1ElementCount];
Byte[] inArray2 = new Byte[Op2ElementCount];
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Byte, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector256<Byte>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Byte, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector256<Byte>>());
ValidateResult(inArray1, inArray2, result, method);
}
private void ValidateResult(Byte[] left, Byte[] right, bool result, [CallerMemberName] string method = "")
{
bool succeeded = true;
var expectedResult = false;
for (var i = 0; i < Op1ElementCount; i++)
{
expectedResult |= (left[i] == right[i]);
}
succeeded = (expectedResult == result);
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"{nameof(Vector256)}.{nameof(Vector256.EqualsAny)}<Byte>(Vector256<Byte>, Vector256<Byte>): {method} failed:");
TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})");
TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})");
TestLibrary.TestFramework.LogInformation($" result: ({result})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
namespace JIT.HardwareIntrinsics.General
{
public static partial class Program
{
private static void EqualsAnyByte()
{
var test = new VectorBooleanBinaryOpTest__EqualsAnyByte();
// Validates basic functionality works, using Unsafe.Read
test.RunBasicScenario_UnsafeRead();
// Validates calling via reflection works, using Unsafe.Read
test.RunReflectionScenario_UnsafeRead();
// Validates passing a static member works
test.RunClsVarScenario();
// Validates passing a local works, using Unsafe.Read
test.RunLclVarScenario_UnsafeRead();
// Validates passing the field of a local class works
test.RunClassLclFldScenario();
// Validates passing an instance member of a class works
test.RunClassFldScenario();
// Validates passing the field of a local struct works
test.RunStructLclFldScenario();
// Validates passing an instance member of a struct works
test.RunStructFldScenario();
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class VectorBooleanBinaryOpTest__EqualsAnyByte
{
private struct DataTable
{
private byte[] inArray1;
private byte[] inArray2;
private GCHandle inHandle1;
private GCHandle inHandle2;
private ulong alignment;
public DataTable(Byte[] inArray1, Byte[] inArray2, int alignment)
{
int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Byte>();
int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Byte>();
if ((alignment != 32 && alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2)
{
throw new ArgumentException("Invalid value of alignment");
}
this.inArray1 = new byte[alignment * 2];
this.inArray2 = new byte[alignment * 2];
this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned);
this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned);
this.alignment = (ulong)alignment;
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Byte, byte>(ref inArray1[0]), (uint)sizeOfinArray1);
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Byte, byte>(ref inArray2[0]), (uint)sizeOfinArray2);
}
public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment);
public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment);
public void Dispose()
{
inHandle1.Free();
inHandle2.Free();
}
private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
{
return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
}
}
private struct TestStruct
{
public Vector256<Byte> _fld1;
public Vector256<Byte> _fld2;
public static TestStruct Create()
{
var testStruct = new TestStruct();
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Byte>, byte>(ref testStruct._fld1), ref Unsafe.As<Byte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<Byte>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetByte(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Byte>, byte>(ref testStruct._fld2), ref Unsafe.As<Byte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector256<Byte>>());
return testStruct;
}
public void RunStructFldScenario(VectorBooleanBinaryOpTest__EqualsAnyByte testClass)
{
var result = Vector256.EqualsAny(_fld1, _fld2);
testClass.ValidateResult(_fld1, _fld2, result);
}
}
private static readonly int LargestVectorSize = 32;
private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector256<Byte>>() / sizeof(Byte);
private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector256<Byte>>() / sizeof(Byte);
private static Byte[] _data1 = new Byte[Op1ElementCount];
private static Byte[] _data2 = new Byte[Op2ElementCount];
private static Vector256<Byte> _clsVar1;
private static Vector256<Byte> _clsVar2;
private Vector256<Byte> _fld1;
private Vector256<Byte> _fld2;
private DataTable _dataTable;
static VectorBooleanBinaryOpTest__EqualsAnyByte()
{
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Byte>, byte>(ref _clsVar1), ref Unsafe.As<Byte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<Byte>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetByte(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Byte>, byte>(ref _clsVar2), ref Unsafe.As<Byte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector256<Byte>>());
}
public VectorBooleanBinaryOpTest__EqualsAnyByte()
{
Succeeded = true;
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Byte>, byte>(ref _fld1), ref Unsafe.As<Byte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<Byte>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetByte(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<Byte>, byte>(ref _fld2), ref Unsafe.As<Byte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector256<Byte>>());
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); }
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetByte(); }
_dataTable = new DataTable(_data1, _data2, LargestVectorSize);
}
public bool Succeeded { get; set; }
public void RunBasicScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
var result = Vector256.EqualsAny(
Unsafe.Read<Vector256<Byte>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector256<Byte>>(_dataTable.inArray2Ptr)
);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, result);
}
public void RunReflectionScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
var method = typeof(Vector256).GetMethod(nameof(Vector256.EqualsAny), new Type[] {
typeof(Vector256<Byte>),
typeof(Vector256<Byte>)
});
if (method is null)
{
method = typeof(Vector256).GetMethod(nameof(Vector256.EqualsAny), 1, new Type[] {
typeof(Vector256<>).MakeGenericType(Type.MakeGenericMethodParameter(0)),
typeof(Vector256<>).MakeGenericType(Type.MakeGenericMethodParameter(0))
});
}
if (method.IsGenericMethodDefinition)
{
method = method.MakeGenericMethod(typeof(Byte));
}
var result = method.Invoke(null, new object[] {
Unsafe.Read<Vector256<Byte>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector256<Byte>>(_dataTable.inArray2Ptr)
});
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, (bool)(result));
}
public void RunClsVarScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
var result = Vector256.EqualsAny(
_clsVar1,
_clsVar2
);
ValidateResult(_clsVar1, _clsVar2, result);
}
public void RunLclVarScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
var op1 = Unsafe.Read<Vector256<Byte>>(_dataTable.inArray1Ptr);
var op2 = Unsafe.Read<Vector256<Byte>>(_dataTable.inArray2Ptr);
var result = Vector256.EqualsAny(op1, op2);
ValidateResult(op1, op2, result);
}
public void RunClassLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
var test = new VectorBooleanBinaryOpTest__EqualsAnyByte();
var result = Vector256.EqualsAny(test._fld1, test._fld2);
ValidateResult(test._fld1, test._fld2, result);
}
public void RunClassFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
var result = Vector256.EqualsAny(_fld1, _fld2);
ValidateResult(_fld1, _fld2, result);
}
public void RunStructLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
var test = TestStruct.Create();
var result = Vector256.EqualsAny(test._fld1, test._fld2);
ValidateResult(test._fld1, test._fld2, result);
}
public void RunStructFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
var test = TestStruct.Create();
test.RunStructFldScenario(this);
}
private void ValidateResult(Vector256<Byte> op1, Vector256<Byte> op2, bool result, [CallerMemberName] string method = "")
{
Byte[] inArray1 = new Byte[Op1ElementCount];
Byte[] inArray2 = new Byte[Op2ElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<Byte, byte>(ref inArray1[0]), op1);
Unsafe.WriteUnaligned(ref Unsafe.As<Byte, byte>(ref inArray2[0]), op2);
ValidateResult(inArray1, inArray2, result, method);
}
private void ValidateResult(void* op1, void* op2, bool result, [CallerMemberName] string method = "")
{
Byte[] inArray1 = new Byte[Op1ElementCount];
Byte[] inArray2 = new Byte[Op2ElementCount];
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Byte, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector256<Byte>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Byte, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector256<Byte>>());
ValidateResult(inArray1, inArray2, result, method);
}
private void ValidateResult(Byte[] left, Byte[] right, bool result, [CallerMemberName] string method = "")
{
bool succeeded = true;
var expectedResult = false;
for (var i = 0; i < Op1ElementCount; i++)
{
expectedResult |= (left[i] == right[i]);
}
succeeded = (expectedResult == result);
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"{nameof(Vector256)}.{nameof(Vector256.EqualsAny)}<Byte>(Vector256<Byte>, Vector256<Byte>): {method} failed:");
TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})");
TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})");
TestLibrary.TestFramework.LogInformation($" result: ({result})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
}
}
| -1 |
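A short usage sketch of the API the generated test above exercises may help: Vector256.EqualsAny returns true when any element pair compares equal, which is exactly what the scalar loop in ValidateResult checks. The snippet below is illustrative only.

```csharp
using System;
using System.Runtime.Intrinsics;

// Illustrative use of Vector256.EqualsAny next to the scalar reference logic used
// by the test's ValidateResult method.
class EqualsAnyUsage
{
    static void Main()
    {
        Vector256<byte> left = Vector256.Create((byte)1);                      // all lanes = 1
        Vector256<byte> right = Vector256<byte>.Zero.WithElement(5, (byte)1);  // one matching lane
        bool anyEqual = Vector256.EqualsAny(left, right);

        // Scalar reference, same shape as ValidateResult's loop.
        bool expected = false;
        for (int i = 0; i < Vector256<byte>.Count; i++)
        {
            expected |= left.GetElement(i) == right.GetElement(i);
        }

        Console.WriteLine($"{anyEqual} (expected {expected})"); // True (expected True)
    }
}
```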
dotnet/runtime | 65,926 | FastMod for EEHashTable (Faster virtual generics) | Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR`, but that is a more complicated change and I think this one is worth having anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| EgorBo | "2022-02-27T11:34:01Z" | "2022-03-01T19:43:37Z" | a5a7836f8193414fe64232d5c386b6402eb70e19 | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | FastMod for EEHashTable (Faster virtual generics). Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR`, but that is a more complicated change and I think this one is worth having anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| ./src/libraries/System.Diagnostics.DiagnosticSource/src/System/Diagnostics/ActivitySource.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Threading;
using System.Collections.Generic;
using System.Runtime.CompilerServices;
namespace System.Diagnostics
{
public sealed class ActivitySource : IDisposable
{
private static readonly SynchronizedList<ActivitySource> s_activeSources = new SynchronizedList<ActivitySource>();
private static readonly SynchronizedList<ActivityListener> s_allListeners = new SynchronizedList<ActivityListener>();
private SynchronizedList<ActivityListener>? _listeners;
/// <summary>
/// Construct an ActivitySource object with the input name
/// </summary>
/// <param name="name">The name of the ActivitySource object</param>
/// <param name="version">The version of the component publishing the tracing info.</param>
public ActivitySource(string name!!, string? version = "")
{
Name = name;
Version = version;
s_activeSources.Add(this);
if (s_allListeners.Count > 0)
{
s_allListeners.EnumWithAction((listener, source) =>
{
Func<ActivitySource, bool>? shouldListenTo = listener.ShouldListenTo;
if (shouldListenTo != null)
{
var activitySource = (ActivitySource)source;
if (shouldListenTo(activitySource))
{
activitySource.AddListener(listener);
}
}
}, this);
}
GC.KeepAlive(DiagnosticSourceEventSource.Log);
}
/// <summary>
/// Returns the ActivitySource name.
/// </summary>
public string Name { get; }
/// <summary>
/// Returns the ActivitySource version.
/// </summary>
public string? Version { get; }
/// <summary>
/// Check if there is any listeners for this ActivitySource.
/// This property can be helpful to tell if there is no listener, then no need to create Activity object
/// and avoid creating the objects needed to create Activity (e.g. ActivityContext)
/// Example of that is http scenario which can avoid reading the context data from the wire.
/// </summary>
public bool HasListeners()
{
SynchronizedList<ActivityListener>? listeners = _listeners;
return listeners != null && listeners.Count > 0;
}
/// <summary>
/// Creates a new <see cref="Activity"/> object if there is any listener to the Activity, returns null otherwise.
/// </summary>
/// <param name="name">The operation name of the Activity</param>
/// <param name="kind">The <see cref="ActivityKind"/></param>
/// <returns>The created <see cref="Activity"/> object or null if there is no any event listener.</returns>
/// <remarks>
/// If the Activity object is created, it will not start automatically. Callers need to call <see cref="Activity.Start()"/> to start it.
/// </remarks>
public Activity? CreateActivity(string name, ActivityKind kind)
=> CreateActivity(name, kind, default, null, null, null, default, startIt: false);
/// <summary>
/// Creates a new <see cref="Activity"/> object if there is any listener to the Activity, returns null otherwise.
/// If the Activity object is created, it will not automatically start. Callers will need to call <see cref="Activity.Start()"/> to start it.
/// </summary>
/// <param name="name">The operation name of the Activity.</param>
/// <param name="kind">The <see cref="ActivityKind"/></param>
/// <param name="parentContext">The parent <see cref="ActivityContext"/> object to initialize the created Activity object with.</param>
/// <param name="tags">The optional tags list to initialize the created Activity object with.</param>
/// <param name="links">The optional <see cref="ActivityLink"/> list to initialize the created Activity object with.</param>
/// <param name="idFormat">The default Id format to use.</param>
/// <returns>The created <see cref="Activity"/> object or null if there is no any listener.</returns>
/// <remarks>
/// If the Activity object is created, it will not start automatically. Callers need to call <see cref="Activity.Start()"/> to start it.
/// </remarks>
public Activity? CreateActivity(string name, ActivityKind kind, ActivityContext parentContext, IEnumerable<KeyValuePair<string, object?>>? tags = null, IEnumerable<ActivityLink>? links = null, ActivityIdFormat idFormat = ActivityIdFormat.Unknown)
=> CreateActivity(name, kind, parentContext, null, tags, links, default, startIt: false, idFormat);
/// <summary>
/// Creates a new <see cref="Activity"/> object if there is any listener to the Activity, returns null otherwise.
/// </summary>
/// <param name="name">The operation name of the Activity.</param>
/// <param name="kind">The <see cref="ActivityKind"/></param>
/// <param name="parentId">The parent Id to initialize the created Activity object with.</param>
/// <param name="tags">The optional tags list to initialize the created Activity object with.</param>
/// <param name="links">The optional <see cref="ActivityLink"/> list to initialize the created Activity object with.</param>
/// <param name="idFormat">The default Id format to use.</param>
/// <returns>The created <see cref="Activity"/> object or null if there is no any listener.</returns>
/// <remarks>
/// If the Activity object is created, it will not start automatically. Callers need to call <see cref="Activity.Start()"/> to start it.
/// </remarks>
public Activity? CreateActivity(string name, ActivityKind kind, string parentId, IEnumerable<KeyValuePair<string, object?>>? tags = null, IEnumerable<ActivityLink>? links = null, ActivityIdFormat idFormat = ActivityIdFormat.Unknown)
=> CreateActivity(name, kind, default, parentId, tags, links, default, startIt: false, idFormat);
/// <summary>
/// Creates and starts a new <see cref="Activity"/> object if there is any listener to the Activity, returns null otherwise.
/// </summary>
/// <param name="name">The operation name of the Activity</param>
/// <param name="kind">The <see cref="ActivityKind"/></param>
/// <returns>The created <see cref="Activity"/> object or null if there is no any event listener.</returns>
public Activity? StartActivity([CallerMemberName] string name = "", ActivityKind kind = ActivityKind.Internal)
=> CreateActivity(name, kind, default, null, null, null, default);
/// <summary>
/// Creates and starts a new <see cref="Activity"/> object if there is any listener to the Activity events, returns null otherwise.
/// </summary>
/// <param name="name">The operation name of the Activity.</param>
/// <param name="kind">The <see cref="ActivityKind"/></param>
/// <param name="parentContext">The parent <see cref="ActivityContext"/> object to initialize the created Activity object with.</param>
/// <param name="tags">The optional tags list to initialize the created Activity object with.</param>
/// <param name="links">The optional <see cref="ActivityLink"/> list to initialize the created Activity object with.</param>
/// <param name="startTime">The optional start timestamp to set on the created Activity object.</param>
/// <returns>The created <see cref="Activity"/> object or null if there is no any listener.</returns>
public Activity? StartActivity(string name, ActivityKind kind, ActivityContext parentContext, IEnumerable<KeyValuePair<string, object?>>? tags = null, IEnumerable<ActivityLink>? links = null, DateTimeOffset startTime = default)
=> CreateActivity(name, kind, parentContext, null, tags, links, startTime);
/// <summary>
/// Creates and starts a new <see cref="Activity"/> object if there is any listener to the Activity events, returns null otherwise.
/// </summary>
/// <param name="name">The operation name of the Activity.</param>
/// <param name="kind">The <see cref="ActivityKind"/></param>
/// <param name="parentId">The parent Id to initialize the created Activity object with.</param>
/// <param name="tags">The optional tags list to initialize the created Activity object with.</param>
/// <param name="links">The optional <see cref="ActivityLink"/> list to initialize the created Activity object with.</param>
/// <param name="startTime">The optional start timestamp to set on the created Activity object.</param>
/// <returns>The created <see cref="Activity"/> object or null if there is no any listener.</returns>
public Activity? StartActivity(string name, ActivityKind kind, string parentId, IEnumerable<KeyValuePair<string, object?>>? tags = null, IEnumerable<ActivityLink>? links = null, DateTimeOffset startTime = default)
=> CreateActivity(name, kind, default, parentId, tags, links, startTime);
/// <summary>
/// Creates and starts a new <see cref="Activity"/> object if there is any listener to the Activity events, returns null otherwise.
/// </summary>
/// <param name="kind">The <see cref="ActivityKind"/></param>
/// <param name="parentContext">The parent <see cref="ActivityContext"/> object to initialize the created Activity object with.</param>
/// <param name="tags">The optional tags list to initialize the created Activity object with.</param>
/// <param name="links">The optional <see cref="ActivityLink"/> list to initialize the created Activity object with.</param>
/// <param name="startTime">The optional start timestamp to set on the created Activity object.</param>
/// <param name="name">The operation name of the Activity.</param>
/// <returns>The created <see cref="Activity"/> object or null if there is no any listener.</returns>
public Activity? StartActivity(ActivityKind kind, ActivityContext parentContext = default, IEnumerable<KeyValuePair<string, object?>>? tags = null, IEnumerable<ActivityLink>? links = null, DateTimeOffset startTime = default, [CallerMemberName] string name = "")
=> CreateActivity(name, kind, parentContext, null, tags, links, startTime);
private Activity? CreateActivity(string name, ActivityKind kind, ActivityContext context, string? parentId, IEnumerable<KeyValuePair<string, object?>>? tags,
IEnumerable<ActivityLink>? links, DateTimeOffset startTime, bool startIt = true, ActivityIdFormat idFormat = ActivityIdFormat.Unknown)
{
// _listeners can get assigned to null in Dispose.
SynchronizedList<ActivityListener>? listeners = _listeners;
if (listeners == null || listeners.Count == 0)
{
return null;
}
Activity? activity = null;
ActivityTagsCollection? samplerTags;
string? traceState;
ActivitySamplingResult samplingResult = ActivitySamplingResult.None;
if (parentId != null)
{
ActivityCreationOptions<string> aco = default;
ActivityCreationOptions<ActivityContext> acoContext = default;
aco = new ActivityCreationOptions<string>(this, name, parentId, kind, tags, links, idFormat);
if (aco.IdFormat == ActivityIdFormat.W3C)
{
// acoContext is used only in the Sample calls which called only when we have W3C Id format.
acoContext = new ActivityCreationOptions<ActivityContext>(this, name, aco.GetContext(), kind, tags, links, ActivityIdFormat.W3C);
}
listeners.EnumWithFunc((ActivityListener listener, ref ActivityCreationOptions<string> data, ref ActivitySamplingResult result, ref ActivityCreationOptions<ActivityContext> dataWithContext) => {
SampleActivity<string>? sampleUsingParentId = listener.SampleUsingParentId;
if (sampleUsingParentId != null)
{
ActivitySamplingResult sr = sampleUsingParentId(ref data);
dataWithContext.SetTraceState(data.TraceState); // Keep the trace state in sync between data and dataWithContext
if (sr > result)
{
result = sr;
}
}
else if (data.IdFormat == ActivityIdFormat.W3C)
{
// In case we have a parent Id and the listener not providing the SampleUsingParentId, we'll try to find out if the following conditions are true:
// - The listener is providing the Sample callback
// - Can convert the parent Id to a Context. ActivityCreationOptions.TraceId != default means parent id converted to a valid context.
// Then we can call the listener Sample callback with the constructed context.
SampleActivity<ActivityContext>? sample = listener.Sample;
if (sample != null)
{
ActivitySamplingResult sr = sample(ref dataWithContext);
data.SetTraceState(dataWithContext.TraceState); // Keep the trace state in sync between data and dataWithContext
if (sr > result)
{
result = sr;
}
}
}
}, ref aco, ref samplingResult, ref acoContext);
if (context == default)
{
if (aco.GetContext() != default)
{
context = aco.GetContext();
parentId = null;
}
else if (acoContext.GetContext() != default)
{
context = acoContext.GetContext();
parentId = null;
}
}
samplerTags = aco.GetSamplingTags();
ActivityTagsCollection? atc = acoContext.GetSamplingTags();
if (atc != null)
{
if (samplerTags == null)
{
samplerTags = atc;
}
else
{
foreach (KeyValuePair<string, object?> tag in atc)
{
samplerTags.Add(tag);
}
}
}
idFormat = aco.IdFormat;
traceState = aco.TraceState;
}
else
{
bool useCurrentActivityContext = context == default && Activity.Current != null;
var aco = new ActivityCreationOptions<ActivityContext>(this, name, useCurrentActivityContext ? Activity.Current!.Context : context, kind, tags, links, idFormat);
listeners.EnumWithFunc((ActivityListener listener, ref ActivityCreationOptions<ActivityContext> data, ref ActivitySamplingResult result, ref ActivityCreationOptions<ActivityContext> unused) => {
SampleActivity<ActivityContext>? sample = listener.Sample;
if (sample != null)
{
ActivitySamplingResult dr = sample(ref data);
if (dr > result)
{
result = dr;
}
}
}, ref aco, ref samplingResult, ref aco);
if (!useCurrentActivityContext)
{
// We use the context stored inside ActivityCreationOptions as it is possible the trace id get automatically generated during the sampling.
// We don't use the context stored inside ActivityCreationOptions only in case if we used Activity.Current context, the reason is we need to
// create the new child activity with Parent set to Activity.Current.
context = aco.GetContext();
}
samplerTags = aco.GetSamplingTags();
idFormat = aco.IdFormat;
traceState = aco.TraceState;
}
if (samplingResult != ActivitySamplingResult.None)
{
activity = Activity.Create(this, name, kind, parentId, context, tags, links, startTime, samplerTags, samplingResult, startIt, idFormat, traceState);
}
return activity;
}
/// <summary>
/// Dispose the ActivitySource object and remove the current instance from the global list. empty the listeners list too.
/// </summary>
public void Dispose()
{
_listeners = null;
s_activeSources.Remove(this);
}
/// <summary>
/// Add a listener to the <see cref="Activity"/> starting and stopping events.
/// </summary>
/// <param name="listener"> The <see cref="ActivityListener"/> object to use for listening to the <see cref="Activity"/> events.</param>
public static void AddActivityListener(ActivityListener listener!!)
{
if (s_allListeners.AddIfNotExist(listener))
{
s_activeSources.EnumWithAction((source, obj) => {
var shouldListenTo = ((ActivityListener)obj).ShouldListenTo;
if (shouldListenTo != null && shouldListenTo(source))
{
source.AddListener((ActivityListener)obj);
}
}, listener);
}
}
internal delegate void Function<T, TParent>(T item, ref ActivityCreationOptions<TParent> data, ref ActivitySamplingResult samplingResult, ref ActivityCreationOptions<ActivityContext> dataWithContext);
internal void AddListener(ActivityListener listener)
{
if (_listeners == null)
{
Interlocked.CompareExchange(ref _listeners, new SynchronizedList<ActivityListener>(), null);
}
_listeners.AddIfNotExist(listener);
}
internal static void DetachListener(ActivityListener listener)
{
s_allListeners.Remove(listener);
s_activeSources.EnumWithAction((source, obj) => source._listeners?.Remove((ActivityListener) obj), listener);
}
internal void NotifyActivityStart(Activity activity)
{
Debug.Assert(activity != null);
// _listeners can get assigned to null in Dispose.
SynchronizedList<ActivityListener>? listeners = _listeners;
if (listeners != null && listeners.Count > 0)
{
listeners.EnumWithAction((listener, obj) => listener.ActivityStarted?.Invoke((Activity) obj), activity);
}
}
internal void NotifyActivityStop(Activity activity)
{
Debug.Assert(activity != null);
// _listeners can get assigned to null in Dispose.
SynchronizedList<ActivityListener>? listeners = _listeners;
if (listeners != null && listeners.Count > 0)
{
listeners.EnumWithAction((listener, obj) => listener.ActivityStopped?.Invoke((Activity) obj), activity);
}
}
}
// SynchronizedList<T> is a helper collection which ensure thread safety on the collection
// and allow enumerating the collection items and execute some action on the enumerated item and can detect any change in the collection
// during the enumeration which force restarting the enumeration again.
// Caution: We can have the action executed on the same item more than once which is ok in our scenarios.
internal sealed class SynchronizedList<T>
{
private readonly List<T> _list;
private uint _version;
public SynchronizedList() => _list = new List<T>();
public void Add(T item)
{
lock (_list)
{
_list.Add(item);
_version++;
}
}
public bool AddIfNotExist(T item)
{
lock (_list)
{
if (!_list.Contains(item))
{
_list.Add(item);
_version++;
return true;
}
return false;
}
}
public bool Remove(T item)
{
lock (_list)
{
if (_list.Remove(item))
{
_version++;
return true;
}
return false;
}
}
public int Count => _list.Count;
public void EnumWithFunc<TParent>(ActivitySource.Function<T, TParent> func, ref ActivityCreationOptions<TParent> data, ref ActivitySamplingResult samplingResult, ref ActivityCreationOptions<ActivityContext> dataWithContext)
{
uint version = _version;
int index = 0;
while (index < _list.Count)
{
T item;
lock (_list)
{
if (version != _version)
{
version = _version;
index = 0;
continue;
}
item = _list[index];
index++;
}
// Important to call the func outside the lock.
// This is the whole point we are having this wrapper class.
func(item, ref data, ref samplingResult, ref dataWithContext);
}
}
public void EnumWithAction(Action<T, object> action, object arg)
{
uint version = _version;
int index = 0;
while (index < _list.Count)
{
T item;
lock (_list)
{
if (version != _version)
{
version = _version;
index = 0;
continue;
}
item = _list[index];
index++;
}
// Important to call the action outside the lock.
// This is the whole point we are having this wrapper class.
action(item, arg);
}
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Threading;
using System.Collections.Generic;
using System.Runtime.CompilerServices;
namespace System.Diagnostics
{
public sealed class ActivitySource : IDisposable
{
private static readonly SynchronizedList<ActivitySource> s_activeSources = new SynchronizedList<ActivitySource>();
private static readonly SynchronizedList<ActivityListener> s_allListeners = new SynchronizedList<ActivityListener>();
private SynchronizedList<ActivityListener>? _listeners;
/// <summary>
/// Construct an ActivitySource object with the input name
/// </summary>
/// <param name="name">The name of the ActivitySource object</param>
/// <param name="version">The version of the component publishing the tracing info.</param>
public ActivitySource(string name!!, string? version = "")
{
Name = name;
Version = version;
s_activeSources.Add(this);
if (s_allListeners.Count > 0)
{
s_allListeners.EnumWithAction((listener, source) =>
{
Func<ActivitySource, bool>? shouldListenTo = listener.ShouldListenTo;
if (shouldListenTo != null)
{
var activitySource = (ActivitySource)source;
if (shouldListenTo(activitySource))
{
activitySource.AddListener(listener);
}
}
}, this);
}
GC.KeepAlive(DiagnosticSourceEventSource.Log);
}
/// <summary>
/// Returns the ActivitySource name.
/// </summary>
public string Name { get; }
/// <summary>
/// Returns the ActivitySource version.
/// </summary>
public string? Version { get; }
/// <summary>
/// Check if there is any listeners for this ActivitySource.
/// This property can be helpful to tell if there is no listener, then no need to create Activity object
/// and avoid creating the objects needed to create Activity (e.g. ActivityContext)
/// Example of that is http scenario which can avoid reading the context data from the wire.
/// </summary>
public bool HasListeners()
{
SynchronizedList<ActivityListener>? listeners = _listeners;
return listeners != null && listeners.Count > 0;
}
/// <summary>
/// Creates a new <see cref="Activity"/> object if there is any listener to the Activity, returns null otherwise.
/// </summary>
/// <param name="name">The operation name of the Activity</param>
/// <param name="kind">The <see cref="ActivityKind"/></param>
/// <returns>The created <see cref="Activity"/> object or null if there is no any event listener.</returns>
/// <remarks>
/// If the Activity object is created, it will not start automatically. Callers need to call <see cref="Activity.Start()"/> to start it.
/// </remarks>
public Activity? CreateActivity(string name, ActivityKind kind)
=> CreateActivity(name, kind, default, null, null, null, default, startIt: false);
/// <summary>
/// Creates a new <see cref="Activity"/> object if there is any listener to the Activity, returns null otherwise.
/// If the Activity object is created, it will not automatically start. Callers will need to call <see cref="Activity.Start()"/> to start it.
/// </summary>
/// <param name="name">The operation name of the Activity.</param>
/// <param name="kind">The <see cref="ActivityKind"/></param>
/// <param name="parentContext">The parent <see cref="ActivityContext"/> object to initialize the created Activity object with.</param>
/// <param name="tags">The optional tags list to initialize the created Activity object with.</param>
/// <param name="links">The optional <see cref="ActivityLink"/> list to initialize the created Activity object with.</param>
/// <param name="idFormat">The default Id format to use.</param>
/// <returns>The created <see cref="Activity"/> object or null if there is no any listener.</returns>
/// <remarks>
/// If the Activity object is created, it will not start automatically. Callers need to call <see cref="Activity.Start()"/> to start it.
/// </remarks>
public Activity? CreateActivity(string name, ActivityKind kind, ActivityContext parentContext, IEnumerable<KeyValuePair<string, object?>>? tags = null, IEnumerable<ActivityLink>? links = null, ActivityIdFormat idFormat = ActivityIdFormat.Unknown)
=> CreateActivity(name, kind, parentContext, null, tags, links, default, startIt: false, idFormat);
/// <summary>
/// Creates a new <see cref="Activity"/> object if there is any listener to the Activity, returns null otherwise.
/// </summary>
/// <param name="name">The operation name of the Activity.</param>
/// <param name="kind">The <see cref="ActivityKind"/></param>
/// <param name="parentId">The parent Id to initialize the created Activity object with.</param>
/// <param name="tags">The optional tags list to initialize the created Activity object with.</param>
/// <param name="links">The optional <see cref="ActivityLink"/> list to initialize the created Activity object with.</param>
/// <param name="idFormat">The default Id format to use.</param>
/// <returns>The created <see cref="Activity"/> object or null if there is no any listener.</returns>
/// <remarks>
/// If the Activity object is created, it will not start automatically. Callers need to call <see cref="Activity.Start()"/> to start it.
/// </remarks>
public Activity? CreateActivity(string name, ActivityKind kind, string parentId, IEnumerable<KeyValuePair<string, object?>>? tags = null, IEnumerable<ActivityLink>? links = null, ActivityIdFormat idFormat = ActivityIdFormat.Unknown)
=> CreateActivity(name, kind, default, parentId, tags, links, default, startIt: false, idFormat);
/// <summary>
/// Creates and starts a new <see cref="Activity"/> object if there is any listener to the Activity, returns null otherwise.
/// </summary>
/// <param name="name">The operation name of the Activity</param>
/// <param name="kind">The <see cref="ActivityKind"/></param>
/// <returns>The created <see cref="Activity"/> object or null if there is no any event listener.</returns>
public Activity? StartActivity([CallerMemberName] string name = "", ActivityKind kind = ActivityKind.Internal)
=> CreateActivity(name, kind, default, null, null, null, default);
/// <summary>
/// Creates and starts a new <see cref="Activity"/> object if there is any listener to the Activity events, returns null otherwise.
/// </summary>
/// <param name="name">The operation name of the Activity.</param>
/// <param name="kind">The <see cref="ActivityKind"/></param>
/// <param name="parentContext">The parent <see cref="ActivityContext"/> object to initialize the created Activity object with.</param>
/// <param name="tags">The optional tags list to initialize the created Activity object with.</param>
/// <param name="links">The optional <see cref="ActivityLink"/> list to initialize the created Activity object with.</param>
/// <param name="startTime">The optional start timestamp to set on the created Activity object.</param>
/// <returns>The created <see cref="Activity"/> object or null if there is no any listener.</returns>
public Activity? StartActivity(string name, ActivityKind kind, ActivityContext parentContext, IEnumerable<KeyValuePair<string, object?>>? tags = null, IEnumerable<ActivityLink>? links = null, DateTimeOffset startTime = default)
=> CreateActivity(name, kind, parentContext, null, tags, links, startTime);
/// <summary>
/// Creates and starts a new <see cref="Activity"/> object if there is any listener to the Activity events, returns null otherwise.
/// </summary>
/// <param name="name">The operation name of the Activity.</param>
/// <param name="kind">The <see cref="ActivityKind"/></param>
/// <param name="parentId">The parent Id to initialize the created Activity object with.</param>
/// <param name="tags">The optional tags list to initialize the created Activity object with.</param>
/// <param name="links">The optional <see cref="ActivityLink"/> list to initialize the created Activity object with.</param>
/// <param name="startTime">The optional start timestamp to set on the created Activity object.</param>
/// <returns>The created <see cref="Activity"/> object or null if there is no any listener.</returns>
public Activity? StartActivity(string name, ActivityKind kind, string parentId, IEnumerable<KeyValuePair<string, object?>>? tags = null, IEnumerable<ActivityLink>? links = null, DateTimeOffset startTime = default)
=> CreateActivity(name, kind, default, parentId, tags, links, startTime);
/// <summary>
/// Creates and starts a new <see cref="Activity"/> object if there is any listener to the Activity events, returns null otherwise.
/// </summary>
/// <param name="kind">The <see cref="ActivityKind"/></param>
/// <param name="parentContext">The parent <see cref="ActivityContext"/> object to initialize the created Activity object with.</param>
/// <param name="tags">The optional tags list to initialize the created Activity object with.</param>
/// <param name="links">The optional <see cref="ActivityLink"/> list to initialize the created Activity object with.</param>
/// <param name="startTime">The optional start timestamp to set on the created Activity object.</param>
/// <param name="name">The operation name of the Activity.</param>
/// <returns>The created <see cref="Activity"/> object or null if there is no any listener.</returns>
public Activity? StartActivity(ActivityKind kind, ActivityContext parentContext = default, IEnumerable<KeyValuePair<string, object?>>? tags = null, IEnumerable<ActivityLink>? links = null, DateTimeOffset startTime = default, [CallerMemberName] string name = "")
=> CreateActivity(name, kind, parentContext, null, tags, links, startTime);
private Activity? CreateActivity(string name, ActivityKind kind, ActivityContext context, string? parentId, IEnumerable<KeyValuePair<string, object?>>? tags,
IEnumerable<ActivityLink>? links, DateTimeOffset startTime, bool startIt = true, ActivityIdFormat idFormat = ActivityIdFormat.Unknown)
{
// _listeners can get assigned to null in Dispose.
SynchronizedList<ActivityListener>? listeners = _listeners;
if (listeners == null || listeners.Count == 0)
{
return null;
}
Activity? activity = null;
ActivityTagsCollection? samplerTags;
string? traceState;
ActivitySamplingResult samplingResult = ActivitySamplingResult.None;
if (parentId != null)
{
ActivityCreationOptions<string> aco = default;
ActivityCreationOptions<ActivityContext> acoContext = default;
aco = new ActivityCreationOptions<string>(this, name, parentId, kind, tags, links, idFormat);
if (aco.IdFormat == ActivityIdFormat.W3C)
{
// acoContext is used only in the Sample calls which called only when we have W3C Id format.
acoContext = new ActivityCreationOptions<ActivityContext>(this, name, aco.GetContext(), kind, tags, links, ActivityIdFormat.W3C);
}
listeners.EnumWithFunc((ActivityListener listener, ref ActivityCreationOptions<string> data, ref ActivitySamplingResult result, ref ActivityCreationOptions<ActivityContext> dataWithContext) => {
SampleActivity<string>? sampleUsingParentId = listener.SampleUsingParentId;
if (sampleUsingParentId != null)
{
ActivitySamplingResult sr = sampleUsingParentId(ref data);
dataWithContext.SetTraceState(data.TraceState); // Keep the trace state in sync between data and dataWithContext
if (sr > result)
{
result = sr;
}
}
else if (data.IdFormat == ActivityIdFormat.W3C)
{
// In case we have a parent Id and the listener not providing the SampleUsingParentId, we'll try to find out if the following conditions are true:
// - The listener is providing the Sample callback
// - Can convert the parent Id to a Context. ActivityCreationOptions.TraceId != default means parent id converted to a valid context.
// Then we can call the listener Sample callback with the constructed context.
SampleActivity<ActivityContext>? sample = listener.Sample;
if (sample != null)
{
ActivitySamplingResult sr = sample(ref dataWithContext);
data.SetTraceState(dataWithContext.TraceState); // Keep the trace state in sync between data and dataWithContext
if (sr > result)
{
result = sr;
}
}
}
}, ref aco, ref samplingResult, ref acoContext);
if (context == default)
{
if (aco.GetContext() != default)
{
context = aco.GetContext();
parentId = null;
}
else if (acoContext.GetContext() != default)
{
context = acoContext.GetContext();
parentId = null;
}
}
samplerTags = aco.GetSamplingTags();
ActivityTagsCollection? atc = acoContext.GetSamplingTags();
if (atc != null)
{
if (samplerTags == null)
{
samplerTags = atc;
}
else
{
foreach (KeyValuePair<string, object?> tag in atc)
{
samplerTags.Add(tag);
}
}
}
idFormat = aco.IdFormat;
traceState = aco.TraceState;
}
else
{
bool useCurrentActivityContext = context == default && Activity.Current != null;
var aco = new ActivityCreationOptions<ActivityContext>(this, name, useCurrentActivityContext ? Activity.Current!.Context : context, kind, tags, links, idFormat);
listeners.EnumWithFunc((ActivityListener listener, ref ActivityCreationOptions<ActivityContext> data, ref ActivitySamplingResult result, ref ActivityCreationOptions<ActivityContext> unused) => {
SampleActivity<ActivityContext>? sample = listener.Sample;
if (sample != null)
{
ActivitySamplingResult dr = sample(ref data);
if (dr > result)
{
result = dr;
}
}
}, ref aco, ref samplingResult, ref aco);
if (!useCurrentActivityContext)
{
// We use the context stored inside ActivityCreationOptions as it is possible the trace id get automatically generated during the sampling.
// We don't use the context stored inside ActivityCreationOptions only in case if we used Activity.Current context, the reason is we need to
// create the new child activity with Parent set to Activity.Current.
context = aco.GetContext();
}
samplerTags = aco.GetSamplingTags();
idFormat = aco.IdFormat;
traceState = aco.TraceState;
}
if (samplingResult != ActivitySamplingResult.None)
{
activity = Activity.Create(this, name, kind, parentId, context, tags, links, startTime, samplerTags, samplingResult, startIt, idFormat, traceState);
}
return activity;
}
/// <summary>
/// Dispose the ActivitySource object and remove the current instance from the global list. empty the listeners list too.
/// </summary>
public void Dispose()
{
_listeners = null;
s_activeSources.Remove(this);
}
/// <summary>
/// Add a listener to the <see cref="Activity"/> starting and stopping events.
/// </summary>
/// <param name="listener"> The <see cref="ActivityListener"/> object to use for listening to the <see cref="Activity"/> events.</param>
public static void AddActivityListener(ActivityListener listener!!)
{
if (s_allListeners.AddIfNotExist(listener))
{
s_activeSources.EnumWithAction((source, obj) => {
var shouldListenTo = ((ActivityListener)obj).ShouldListenTo;
if (shouldListenTo != null && shouldListenTo(source))
{
source.AddListener((ActivityListener)obj);
}
}, listener);
}
}
internal delegate void Function<T, TParent>(T item, ref ActivityCreationOptions<TParent> data, ref ActivitySamplingResult samplingResult, ref ActivityCreationOptions<ActivityContext> dataWithContext);
internal void AddListener(ActivityListener listener)
{
if (_listeners == null)
{
Interlocked.CompareExchange(ref _listeners, new SynchronizedList<ActivityListener>(), null);
}
_listeners.AddIfNotExist(listener);
}
internal static void DetachListener(ActivityListener listener)
{
s_allListeners.Remove(listener);
s_activeSources.EnumWithAction((source, obj) => source._listeners?.Remove((ActivityListener) obj), listener);
}
internal void NotifyActivityStart(Activity activity)
{
Debug.Assert(activity != null);
// _listeners can get assigned to null in Dispose.
SynchronizedList<ActivityListener>? listeners = _listeners;
if (listeners != null && listeners.Count > 0)
{
listeners.EnumWithAction((listener, obj) => listener.ActivityStarted?.Invoke((Activity) obj), activity);
}
}
internal void NotifyActivityStop(Activity activity)
{
Debug.Assert(activity != null);
// _listeners can get assigned to null in Dispose.
SynchronizedList<ActivityListener>? listeners = _listeners;
if (listeners != null && listeners.Count > 0)
{
listeners.EnumWithAction((listener, obj) => listener.ActivityStopped?.Invoke((Activity) obj), activity);
}
}
}
// SynchronizedList<T> is a helper collection which ensure thread safety on the collection
// and allow enumerating the collection items and execute some action on the enumerated item and can detect any change in the collection
// during the enumeration which force restarting the enumeration again.
// Caution: We can have the action executed on the same item more than once which is ok in our scenarios.
internal sealed class SynchronizedList<T>
{
private readonly List<T> _list;
private uint _version;
public SynchronizedList() => _list = new List<T>();
public void Add(T item)
{
lock (_list)
{
_list.Add(item);
_version++;
}
}
public bool AddIfNotExist(T item)
{
lock (_list)
{
if (!_list.Contains(item))
{
_list.Add(item);
_version++;
return true;
}
return false;
}
}
public bool Remove(T item)
{
lock (_list)
{
if (_list.Remove(item))
{
_version++;
return true;
}
return false;
}
}
public int Count => _list.Count;
public void EnumWithFunc<TParent>(ActivitySource.Function<T, TParent> func, ref ActivityCreationOptions<TParent> data, ref ActivitySamplingResult samplingResult, ref ActivityCreationOptions<ActivityContext> dataWithContext)
{
uint version = _version;
int index = 0;
while (index < _list.Count)
{
T item;
lock (_list)
{
if (version != _version)
{
version = _version;
index = 0;
continue;
}
item = _list[index];
index++;
}
// Important to call the func outside the lock.
// This is the whole point we are having this wrapper class.
func(item, ref data, ref samplingResult, ref dataWithContext);
}
}
public void EnumWithAction(Action<T, object> action, object arg)
{
uint version = _version;
int index = 0;
while (index < _list.Count)
{
T item;
lock (_list)
{
if (version != _version)
{
version = _version;
index = 0;
continue;
}
item = _list[index];
index++;
}
// Important to call the action outside the lock.
// This is the whole point we are having this wrapper class.
action(item, arg);
}
}
}
}
| -1 |
dotnet/runtime | 65,926 | FastMod for EEHashTable (Faster virtual generics) | Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR`, but that is a more complicated change, and I think this one is worth having anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| EgorBo | "2022-02-27T11:34:01Z" | "2022-03-01T19:43:37Z" | a5a7836f8193414fe64232d5c386b6402eb70e19 | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | FastMod for EEHashTable (Faster virtual generics). Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR`, but that is a more complicated change, and I think this one is worth having anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| ./src/libraries/System.Private.Xml/tests/Xslt/TestFiles/TestData/XsltScenarios/EXslt/dynamic-evaluate.xsl | <?xml version="1.0" encoding="UTF-8" ?>
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:dyn2="http://gotdotnet.com/exslt/dynamic" exclude-result-prefixes="dyn2">
<xsl:output indent="yes" omit-xml-declaration="yes"/>
<xsl:template match="data">
<out>
<test1>
<xsl:value-of select="dyn2:evaluate(., concat('2', '+', '2'))"/>
</test1>
<test2>
<xsl:copy-of select="dyn2:evaluate(., /data/path)"/>
</test2>
<test3>
<xsl:value-of select="dyn2:evaluate(., /data/path)/@id"/>
</test3>
<test4>
<xsl:copy-of select="dyn2:evaluate(., /data/path2)"/>
</test4>
<test5>
<xsl:value-of select="dyn2:evaluate(/no/such/node, /data/path2)"/>
</test5>
<test6>
<xsl:copy-of select="dyn2:evaluate(., '')"/>
</test6>
<test7>
<xsl:copy-of select="dyn2:evaluate(., /data/path3)"/>
</test7>
<test8>
<xsl:copy-of select="dyn2:evaluate(., 'dyn2:evaluate(., str:concat(path4|path5|path6))')"/>
</test8>
<test9>
<xsl:copy-of select="dyn2:evaluate(., 'orders/order[last()]')"/>
</test9>
<test10>
<xsl:variable name="namespaces">xmlns:foo="http://orders.com" xmlns:bar='http://bar.com/'</xsl:variable>
<xsl:copy-of select="dyn2:evaluate(., /data/path7, $namespaces)"/>
</test10>
<test11>
<xsl:copy-of select="dyn2:evaluate(., /data/path7, '')"/>
</test11>
<test12>
<xsl:variable name="namespaces">
xmlns:bar='http://bar.com/"
xmlns:foo = "http://orders.com"
</xsl:variable>
<xsl:copy-of select="dyn2:evaluate(., /data/path7, $namespaces)"/>
</test12>
<test13>
<xsl:copy-of select="dyn2:evaluate(., 'current()')"/>
</test13>
</out>
</xsl:template>
</xsl:stylesheet>
| <?xml version="1.0" encoding="UTF-8" ?>
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:dyn2="http://gotdotnet.com/exslt/dynamic" exclude-result-prefixes="dyn2">
<xsl:output indent="yes" omit-xml-declaration="yes"/>
<xsl:template match="data">
<out>
<test1>
<xsl:value-of select="dyn2:evaluate(., concat('2', '+', '2'))"/>
</test1>
<test2>
<xsl:copy-of select="dyn2:evaluate(., /data/path)"/>
</test2>
<test3>
<xsl:value-of select="dyn2:evaluate(., /data/path)/@id"/>
</test3>
<test4>
<xsl:copy-of select="dyn2:evaluate(., /data/path2)"/>
</test4>
<test5>
<xsl:value-of select="dyn2:evaluate(/no/such/node, /data/path2)"/>
</test5>
<test6>
<xsl:copy-of select="dyn2:evaluate(., '')"/>
</test6>
<test7>
<xsl:copy-of select="dyn2:evaluate(., /data/path3)"/>
</test7>
<test8>
<xsl:copy-of select="dyn2:evaluate(., 'dyn2:evaluate(., str:concat(path4|path5|path6))')"/>
</test8>
<test9>
<xsl:copy-of select="dyn2:evaluate(., 'orders/order[last()]')"/>
</test9>
<test10>
<xsl:variable name="namespaces">xmlns:foo="http://orders.com" xmlns:bar='http://bar.com/'</xsl:variable>
<xsl:copy-of select="dyn2:evaluate(., /data/path7, $namespaces)"/>
</test10>
<test11>
<xsl:copy-of select="dyn2:evaluate(., /data/path7, '')"/>
</test11>
<test12>
<xsl:variable name="namespaces">
xmlns:bar='http://bar.com/"
xmlns:foo = "http://orders.com"
</xsl:variable>
<xsl:copy-of select="dyn2:evaluate(., /data/path7, $namespaces)"/>
</test12>
<test13>
<xsl:copy-of select="dyn2:evaluate(., 'current()')"/>
</test13>
</out>
</xsl:template>
</xsl:stylesheet>
| -1 |
dotnet/runtime | 65,926 | FastMod for EEHashTable (Faster virtual generics) | Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR`, but that is a more complicated change, and I think this one is worth having anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| EgorBo | "2022-02-27T11:34:01Z" | "2022-03-01T19:43:37Z" | a5a7836f8193414fe64232d5c386b6402eb70e19 | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | FastMod for EEHashTable (Faster virtual generics). Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR`, but that is a more complicated change, and I think this one is worth having anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| ./src/libraries/System.Runtime/tests/System/PlatformNotSupportedExceptionTests.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using Xunit;
namespace System.Tests
{
public static class PlatformNotSupportedExceptionTests
{
private const int COR_E_PLATFORMNOTSUPPORTED = unchecked((int)0x80131539);
[Fact]
public static void Ctor_Empty()
{
var exception = new PlatformNotSupportedException();
ExceptionHelpers.ValidateExceptionProperties(exception, hResult: COR_E_PLATFORMNOTSUPPORTED, validateMessage: false);
}
[Fact]
public static void Ctor_String()
{
string message = "platform not supported";
var exception = new PlatformNotSupportedException(message);
ExceptionHelpers.ValidateExceptionProperties(exception, hResult: COR_E_PLATFORMNOTSUPPORTED, message: message);
}
[Fact]
public static void Ctor_String_Exception()
{
string message = "platform not supported";
var innerException = new Exception("Inner exception");
var exception = new PlatformNotSupportedException(message, innerException);
ExceptionHelpers.ValidateExceptionProperties(exception, hResult: COR_E_PLATFORMNOTSUPPORTED, innerException: innerException, message: message);
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using Xunit;
namespace System.Tests
{
public static class PlatformNotSupportedExceptionTests
{
private const int COR_E_PLATFORMNOTSUPPORTED = unchecked((int)0x80131539);
[Fact]
public static void Ctor_Empty()
{
var exception = new PlatformNotSupportedException();
ExceptionHelpers.ValidateExceptionProperties(exception, hResult: COR_E_PLATFORMNOTSUPPORTED, validateMessage: false);
}
[Fact]
public static void Ctor_String()
{
string message = "platform not supported";
var exception = new PlatformNotSupportedException(message);
ExceptionHelpers.ValidateExceptionProperties(exception, hResult: COR_E_PLATFORMNOTSUPPORTED, message: message);
}
[Fact]
public static void Ctor_String_Exception()
{
string message = "platform not supported";
var innerException = new Exception("Inner exception");
var exception = new PlatformNotSupportedException(message, innerException);
ExceptionHelpers.ValidateExceptionProperties(exception, hResult: COR_E_PLATFORMNOTSUPPORTED, innerException: innerException, message: message);
}
}
}
| -1 |
dotnet/runtime | 65,926 | FastMod for EEHashTable (Faster virtual generics) | Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR`, but that is a more complicated change, and I think this one is worth having anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| EgorBo | "2022-02-27T11:34:01Z" | "2022-03-01T19:43:37Z" | a5a7836f8193414fe64232d5c386b6402eb70e19 | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | FastMod for EEHashTable (Faster virtual generics). Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR`, but that is a more complicated change, and I think this one is worth having anyway.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| ./src/libraries/System.Text.RegularExpressions/src/Resources/Strings.resx | <?xml version="1.0" encoding="utf-8"?>
<root>
<!--
Microsoft ResX Schema
Version 2.0
The primary goals of this format is to allow a simple XML format
that is mostly human readable. The generation and parsing of the
various data types are done through the TypeConverter classes
associated with the data types.
Example:
... ado.net/XML headers & schema ...
<resheader name="resmimetype">text/microsoft-resx</resheader>
<resheader name="version">2.0</resheader>
<resheader name="reader">System.Resources.ResXResourceReader, System.Windows.Forms, ...</resheader>
<resheader name="writer">System.Resources.ResXResourceWriter, System.Windows.Forms, ...</resheader>
<data name="Name1"><value>this is my long string</value><comment>this is a comment</comment></data>
<data name="Color1" type="System.Drawing.Color, System.Drawing">Blue</data>
<data name="Bitmap1" mimetype="application/x-microsoft.net.object.binary.base64">
<value>[base64 mime encoded serialized .NET Framework object]</value>
</data>
<data name="Icon1" type="System.Drawing.Icon, System.Drawing" mimetype="application/x-microsoft.net.object.bytearray.base64">
<value>[base64 mime encoded string representing a byte array form of the .NET Framework object]</value>
<comment>This is a comment</comment>
</data>
There are any number of "resheader" rows that contain simple
name/value pairs.
Each data row contains a name, and value. The row also contains a
type or mimetype. Type corresponds to a .NET class that support
text/value conversion through the TypeConverter architecture.
Classes that don't support this are serialized and stored with the
mimetype set.
The mimetype is used for serialized objects, and tells the
ResXResourceReader how to depersist the object. This is currently not
extensible. For a given mimetype the value must be set accordingly:
Note - application/x-microsoft.net.object.binary.base64 is the format
that the ResXResourceWriter will generate, however the reader can
read any of the formats listed below.
mimetype: application/x-microsoft.net.object.binary.base64
value : The object must be serialized with
: System.Runtime.Serialization.Formatters.Binary.BinaryFormatter
: and then encoded with base64 encoding.
mimetype: application/x-microsoft.net.object.soap.base64
value : The object must be serialized with
: System.Runtime.Serialization.Formatters.Soap.SoapFormatter
: and then encoded with base64 encoding.
mimetype: application/x-microsoft.net.object.bytearray.base64
value : The object must be serialized into a byte array
: using a System.ComponentModel.TypeConverter
: and then encoded with base64 encoding.
-->
<xsd:schema id="root" xmlns="" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:msdata="urn:schemas-microsoft-com:xml-msdata">
<xsd:import namespace="http://www.w3.org/XML/1998/namespace" />
<xsd:element name="root" msdata:IsDataSet="true">
<xsd:complexType>
<xsd:choice maxOccurs="unbounded">
<xsd:element name="metadata">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="value" type="xsd:string" minOccurs="0" />
</xsd:sequence>
<xsd:attribute name="name" use="required" type="xsd:string" />
<xsd:attribute name="type" type="xsd:string" />
<xsd:attribute name="mimetype" type="xsd:string" />
<xsd:attribute ref="xml:space" />
</xsd:complexType>
</xsd:element>
<xsd:element name="assembly">
<xsd:complexType>
<xsd:attribute name="alias" type="xsd:string" />
<xsd:attribute name="name" type="xsd:string" />
</xsd:complexType>
</xsd:element>
<xsd:element name="data">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="value" type="xsd:string" minOccurs="0" msdata:Ordinal="1" />
<xsd:element name="comment" type="xsd:string" minOccurs="0" msdata:Ordinal="2" />
</xsd:sequence>
<xsd:attribute name="name" type="xsd:string" use="required" msdata:Ordinal="1" />
<xsd:attribute name="type" type="xsd:string" msdata:Ordinal="3" />
<xsd:attribute name="mimetype" type="xsd:string" msdata:Ordinal="4" />
<xsd:attribute ref="xml:space" />
</xsd:complexType>
</xsd:element>
<xsd:element name="resheader">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="value" type="xsd:string" minOccurs="0" msdata:Ordinal="1" />
</xsd:sequence>
<xsd:attribute name="name" type="xsd:string" use="required" />
</xsd:complexType>
</xsd:element>
</xsd:choice>
</xsd:complexType>
</xsd:element>
</xsd:schema>
<resheader name="resmimetype">
<value>text/microsoft-resx</value>
</resheader>
<resheader name="version">
<value>2.0</value>
</resheader>
<resheader name="reader">
<value>System.Resources.ResXResourceReader, System.Windows.Forms, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089</value>
</resheader>
<resheader name="writer">
<value>System.Resources.ResXResourceWriter, System.Windows.Forms, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089</value>
</resheader>
<data name="Generic" xml:space="preserve">
<value>Regular expression parser error '{0}' at offset {1}.</value>
</data>
<data name="AlternationHasNamedCapture" xml:space="preserve">
<value>Alternation conditions do not capture and cannot be named.</value>
</data>
<data name="AlternationHasComment" xml:space="preserve">
<value>Alternation conditions cannot be comments.</value>
</data>
<data name="Arg_ArrayPlusOffTooSmall" xml:space="preserve">
<value>Destination array is not long enough to copy all the items in the collection. Check array index and length.</value>
</data>
<data name="ShorthandClassInCharacterRange" xml:space="preserve">
<value>Cannot include class \\{0} in character range.</value>
</data>
<data name="ShorthandClassInCharacterRangeNoPlaceholder" xml:space="preserve">
<value>Cannot include class in character range.</value>
</data>
<data name="BeginIndexNotNegative" xml:space="preserve">
<value>Start index cannot be less than 0 or greater than input length.</value>
</data>
<data name="QuantifierOrCaptureGroupOutOfRange" xml:space="preserve">
<value>Capture group numbers must be less than or equal to Int32.MaxValue.</value>
</data>
<data name="CaptureGroupOfZero" xml:space="preserve">
<value>Capture number cannot be zero.</value>
</data>
<data name="CountTooSmall" xml:space="preserve">
<value>Count cannot be less than -1.</value>
</data>
<data name="EnumNotStarted" xml:space="preserve">
<value>Enumeration has either not started or has already finished.</value>
</data>
<data name="AlternationHasMalformedCondition" xml:space="preserve">
<value>Illegal conditional (?(...)) expression.</value>
</data>
<data name="IllegalDefaultRegexMatchTimeoutInAppDomain" xml:space="preserve">
<value>AppDomain data '{0}' contains the invalid value or object '{1}' for specifying a default matching timeout for System.Text.RegularExpressions.Regex.</value>
</data>
<data name="UnescapedEndingBackslash" xml:space="preserve">
<value>Illegal \\ at end of pattern.</value>
</data>
<data name="ReversedQuantifierRange" xml:space="preserve">
<value>Illegal {x,y} with x > y.</value>
</data>
<data name="InvalidUnicodePropertyEscape" xml:space="preserve">
<value>Incomplete \\p{X} character escape.</value>
</data>
<data name="InternalError_ScanRegex" xml:space="preserve">
<value>Internal error in ScanRegex.</value>
<comment>{Locked="ScanRegex"}</comment>
</data>
<data name="CaptureGroupNameInvalid" xml:space="preserve">
<value>Invalid group name: Group names must begin with a word character.</value>
</data>
<data name="InvalidEmptyArgument" xml:space="preserve">
<value>Argument {0} cannot be zero-length.</value>
</data>
<data name="LengthNotNegative" xml:space="preserve">
<value>Length cannot be less than 0 or exceed input length.</value>
</data>
<data name="MalformedNamedReference" xml:space="preserve">
<value>Malformed \\k<...> named back reference.</value>
</data>
<data name="AlternationHasMalformedReference" xml:space="preserve">
<value>(?({0}) ) malformed.</value>
</data>
<data name="AlternationHasMalformedReferenceNoPlaceholder" xml:space="preserve">
<value>Alternation has malformed reference.</value>
</data>
<data name="MalformedUnicodePropertyEscape" xml:space="preserve">
<value>Malformed \\p{X} character escape.</value>
</data>
<data name="MakeException" xml:space="preserve">
<value>Invalid pattern '{0}' at offset {1}. {2}</value>
</data>
<data name="MissingControlCharacter" xml:space="preserve">
<value>Missing control character.</value>
</data>
<data name="NestedQuantifiersNotParenthesized" xml:space="preserve">
<value>Nested quantifier '{0}'.</value>
</data>
<data name="NestedQuantifiersNotParenthesizedNoPlaceholder" xml:space="preserve">
    <value>Nested quantifier not parenthesized.</value>
</data>
<data name="NoResultOnFailed" xml:space="preserve">
<value>Result cannot be called on a failed Match.</value>
</data>
<data name="InsufficientClosingParentheses" xml:space="preserve">
<value>Not enough )'s.</value>
</data>
<data name="NotSupported_ReadOnlyCollection" xml:space="preserve">
<value>Collection is read-only.</value>
</data>
<data name="PlatformNotSupported_CompileToAssembly" xml:space="preserve">
<value>This platform does not support writing compiled regular expressions to an assembly. Use RegexGeneratorAttribute with the regular expression source generator instead.</value>
</data>
<data name="QuantifierAfterNothing" xml:space="preserve">
<value>Quantifier {x,y} following nothing.</value>
</data>
<data name="RegexMatchTimeoutException_Occurred" xml:space="preserve">
<value>The RegEx engine has timed out while trying to match a pattern to an input string. This can occur for many reasons, including very large inputs or excessive backtracking caused by nested quantifiers, back-references and other factors.</value>
</data>
<data name="ReplacementError" xml:space="preserve">
<value>Replacement pattern error.</value>
</data>
<data name="ReversedCharacterRange" xml:space="preserve">
<value>[x-y] range in reverse order.</value>
</data>
<data name="ExclusionGroupNotLast" xml:space="preserve">
<value>A subtraction must be the last element in a character class.</value>
</data>
<data name="InsufficientOrInvalidHexDigits" xml:space="preserve">
<value>Insufficient hexadecimal digits.</value>
</data>
<data name="AlternationHasTooManyConditions" xml:space="preserve">
<value>Too many | in (?()|).</value>
</data>
<data name="InsufficientOpeningParentheses" xml:space="preserve">
<value>Too many )'s.</value>
</data>
<data name="UndefinedNumberedReference" xml:space="preserve">
<value>Reference to undefined group number {0}.</value>
</data>
<data name="UndefinedNumberedReferenceNoPlaceholder" xml:space="preserve">
<value>Reference to undefined group number.</value>
</data>
<data name="UndefinedNamedReference" xml:space="preserve">
<value>Reference to undefined group name '{0}'.</value>
</data>
<data name="UndefinedNamedReferenceNoPlaceholder" xml:space="preserve">
<value>Reference to undefined group name.</value>
</data>
<data name="AlternationHasUndefinedReference" xml:space="preserve">
<value>(?({0}) ) reference to undefined group.</value>
</data>
<data name="AlternationHasUndefinedReferenceNoPlaceholder" xml:space="preserve">
<value>Alternation has a reference to undefined group.</value>
</data>
<data name="UnrecognizedUnicodeProperty" xml:space="preserve">
<value>Unknown property '{0}'.</value>
</data>
<data name="UnrecognizedUnicodePropertyNoPlaceholder" xml:space="preserve">
    <value>Unknown Unicode property.</value>
</data>
<data name="UnrecognizedControlCharacter" xml:space="preserve">
<value>Unrecognized control character.</value>
</data>
<data name="UnrecognizedEscape" xml:space="preserve">
<value>Unrecognized escape sequence \\{0}.</value>
</data>
<data name="InvalidGroupingConstruct" xml:space="preserve">
<value>Unrecognized grouping construct.</value>
</data>
<data name="UnterminatedBracket" xml:space="preserve">
<value>Unterminated [] set.</value>
</data>
<data name="UnterminatedComment" xml:space="preserve">
<value>Unterminated (?#...) comment.</value>
</data>
<data name="NotSupported_NonBacktrackingAndReplacementsWithSubstitutionsOfGroups" xml:space="preserve">
<value>Regex replacements with substitutions of groups are not supported with RegexOptions.NonBacktracking.</value>
<comment>{Locked="RegexOptions.NonBacktracking"}</comment>
</data>
<data name="NotSupported_NonBacktrackingConflictingOption" xml:space="preserve">
<value>RegexOptions.NonBacktracking is not supported in conjunction with RegexOptions.{0}.</value>
<comment>{Locked="RegexOptions.NonBacktracking"}</comment>
</data>
<data name="NotSupported_NonBacktrackingConflictingExpression" xml:space="preserve">
<value>RegexOptions.NonBacktracking is not supported in conjunction with expressions containing: '{0}'.</value>
<comment>{Locked="RegexOptions.NonBacktracking"}</comment>
</data>
<data name="ExpressionDescription_Backreference" xml:space="preserve">
<value>backreference (\\ number)</value>
</data>
<data name="ExpressionDescription_Conditional" xml:space="preserve">
<value>captured group conditional (?( name ) yes-pattern | no-pattern ) or (?( number ) yes-pattern| no-pattern )</value>
</data>
<data name="ExpressionDescription_PositiveLookaround" xml:space="preserve">
<value>positive lookahead (?= pattern) or positive lookbehind (?<= pattern)</value>
</data>
<data name="ExpressionDescription_NegativeLookaround" xml:space="preserve">
<value>negative lookahead (?! pattern) or negative lookbehind (?<! pattern)</value>
</data>
<data name="ExpressionDescription_ContiguousMatches" xml:space="preserve">
<value>contiguous matches (\\G)</value>
</data>
<data name="ExpressionDescription_AtomicSubexpressions" xml:space="preserve">
<value>atomic subexpressions (?> pattern)</value>
</data>
<data name="ExpressionDescription_IfThenElse" xml:space="preserve">
<value>test conditional (?( test-pattern ) yes-pattern | no-pattern )</value>
</data>
<data name="ExpressionDescription_BalancingGroup" xml:space="preserve">
<value>balancing group (?<name1-name2>subexpression) or (?'name1-name2' subexpression)</value>
</data>
<data name="UsingSpanAPIsWithCompiledToAssembly" xml:space="preserve">
<value>Searching an input span using a pre-compiled Regex assembly is not supported. Please use the string overloads or use a newer Regex implementation.</value>
</data>
</root> | <?xml version="1.0" encoding="utf-8"?>
<root>
<!--
Microsoft ResX Schema
Version 2.0
The primary goals of this format is to allow a simple XML format
that is mostly human readable. The generation and parsing of the
various data types are done through the TypeConverter classes
associated with the data types.
Example:
... ado.net/XML headers & schema ...
<resheader name="resmimetype">text/microsoft-resx</resheader>
<resheader name="version">2.0</resheader>
<resheader name="reader">System.Resources.ResXResourceReader, System.Windows.Forms, ...</resheader>
<resheader name="writer">System.Resources.ResXResourceWriter, System.Windows.Forms, ...</resheader>
<data name="Name1"><value>this is my long string</value><comment>this is a comment</comment></data>
<data name="Color1" type="System.Drawing.Color, System.Drawing">Blue</data>
<data name="Bitmap1" mimetype="application/x-microsoft.net.object.binary.base64">
<value>[base64 mime encoded serialized .NET Framework object]</value>
</data>
<data name="Icon1" type="System.Drawing.Icon, System.Drawing" mimetype="application/x-microsoft.net.object.bytearray.base64">
<value>[base64 mime encoded string representing a byte array form of the .NET Framework object]</value>
<comment>This is a comment</comment>
</data>
There are any number of "resheader" rows that contain simple
name/value pairs.
    Each data row contains a name and a value. The row also contains a
    type or mimetype. Type corresponds to a .NET class that supports
text/value conversion through the TypeConverter architecture.
Classes that don't support this are serialized and stored with the
mimetype set.
The mimetype is used for serialized objects, and tells the
ResXResourceReader how to depersist the object. This is currently not
extensible. For a given mimetype the value must be set accordingly:
Note - application/x-microsoft.net.object.binary.base64 is the format
that the ResXResourceWriter will generate, however the reader can
read any of the formats listed below.
mimetype: application/x-microsoft.net.object.binary.base64
value : The object must be serialized with
: System.Runtime.Serialization.Formatters.Binary.BinaryFormatter
: and then encoded with base64 encoding.
mimetype: application/x-microsoft.net.object.soap.base64
value : The object must be serialized with
: System.Runtime.Serialization.Formatters.Soap.SoapFormatter
: and then encoded with base64 encoding.
mimetype: application/x-microsoft.net.object.bytearray.base64
value : The object must be serialized into a byte array
: using a System.ComponentModel.TypeConverter
: and then encoded with base64 encoding.
-->
<xsd:schema id="root" xmlns="" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:msdata="urn:schemas-microsoft-com:xml-msdata">
<xsd:import namespace="http://www.w3.org/XML/1998/namespace" />
<xsd:element name="root" msdata:IsDataSet="true">
<xsd:complexType>
<xsd:choice maxOccurs="unbounded">
<xsd:element name="metadata">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="value" type="xsd:string" minOccurs="0" />
</xsd:sequence>
<xsd:attribute name="name" use="required" type="xsd:string" />
<xsd:attribute name="type" type="xsd:string" />
<xsd:attribute name="mimetype" type="xsd:string" />
<xsd:attribute ref="xml:space" />
</xsd:complexType>
</xsd:element>
<xsd:element name="assembly">
<xsd:complexType>
<xsd:attribute name="alias" type="xsd:string" />
<xsd:attribute name="name" type="xsd:string" />
</xsd:complexType>
</xsd:element>
<xsd:element name="data">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="value" type="xsd:string" minOccurs="0" msdata:Ordinal="1" />
<xsd:element name="comment" type="xsd:string" minOccurs="0" msdata:Ordinal="2" />
</xsd:sequence>
<xsd:attribute name="name" type="xsd:string" use="required" msdata:Ordinal="1" />
<xsd:attribute name="type" type="xsd:string" msdata:Ordinal="3" />
<xsd:attribute name="mimetype" type="xsd:string" msdata:Ordinal="4" />
<xsd:attribute ref="xml:space" />
</xsd:complexType>
</xsd:element>
<xsd:element name="resheader">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="value" type="xsd:string" minOccurs="0" msdata:Ordinal="1" />
</xsd:sequence>
<xsd:attribute name="name" type="xsd:string" use="required" />
</xsd:complexType>
</xsd:element>
</xsd:choice>
</xsd:complexType>
</xsd:element>
</xsd:schema>
<resheader name="resmimetype">
<value>text/microsoft-resx</value>
</resheader>
<resheader name="version">
<value>2.0</value>
</resheader>
<resheader name="reader">
<value>System.Resources.ResXResourceReader, System.Windows.Forms, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089</value>
</resheader>
<resheader name="writer">
<value>System.Resources.ResXResourceWriter, System.Windows.Forms, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089</value>
</resheader>
<data name="Generic" xml:space="preserve">
<value>Regular expression parser error '{0}' at offset {1}.</value>
</data>
<data name="AlternationHasNamedCapture" xml:space="preserve">
<value>Alternation conditions do not capture and cannot be named.</value>
</data>
<data name="AlternationHasComment" xml:space="preserve">
<value>Alternation conditions cannot be comments.</value>
</data>
<data name="Arg_ArrayPlusOffTooSmall" xml:space="preserve">
<value>Destination array is not long enough to copy all the items in the collection. Check array index and length.</value>
</data>
<data name="ShorthandClassInCharacterRange" xml:space="preserve">
<value>Cannot include class \\{0} in character range.</value>
</data>
<data name="ShorthandClassInCharacterRangeNoPlaceholder" xml:space="preserve">
<value>Cannot include class in character range.</value>
</data>
<data name="BeginIndexNotNegative" xml:space="preserve">
<value>Start index cannot be less than 0 or greater than input length.</value>
</data>
<data name="QuantifierOrCaptureGroupOutOfRange" xml:space="preserve">
<value>Capture group numbers must be less than or equal to Int32.MaxValue.</value>
</data>
<data name="CaptureGroupOfZero" xml:space="preserve">
<value>Capture number cannot be zero.</value>
</data>
<data name="CountTooSmall" xml:space="preserve">
<value>Count cannot be less than -1.</value>
</data>
<data name="EnumNotStarted" xml:space="preserve">
<value>Enumeration has either not started or has already finished.</value>
</data>
<data name="AlternationHasMalformedCondition" xml:space="preserve">
<value>Illegal conditional (?(...)) expression.</value>
</data>
<data name="IllegalDefaultRegexMatchTimeoutInAppDomain" xml:space="preserve">
<value>AppDomain data '{0}' contains the invalid value or object '{1}' for specifying a default matching timeout for System.Text.RegularExpressions.Regex.</value>
</data>
<data name="UnescapedEndingBackslash" xml:space="preserve">
<value>Illegal \\ at end of pattern.</value>
</data>
<data name="ReversedQuantifierRange" xml:space="preserve">
<value>Illegal {x,y} with x > y.</value>
</data>
<data name="InvalidUnicodePropertyEscape" xml:space="preserve">
<value>Incomplete \\p{X} character escape.</value>
</data>
<data name="InternalError_ScanRegex" xml:space="preserve">
<value>Internal error in ScanRegex.</value>
<comment>{Locked="ScanRegex"}</comment>
</data>
<data name="CaptureGroupNameInvalid" xml:space="preserve">
<value>Invalid group name: Group names must begin with a word character.</value>
</data>
<data name="InvalidEmptyArgument" xml:space="preserve">
<value>Argument {0} cannot be zero-length.</value>
</data>
<data name="LengthNotNegative" xml:space="preserve">
<value>Length cannot be less than 0 or exceed input length.</value>
</data>
<data name="MalformedNamedReference" xml:space="preserve">
<value>Malformed \\k<...> named back reference.</value>
</data>
<data name="AlternationHasMalformedReference" xml:space="preserve">
<value>(?({0}) ) malformed.</value>
</data>
<data name="AlternationHasMalformedReferenceNoPlaceholder" xml:space="preserve">
<value>Alternation has malformed reference.</value>
</data>
<data name="MalformedUnicodePropertyEscape" xml:space="preserve">
<value>Malformed \\p{X} character escape.</value>
</data>
<data name="MakeException" xml:space="preserve">
<value>Invalid pattern '{0}' at offset {1}. {2}</value>
</data>
<data name="MissingControlCharacter" xml:space="preserve">
<value>Missing control character.</value>
</data>
<data name="NestedQuantifiersNotParenthesized" xml:space="preserve">
<value>Nested quantifier '{0}'.</value>
</data>
<data name="NestedQuantifiersNotParenthesizedNoPlaceholder" xml:space="preserve">
    <value>Nested quantifier not parenthesized.</value>
</data>
<data name="NoResultOnFailed" xml:space="preserve">
<value>Result cannot be called on a failed Match.</value>
</data>
<data name="InsufficientClosingParentheses" xml:space="preserve">
<value>Not enough )'s.</value>
</data>
<data name="NotSupported_ReadOnlyCollection" xml:space="preserve">
<value>Collection is read-only.</value>
</data>
<data name="PlatformNotSupported_CompileToAssembly" xml:space="preserve">
<value>This platform does not support writing compiled regular expressions to an assembly. Use RegexGeneratorAttribute with the regular expression source generator instead.</value>
</data>
<data name="QuantifierAfterNothing" xml:space="preserve">
<value>Quantifier {x,y} following nothing.</value>
</data>
<data name="RegexMatchTimeoutException_Occurred" xml:space="preserve">
<value>The RegEx engine has timed out while trying to match a pattern to an input string. This can occur for many reasons, including very large inputs or excessive backtracking caused by nested quantifiers, back-references and other factors.</value>
</data>
<data name="ReplacementError" xml:space="preserve">
<value>Replacement pattern error.</value>
</data>
<data name="ReversedCharacterRange" xml:space="preserve">
<value>[x-y] range in reverse order.</value>
</data>
<data name="ExclusionGroupNotLast" xml:space="preserve">
<value>A subtraction must be the last element in a character class.</value>
</data>
<data name="InsufficientOrInvalidHexDigits" xml:space="preserve">
<value>Insufficient hexadecimal digits.</value>
</data>
<data name="AlternationHasTooManyConditions" xml:space="preserve">
<value>Too many | in (?()|).</value>
</data>
<data name="InsufficientOpeningParentheses" xml:space="preserve">
<value>Too many )'s.</value>
</data>
<data name="UndefinedNumberedReference" xml:space="preserve">
<value>Reference to undefined group number {0}.</value>
</data>
<data name="UndefinedNumberedReferenceNoPlaceholder" xml:space="preserve">
<value>Reference to undefined group number.</value>
</data>
<data name="UndefinedNamedReference" xml:space="preserve">
<value>Reference to undefined group name '{0}'.</value>
</data>
<data name="UndefinedNamedReferenceNoPlaceholder" xml:space="preserve">
<value>Reference to undefined group name.</value>
</data>
<data name="AlternationHasUndefinedReference" xml:space="preserve">
<value>(?({0}) ) reference to undefined group.</value>
</data>
<data name="AlternationHasUndefinedReferenceNoPlaceholder" xml:space="preserve">
<value>Alternation has a reference to undefined group.</value>
</data>
<data name="UnrecognizedUnicodeProperty" xml:space="preserve">
<value>Unknown property '{0}'.</value>
</data>
<data name="UnrecognizedUnicodePropertyNoPlaceholder" xml:space="preserve">
    <value>Unknown Unicode property.</value>
</data>
<data name="UnrecognizedControlCharacter" xml:space="preserve">
<value>Unrecognized control character.</value>
</data>
<data name="UnrecognizedEscape" xml:space="preserve">
<value>Unrecognized escape sequence \\{0}.</value>
</data>
<data name="InvalidGroupingConstruct" xml:space="preserve">
<value>Unrecognized grouping construct.</value>
</data>
<data name="UnterminatedBracket" xml:space="preserve">
<value>Unterminated [] set.</value>
</data>
<data name="UnterminatedComment" xml:space="preserve">
<value>Unterminated (?#...) comment.</value>
</data>
<data name="NotSupported_NonBacktrackingAndReplacementsWithSubstitutionsOfGroups" xml:space="preserve">
<value>Regex replacements with substitutions of groups are not supported with RegexOptions.NonBacktracking.</value>
<comment>{Locked="RegexOptions.NonBacktracking"}</comment>
</data>
<data name="NotSupported_NonBacktrackingConflictingOption" xml:space="preserve">
<value>RegexOptions.NonBacktracking is not supported in conjunction with RegexOptions.{0}.</value>
<comment>{Locked="RegexOptions.NonBacktracking"}</comment>
</data>
<data name="NotSupported_NonBacktrackingConflictingExpression" xml:space="preserve">
<value>RegexOptions.NonBacktracking is not supported in conjunction with expressions containing: '{0}'.</value>
<comment>{Locked="RegexOptions.NonBacktracking"}</comment>
</data>
<data name="ExpressionDescription_Backreference" xml:space="preserve">
<value>backreference (\\ number)</value>
</data>
<data name="ExpressionDescription_Conditional" xml:space="preserve">
<value>captured group conditional (?( name ) yes-pattern | no-pattern ) or (?( number ) yes-pattern| no-pattern )</value>
</data>
<data name="ExpressionDescription_PositiveLookaround" xml:space="preserve">
<value>positive lookahead (?= pattern) or positive lookbehind (?<= pattern)</value>
</data>
<data name="ExpressionDescription_NegativeLookaround" xml:space="preserve">
<value>negative lookahead (?! pattern) or negative lookbehind (?<! pattern)</value>
</data>
<data name="ExpressionDescription_ContiguousMatches" xml:space="preserve">
<value>contiguous matches (\\G)</value>
</data>
<data name="ExpressionDescription_AtomicSubexpressions" xml:space="preserve">
<value>atomic subexpressions (?> pattern)</value>
</data>
<data name="ExpressionDescription_IfThenElse" xml:space="preserve">
<value>test conditional (?( test-pattern ) yes-pattern | no-pattern )</value>
</data>
<data name="ExpressionDescription_BalancingGroup" xml:space="preserve">
<value>balancing group (?<name1-name2>subexpression) or (?'name1-name2' subexpression)</value>
</data>
<data name="UsingSpanAPIsWithCompiledToAssembly" xml:space="preserve">
<value>Searching an input span using a pre-compiled Regex assembly is not supported. Please use the string overloads or use a newer Regex implementation.</value>
</data>
</root> | -1 |
dotnet/runtime | 65,926 | FastMod for EEHashTable (Faster virtual generics) | Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR`, but that is a more complicated change, and I think this one is worth having too.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
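For context on where the win comes from: "FastMod" replaces the hash table's `bucket = hash % size` division with a multiply-and-shift against a precomputed 64-bit magic value, so each lookup avoids an integer divide. A minimal C# sketch of the idea — the helper names and the standalone program are illustrative and mirror the managed `HashHelpers.FastMod` pattern rather than the native EEHashTable code:

```csharp
using System;

static class FastModSketch
{
    // Precomputed once per table size; the scheme is valid for divisors up to int.MaxValue.
    public static ulong GetFastModMultiplier(uint divisor) => ulong.MaxValue / divisor + 1;

    // Computes value % divisor with multiplies and shifts instead of a hardware divide.
    // Relies on wrapping (unchecked) 64-bit multiplication.
    public static uint FastMod(uint value, uint divisor, ulong multiplier) =>
        unchecked((uint)(((((multiplier * value) >> 32) + 1) * divisor) >> 32));

    static void Main()
    {
        uint buckets = 101;                          // illustrative bucket count
        ulong magic = GetFastModMultiplier(buckets);
        for (uint hash = 0; hash < 100_000; hash++)
        {
            if (FastMod(hash, buckets, magic) != hash % buckets)
                throw new Exception($"mismatch at {hash}");
        }
        Console.WriteLine("FastMod agrees with % for all sampled hashes");
    }
}
```

Because the magic value is computed once when the table is sized, every subsequent bucket lookup trades a division for cheap multiplies, which is where the per-call savings in the table above come from.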
| EgorBo | "2022-02-27T11:34:01Z" | "2022-03-01T19:43:37Z" | a5a7836f8193414fe64232d5c386b6402eb70e19 | af71404d9a3b38e63408af47cc847ac1a1fcf4e1 | FastMod for EEHashTable (Faster virtual generics). Closes https://github.com/dotnet/runtime/issues/65778
It was suggested to use a different data structure for `CORINFO_HELP_VIRTUAL_FUNC_PTR`, but that is a more complicated change, and I think this one is worth having too.
Benchmark:
```csharp
using System.Text.Json.Nodes;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Running;
BenchmarkSwitcher.FromAssembly(typeof(Benchmarks).Assembly).Run(args);
public class Benchmarks
{
public virtual void VGM<T>() { }
[Benchmark]
public void CallVirtualGenericMethod() => VGM<int>();
// System.Text.Json
JsonNode _jsonNode = true;
[Benchmark]
public bool JsonNodeConversion() => (bool)_jsonNode;
// string interning
string str = "Hello";
[Benchmark]
public string StrIsInterned() => string.IsInterned(str);
}
```
| Method | Toolchain | Mean | Error | StdDev | Ratio |
|-------------------------- |-------------------------- |----------:|----------:|----------:|------:|
| CallVirtualGenericMethod | \Core_Root\corerun.exe | 5.299 ns | 0.0364 ns | 0.0322 ns | 1.00 |
| CallVirtualGenericMethod | \Core_Root_PR\corerun.exe | 4.166 ns | 0.1064 ns | 0.1383 ns | 0.79 |
| | | | | | |
| JsonNodeConversion | \Core_Root\corerun.exe | 5.509 ns | 0.0070 ns | 0.0062 ns | 1.00 |
| JsonNodeConversion | \Core_Root_PR\corerun.exe | 4.177 ns | 0.0461 ns | 0.0431 ns | 0.76 |
| | | | | | |
| StrIsInterned | \Core_Root\corerun.exe | 54.741 ns | 0.1900 ns | 0.1780 ns | 1.00 |
| StrIsInterned | \Core_Root_PR\corerun.exe | 50.172 ns | 0.3521 ns | 0.2750 ns | 0.92 |
| ./src/libraries/System.Security.Permissions/src/System/Data/OracleClient/OraclePermissionAttribute.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Security.Permissions;
namespace System.Data.OracleClient
{
#if NETCOREAPP
[Obsolete(Obsoletions.CodeAccessSecurityMessage, DiagnosticId = Obsoletions.CodeAccessSecurityDiagId, UrlFormat = Obsoletions.SharedUrlFormat)]
#endif
[AttributeUsage(AttributeTargets.Assembly | AttributeTargets.Class | AttributeTargets.Struct |
AttributeTargets.Constructor | AttributeTargets.Method, AllowMultiple = true, Inherited = false)]
public sealed class OraclePermissionAttribute : CodeAccessSecurityAttribute
{
public OraclePermissionAttribute(SecurityAction action) : base(action) { }
public bool AllowBlankPassword { get; set; }
public string ConnectionString { get { return null; } set { } }
public KeyRestrictionBehavior KeyRestrictionBehavior { get; set; }
public string KeyRestrictions { get { return null; } set { } }
public override Security.IPermission CreatePermission() { return null; }
[System.ComponentModel.EditorBrowsable(ComponentModel.EditorBrowsableState.Never)]
public bool ShouldSerializeConnectionString() => false;
[System.ComponentModel.EditorBrowsable(ComponentModel.EditorBrowsableState.Never)]
public bool ShouldSerializeKeyRestrictions() => false;
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Security.Permissions;
namespace System.Data.OracleClient
{
#if NETCOREAPP
[Obsolete(Obsoletions.CodeAccessSecurityMessage, DiagnosticId = Obsoletions.CodeAccessSecurityDiagId, UrlFormat = Obsoletions.SharedUrlFormat)]
#endif
[AttributeUsage(AttributeTargets.Assembly | AttributeTargets.Class | AttributeTargets.Struct |
AttributeTargets.Constructor | AttributeTargets.Method, AllowMultiple = true, Inherited = false)]
public sealed class OraclePermissionAttribute : CodeAccessSecurityAttribute
{
public OraclePermissionAttribute(SecurityAction action) : base(action) { }
public bool AllowBlankPassword { get; set; }
public string ConnectionString { get { return null; } set { } }
public KeyRestrictionBehavior KeyRestrictionBehavior { get; set; }
public string KeyRestrictions { get { return null; } set { } }
public override Security.IPermission CreatePermission() { return null; }
[System.ComponentModel.EditorBrowsable(ComponentModel.EditorBrowsableState.Never)]
public bool ShouldSerializeConnectionString() => false;
[System.ComponentModel.EditorBrowsable(ComponentModel.EditorBrowsableState.Never)]
public bool ShouldSerializeKeyRestrictions() => false;
}
}
| -1 |
dotnet/runtime | 65,924 | Fix libraries GCStress pipeline | Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| BruceForstall | "2022-02-27T04:30:37Z" | "2022-03-01T18:20:06Z" | dfe08b3bea9a1897562635307ab4bb70bc31fada | 79026a542a8364b6783c5d212bb1841ac2c873a9 | Fix libraries GCStress pipeline. Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
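For anyone reproducing one of these runs outside the pipeline definition that follows: a GCStress scenario ultimately means launching the tests with stress-related environment variables set (and, for some modes, coredistools present next to the runtime). A rough local equivalent, sketched with a hypothetical test project name and assuming a Checked runtime build that honors `DOTNET_GCStress` (Release runtimes ignore it):

```csharp
using System.Diagnostics;

class GcStressLocalRun
{
    static void Main()
    {
        // "MyLibrary.Tests.csproj" is a placeholder; substitute a real test project.
        var psi = new ProcessStartInfo("dotnet", "test MyLibrary.Tests.csproj")
        {
            UseShellExecute = false
        };

        // The gcstress0xc scenario corresponds to GC stress level 0xC; it is only honored by
        // runtimes built with GC stress support, and some modes also need coredistools on disk.
        psi.Environment["DOTNET_GCStress"] = "0xC";

        using Process proc = Process.Start(psi)!;
        proc.WaitForExit();
    }
}
```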
| ./eng/pipelines/libraries/run-test-job.yml | parameters:
buildConfig: ''
osGroup: ''
osSubgroup: ''
archType: ''
targetRid: ''
framework: 'net7.0'
isOfficialBuild: false
liveRuntimeBuildConfig: ''
runtimeFlavor: 'coreclr'
runtimeDisplayName: 'coreclr'
interpreter: ''
timeoutInMinutes: 150
pool: ''
runtimeVariant: ''
testScope: ''
helixQueues: []
dependOnEvaluatePaths: false
condition: true
shouldContinueOnError: false
variables: {}
# coreclrTestGroup: if empty, then a normal, default test run is created. If set, it indicates a set of
# stress modes that each test will be run with. This is the same usage as 'testGroup' in
# eng/pipelines/common/templates/runtimes/run-test-job.yml.
coreclrTestGroup: ''
dependsOn: []
jobs:
- template: /eng/pipelines/libraries/base-job.yml
parameters:
buildConfig: ${{ parameters.buildConfig }}
osGroup: ${{ parameters.osGroup }}
osSubgroup: ${{ parameters.osSubgroup }}
archType: ${{ parameters.archType }}
framework: ${{ parameters.framework }}
isOfficialBuild: ${{ parameters.isOfficialBuild }}
liveRuntimeBuildConfig: ${{ parameters.liveRuntimeBuildConfig }}
runtimeFlavor: ${{ parameters.runtimeFlavor }}
runtimeVariant: ${{ parameters.runtimeVariant }}
timeoutInMinutes: ${{ parameters.timeoutInMinutes }}
container: '' # we just send to helix, no need to use a container.
condition: ${{ parameters.condition }}
testScope: ${{ parameters.testScope }}
runTests: true
${{ if ne(parameters.liveRuntimeBuildConfig, '') }}:
displayName: ${{ format('Test Run {0} {1}', parameters.liveRuntimeBuildConfig, parameters.runtimeDisplayName) }}
name: ${{ format('test_run_{0}_{1}', parameters.liveRuntimeBuildConfig, parameters.runtimeDisplayName) }}
${{ if eq(parameters.liveRuntimeBuildConfig, '') }}:
displayName: 'Test Run'
name: test_run
${{ if eq(parameters.interpreter, 'true') }}:
testDisplayName: ${{ parameters.runtimeFlavor }}_interpreter_${{ parameters.liveRuntimeBuildConfig }}
    # To run the tests we just send to helix and wait, use ubuntu hosted pools for faster provisioning and to avoid backing up our build pools
pool: ${{ parameters.pool }}
dependOnEvaluatePaths: ${{ parameters.dependOnEvaluatePaths }}
dependsOn:
- ${{ if ne(parameters.dependsOn[0], '') }}:
- ${{ parameters.dependsOn }}
- ${{ if eq(parameters.dependsOn[0], '') }}:
- ${{ if notIn(parameters.framework, 'allConfigurations', 'net48') }}:
- ${{ format('libraries_build_{0}{1}_{2}_{3}', parameters.osGroup, parameters.osSubgroup, parameters.archType, parameters.buildConfig) }}
- ${{ if ne(parameters.liveRuntimeBuildConfig, '') }}:
- ${{ format('{0}_{1}_product_build_{2}{3}_{4}_{5}', parameters.runtimeFlavor, parameters.runtimeVariant, parameters.osGroup, parameters.osSubgroup, parameters.archType, parameters.liveRuntimeBuildConfig) }}
variables:
- librariesTestsArtifactName: ${{ format('libraries_test_assets_{0}{1}_{2}_{3}', parameters.osGroup, parameters.osSubgroup, parameters.archType, parameters.buildConfig) }}
- _archiveTestsParameter: /p:ArchiveTests=true
- ${{ parameters.variables }}
steps:
- template: /eng/pipelines/common/download-artifact-step.yml
parameters:
displayName: Build Assets
cleanUnpackFolder: false
artifactName: $(librariesBuildArtifactName)
artifactFileName: $(librariesBuildArtifactName)$(archiveExtension)
unpackFolder: $(Build.SourcesDirectory)/artifacts
- template: /eng/pipelines/common/download-artifact-step.yml
parameters:
displayName: Test Assets
cleanUnpackFolder: false
artifactName: $(librariesTestsArtifactName)
artifactFileName: $(librariesTestsArtifactName)$(archiveExtension)
unpackFolder: $(Build.SourcesDirectory)/artifacts
- ${{ if in(parameters.coreclrTestGroup, 'gcstress0x3-gcstress0xc', 'gcstress-extra') }}:
# We need to find and download the GC stress dependencies (namely, coredistools). Put them
# in the 'sharedFramework' directory where we unpacked the CoreCLR build artifacts. The 'sharedFramework'
# directory is what is copied into the testhost.
- ${{ if eq(parameters.osGroup, 'windows') }}:
- script: $(Build.SourcesDirectory)\src\coreclr\tests\setup-stress-dependencies.cmd
/arch ${{ parameters.archType }}
/outputdir $(_runtimeDownloadPath)/sharedFramework
displayName: Download GC stress dependencies
- ${{ if ne(parameters.osGroup, 'windows') }}:
- script: $(Build.SourcesDirectory)/src/coreclr/tests/setup-stress-dependencies.sh
--arch=${{ parameters.archType }}
--outputDir=$(_runtimeDownloadPath)/sharedFramework
displayName: Download GC stress dependencies
- ${{ if ne(parameters.liveRuntimeBuildConfig, '') }}:
- script: $(_buildScript)
-subset libs.pretest
$(_buildArguments)
/p:RuntimeFlavor=${{ parameters.runtimeFlavor }}
/bl:$(Build.SourcesDirectory)/artifacts/log/$(_BuildConfig)/overrideRuntimeFromLiveDrop.binlog
displayName: Prepare TestHost with runtime $(runtimeFlavorName)
- template: /eng/pipelines/libraries/helix.yml
parameters:
runtimeFlavor: ${{ parameters.runtimeFlavor }}
osGroup: ${{ parameters.osGroup }}
targetRid: ${{ parameters.targetRid }}
archType: ${{ parameters.archType }}
buildConfig: ${{ parameters.buildConfig }}
helixQueues: ${{ parameters.helixQueues }}
testScope: ${{ parameters.testScope }}
interpreter: ${{ parameters.interpreter }}
shouldContinueOnError: ${{ parameters.shouldContinueOnError }}
creator: dotnet-bot
testRunNamePrefixSuffix: $(_testRunNamePrefixSuffix)
extraHelixArguments: $(_extraHelixArguments)
      # coreclrTestGroup: The following mapping of 'coreclrTestGroup' to 'scenarios' is copied from
# eng/pipelines/common/templates/runtimes/run-test-job.yml (with 'testGroup' replaced by 'coreclrTestGroup'
# for clarity), and should remain in sync. This is only a subset; only the testGroups that are
# used to test the libraries have been added here. More could be added if we decided to test the
# libraries with more stress modes. The scenario tags are interpreted by
# src\tests\Common\testenvironment.proj.
#
# The one difference here compared to eng/pipelines/common/templates/runtimes/run-test-job.yml is
# that 'jitstress' contains 'no_tiered_compilation'. The 'normal' (default) test mode
# is run in a regular CI job, so there is no need to duplicate it here. So, add 'no_tiered_compilation'
# to the 'jitstress' job instead of adding a new job just for 'no_tiered_compilation'.
${{ if in(parameters.coreclrTestGroup, 'jitstress') }}:
scenarios:
- no_tiered_compilation
- jitminopts
- jitstress1
- jitstress1_tiered
- jitstress2
- jitstress2_tiered
- zapdisable
- tailcallstress
${{ if in(parameters.coreclrTestGroup, 'jitstressregs' ) }}:
scenarios:
- jitstressregs1
- jitstressregs2
- jitstressregs3
- jitstressregs4
- jitstressregs8
- jitstressregs0x10
- jitstressregs0x80
- jitstressregs0x1000
${{ if in(parameters.coreclrTestGroup, 'jitstress2-jitstressregs') }}:
scenarios:
- jitstress2_jitstressregs1
- jitstress2_jitstressregs2
- jitstress2_jitstressregs3
- jitstress2_jitstressregs4
- jitstress2_jitstressregs8
- jitstress2_jitstressregs0x10
- jitstress2_jitstressregs0x80
- jitstress2_jitstressregs0x1000
${{ if in(parameters.coreclrTestGroup, 'gcstress0x3-gcstress0xc') }}:
scenarios:
# Disable gcstress0x3 for now; it causes lots of test timeouts. Investigate this after
# gcstress0xc runs are clean. Tracking issue: https://github.com/dotnet/runtime/issues/38903.
# - gcstress0x3
- gcstress0xc
${{ if in(parameters.coreclrTestGroup, 'gcstress-extra') }}:
scenarios:
- heapverify1
- gcstress0xc_zapdisable
- gcstress0xc_zapdisable_jitstress2
- gcstress0xc_zapdisable_heapverify1
- gcstress0xc_jitstress1
- gcstress0xc_jitstress2
- gcstress0xc_jitminopts_heapverify1
${{ if in(parameters.coreclrTestGroup, 'pgo') }}:
scenarios:
- nopgo
- defaultpgo
- dynamicpgo
- fullpgo
- fullpgo_random_gdv
- fullpgo_random_edge
- fullpgo_random_gdv_edge
- jitosr
- jitosr_stress
- jitosr_stress_random
- jitosr_pgo
| parameters:
buildConfig: ''
osGroup: ''
osSubgroup: ''
archType: ''
targetRid: ''
framework: 'net7.0'
isOfficialBuild: false
liveRuntimeBuildConfig: ''
runtimeFlavor: 'coreclr'
runtimeDisplayName: 'coreclr'
interpreter: ''
timeoutInMinutes: 150
pool: ''
runtimeVariant: ''
testScope: ''
helixQueues: []
dependOnEvaluatePaths: false
condition: true
shouldContinueOnError: false
variables: {}
# coreclrTestGroup: if empty, then a normal, default test run is created. If set, it indicates a set of
# stress modes that each test will be run with. This is the same usage as 'testGroup' in
# eng/pipelines/common/templates/runtimes/run-test-job.yml.
coreclrTestGroup: ''
dependsOn: []
jobs:
- template: /eng/pipelines/libraries/base-job.yml
parameters:
buildConfig: ${{ parameters.buildConfig }}
osGroup: ${{ parameters.osGroup }}
osSubgroup: ${{ parameters.osSubgroup }}
archType: ${{ parameters.archType }}
framework: ${{ parameters.framework }}
isOfficialBuild: ${{ parameters.isOfficialBuild }}
liveRuntimeBuildConfig: ${{ parameters.liveRuntimeBuildConfig }}
runtimeFlavor: ${{ parameters.runtimeFlavor }}
runtimeVariant: ${{ parameters.runtimeVariant }}
timeoutInMinutes: ${{ parameters.timeoutInMinutes }}
container: '' # we just send to helix, no need to use a container.
condition: ${{ parameters.condition }}
testScope: ${{ parameters.testScope }}
runTests: true
${{ if ne(parameters.liveRuntimeBuildConfig, '') }}:
displayName: ${{ format('Test Run {0} {1}', parameters.liveRuntimeBuildConfig, parameters.runtimeDisplayName) }}
name: ${{ format('test_run_{0}_{1}', parameters.liveRuntimeBuildConfig, parameters.runtimeDisplayName) }}
${{ if eq(parameters.liveRuntimeBuildConfig, '') }}:
displayName: 'Test Run'
name: test_run
${{ if eq(parameters.interpreter, 'true') }}:
testDisplayName: ${{ parameters.runtimeFlavor }}_interpreter_${{ parameters.liveRuntimeBuildConfig }}
    # To run the tests we just send to helix and wait, use ubuntu hosted pools for faster provisioning and to avoid backing up our build pools
pool: ${{ parameters.pool }}
dependOnEvaluatePaths: ${{ parameters.dependOnEvaluatePaths }}
dependsOn:
- ${{ if ne(parameters.dependsOn[0], '') }}:
- ${{ parameters.dependsOn }}
- ${{ if eq(parameters.dependsOn[0], '') }}:
- ${{ if notIn(parameters.framework, 'allConfigurations', 'net48') }}:
- ${{ format('libraries_build_{0}{1}_{2}_{3}', parameters.osGroup, parameters.osSubgroup, parameters.archType, parameters.buildConfig) }}
- ${{ if ne(parameters.liveRuntimeBuildConfig, '') }}:
- ${{ format('{0}_{1}_product_build_{2}{3}_{4}_{5}', parameters.runtimeFlavor, parameters.runtimeVariant, parameters.osGroup, parameters.osSubgroup, parameters.archType, parameters.liveRuntimeBuildConfig) }}
variables:
- librariesTestsArtifactName: ${{ format('libraries_test_assets_{0}{1}_{2}_{3}', parameters.osGroup, parameters.osSubgroup, parameters.archType, parameters.buildConfig) }}
- _archiveTestsParameter: /p:ArchiveTests=true
- ${{ parameters.variables }}
steps:
- template: /eng/pipelines/common/download-artifact-step.yml
parameters:
displayName: Build Assets
cleanUnpackFolder: false
artifactName: $(librariesBuildArtifactName)
artifactFileName: $(librariesBuildArtifactName)$(archiveExtension)
unpackFolder: $(Build.SourcesDirectory)/artifacts
- template: /eng/pipelines/common/download-artifact-step.yml
parameters:
displayName: Test Assets
cleanUnpackFolder: false
artifactName: $(librariesTestsArtifactName)
artifactFileName: $(librariesTestsArtifactName)$(archiveExtension)
unpackFolder: $(Build.SourcesDirectory)/artifacts
- ${{ if ne(parameters.liveRuntimeBuildConfig, '') }}:
- script: $(_buildScript)
-subset libs.pretest
$(_buildArguments)
/p:RuntimeFlavor=${{ parameters.runtimeFlavor }}
/bl:$(Build.SourcesDirectory)/artifacts/log/$(_BuildConfig)/overrideRuntimeFromLiveDrop.binlog
displayName: Prepare TestHost with runtime $(runtimeFlavorName)
- template: /eng/pipelines/libraries/helix.yml
parameters:
runtimeFlavor: ${{ parameters.runtimeFlavor }}
osGroup: ${{ parameters.osGroup }}
targetRid: ${{ parameters.targetRid }}
archType: ${{ parameters.archType }}
buildConfig: ${{ parameters.buildConfig }}
helixQueues: ${{ parameters.helixQueues }}
testScope: ${{ parameters.testScope }}
interpreter: ${{ parameters.interpreter }}
shouldContinueOnError: ${{ parameters.shouldContinueOnError }}
creator: dotnet-bot
testRunNamePrefixSuffix: $(_testRunNamePrefixSuffix)
extraHelixArguments: $(_extraHelixArguments)
      # coreclrTestGroup: The following mapping of 'coreclrTestGroup' to 'scenarios' is copied from
# eng/pipelines/common/templates/runtimes/run-test-job.yml (with 'testGroup' replaced by 'coreclrTestGroup'
# for clarity), and should remain in sync. This is only a subset; only the testGroups that are
# used to test the libraries have been added here. More could be added if we decided to test the
# libraries with more stress modes. The scenario tags are interpreted by
# src\tests\Common\testenvironment.proj.
#
# The one difference here compared to eng/pipelines/common/templates/runtimes/run-test-job.yml is
# that 'jitstress' contains 'no_tiered_compilation'. The 'normal' (default) test mode
# is run in a regular CI job, so there is no need to duplicate it here. So, add 'no_tiered_compilation'
# to the 'jitstress' job instead of adding a new job just for 'no_tiered_compilation'.
${{ if in(parameters.coreclrTestGroup, 'jitstress') }}:
scenarios:
- no_tiered_compilation
- jitminopts
- jitstress1
- jitstress1_tiered
- jitstress2
- jitstress2_tiered
- zapdisable
- tailcallstress
${{ if in(parameters.coreclrTestGroup, 'jitstressregs' ) }}:
scenarios:
- jitstressregs1
- jitstressregs2
- jitstressregs3
- jitstressregs4
- jitstressregs8
- jitstressregs0x10
- jitstressregs0x80
- jitstressregs0x1000
${{ if in(parameters.coreclrTestGroup, 'jitstress2-jitstressregs') }}:
scenarios:
- jitstress2_jitstressregs1
- jitstress2_jitstressregs2
- jitstress2_jitstressregs3
- jitstress2_jitstressregs4
- jitstress2_jitstressregs8
- jitstress2_jitstressregs0x10
- jitstress2_jitstressregs0x80
- jitstress2_jitstressregs0x1000
${{ if in(parameters.coreclrTestGroup, 'gcstress0x3-gcstress0xc') }}:
scenarios:
# Disable gcstress0x3 for now; it causes lots of test timeouts. Investigate this after
# gcstress0xc runs are clean. Tracking issue: https://github.com/dotnet/runtime/issues/38903.
# - gcstress0x3
- gcstress0xc
${{ if in(parameters.coreclrTestGroup, 'gcstress-extra') }}:
scenarios:
- heapverify1
- gcstress0xc_zapdisable
- gcstress0xc_zapdisable_jitstress2
- gcstress0xc_zapdisable_heapverify1
- gcstress0xc_jitstress1
- gcstress0xc_jitstress2
- gcstress0xc_jitminopts_heapverify1
${{ if in(parameters.coreclrTestGroup, 'pgo') }}:
scenarios:
- nopgo
- defaultpgo
- dynamicpgo
- fullpgo
- fullpgo_random_gdv
- fullpgo_random_edge
- fullpgo_random_gdv_edge
- jitosr
- jitosr_stress
- jitosr_stress_random
- jitosr_pgo
| 1 |
dotnet/runtime | 65,924 | Fix libraries GCStress pipeline | Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| BruceForstall | "2022-02-27T04:30:37Z" | "2022-03-01T18:20:06Z" | dfe08b3bea9a1897562635307ab4bb70bc31fada | 79026a542a8364b6783c5d212bb1841ac2c873a9 | Fix libraries GCStress pipeline. Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| ./src/libraries/externals.csproj | <Project Sdk="Microsoft.Build.NoTargets">
<PropertyGroup>
<!-- Set the RuntimeIdentifier so that the DotNetHost and DotNetHostPolicy packages resolve for the corresponding runtime. -->
<RuntimeIdentifier>$(PackageRID)</RuntimeIdentifier>
<SwapNativeForIL Condition="'$(SwapNativeForIL)' == '' and ('$(Configuration)' == 'Debug' or '$(Coverage)' == 'true') and '$(RuntimeFlavor)' != 'Mono'">true</SwapNativeForIL>
<TargetFramework>$(NetCoreAppCurrent)</TargetFramework>
<!-- Binplace properties -->
<BinPlaceForTargetVertical>false</BinPlaceForTargetVertical>
<BinPlaceNative>true</BinPlaceNative>
<BinPlaceRuntime>false</BinPlaceRuntime>
<CopyLocalLockFileAssemblies>true</CopyLocalLockFileAssemblies>
<UseLiveBuiltDotNetHost Condition="'$(TargetArchitecture)' == 's390x' or '$(TargetArchitecture)' == 'armv6'">true</UseLiveBuiltDotNetHost>
</PropertyGroup>
<ItemGroup Condition="'$(DotNetBuildFromSource)' != 'true'">
<PackageReference Include="Microsoft.DiaSymReader.Native"
Version="$(MicrosoftDiaSymReaderNativeVersion)" />
</ItemGroup>
<ItemGroup Condition="'$(TargetsMobile)' != 'true' and '$(UseLiveBuiltDotNetHost)' != 'true'">
<PackageReference Include="Microsoft.NETCore.DotNetHost"
Version="$(MicrosoftNETCoreDotNetHostVersion)" />
<PackageReference Include="Microsoft.NETCore.DotNetHostPolicy"
Version="$(MicrosoftNETCoreDotNetHostPolicyVersion)" />
</ItemGroup>
<!-- Setup the testing shared framework host -->
<Target Name="SetupTestingHost"
AfterTargets="AfterResolveReferences"
Condition="'$(TestNativeAot)' != 'true'">
<PropertyGroup>
<UseHardlink>true</UseHardlink>
<!-- workaround core-setup problem for hardlinking dotnet executable to testhost: core-setup #4742 -->
<UseHardlink Condition="$(PackageRID.StartsWith('freebsd'))">false</UseHardlink>
</PropertyGroup>
<!-- We do not need apphost.exe.
Exclude here so that when building with the 2.x SDK we don't place it in the test shared framework. -->
<ItemGroup>
<ReferenceCopyLocalPaths Remove="@(ReferenceCopyLocalPaths)" Condition="'%(Filename)' == 'apphost'" />
</ItemGroup>
<ItemGroup Condition="'$(UseLiveBuiltDotNetHost)' != 'true'">
<HostFxFile Include="@(ReferenceCopyLocalPaths)" Condition="'%(ReferenceCopyLocalPaths.Filename)' == 'hostfxr' or
'%(ReferenceCopyLocalPaths.Filename)' == 'libhostfxr'" />
<DotnetExe Include="@(ReferenceCopyLocalPaths)" Condition="'%(ReferenceCopyLocalPaths.Filename)' == 'dotnet'" />
</ItemGroup>
<ItemGroup Condition="'$(UseLiveBuiltDotNetHost)' == 'true'">
<CoreHostFiles Include="$(DotNetHostBinDir)*" />
<HostFxFile Include="@(CoreHostFiles)" Condition="'%(CoreHostFiles.Filename)' == 'hostfxr' or
'%(CoreHostFiles.Filename)' == 'libhostfxr'" />
<HostPolicyFile Include="@(CoreHostFiles)" Condition="'%(CoreHostFiles.Filename)' == 'hostpolicy' or
'%(CoreHostFiles.Filename)' == 'libhostpolicy'" />
<DotnetExe Include="@(CoreHostFiles)" Condition="'%(CoreHostFiles.Filename)' == 'dotnet'" />
</ItemGroup>
<Copy SourceFiles="@(HostFxFile)"
DestinationFolder="$(NetCoreAppCurrentTestHostPath)host\fxr\$(ProductVersion)"
SkipUnchangedFiles="true"
UseHardlinksIfPossible="$(UseHardlink)" />
<Copy SourceFiles="@(HostPolicyFile)"
DestinationFolder="$(NetCoreAppCurrentTestHostPath)shared\Microsoft.NETCore.App\$(ProductVersion)"
SkipUnchangedFiles="true"
UseHardlinksIfPossible="$(UseHardlink)" />
<Copy SourceFiles="@(DotnetExe)"
DestinationFolder="$(NetCoreAppCurrentTestHostPath)"
SkipUnchangedFiles="true"
UseHardlinksIfPossible="$(UseHardlink)" />
<Exec Command="chmod +x $(NetCoreAppCurrentTestHostPath)%(DotnetExe.Filename)%(DotnetExe.Extension)" Condition="'$(TargetOS)' != 'windows' and '$(OS)' != 'Windows_NT'"/>
</Target>
<Target Name="OverrideRuntimeCoreCLR"
DependsOnTargets="ResolveRuntimeFilesFromLocalBuild"
AfterTargets="AfterResolveReferences"
Condition="'$(RuntimeFlavor)' != 'Mono' and '$(TestNativeAot)' != 'true'">
<ItemGroup>
<!-- CoreRun is not used for testing anymore, but we still use it for benchmarking and profiling -->
<RuntimeFiles Include="$(CoreCLRArtifactsPath)\corerun*" />
<RuntimeFiles Include="$(CoreCLRArtifactsPath)\PDB\corerun*" />
<ReferenceCopyLocalPaths Include="@(RuntimeFiles)" />
</ItemGroup>
<ItemGroup Condition="'$(SwapNativeForIL)' == 'true'">
<CoreCLRILFiles Include="$(CoreCLRArtifactsPath)\IL\*.*" />
<ReferenceCopyLocalPaths Remove="@(ReferenceCopyLocalPaths)" Condition="'@(CoreCLRILFiles->'%(FileName)%(Extension)')' == '%(FileName)%(Extension)'" />
<ReferenceCopyLocalPaths Remove="@(ReferenceCopyLocalPaths)" Condition="'@(CoreCLRILFiles->'%(FileName).ni%(Extension)')' == '%(FileName)%(Extension)'" />
<ReferenceCopyLocalPaths Include="@(CoreCLRILFiles)" />
</ItemGroup>
<Error Condition="'$(SwapNativeForIL)' == 'true' and '@(CoreCLRILFiles)' == ''" Text="Could not locate CoreCLR IL files." />
</Target>
<Target Name="OverrideRuntimeMono"
DependsOnTargets="ResolveRuntimeFilesFromLocalBuild"
AfterTargets="AfterResolveReferences"
Condition="'$(RuntimeFlavor)' == 'Mono'">
<ItemGroup>
<ReferenceCopyLocalPaths Include="@(RuntimeFiles)" />
<!-- Setup runtime pack native. -->
<ReferenceCopyLocalPaths Include="@(MonoCrossFiles)"
DestinationSubDirectory="cross/%(RecursiveDir)" />
<ReferenceCopyLocalPaths Include="@(MonoIncludeFiles)"
DestinationSubDirectory="include/%(RecursiveDir)" />
</ItemGroup>
</Target>
</Project>
| <Project Sdk="Microsoft.Build.NoTargets">
<PropertyGroup>
<!-- Set the RuntimeIdentifier so that the DotNetHost and DotNetHostPolicy packages resolve for the corresponding runtime. -->
<RuntimeIdentifier>$(PackageRID)</RuntimeIdentifier>
<SwapNativeForIL Condition="'$(SwapNativeForIL)' == '' and ('$(Configuration)' == 'Debug' or '$(Coverage)' == 'true') and '$(RuntimeFlavor)' != 'Mono'">true</SwapNativeForIL>
<TargetFramework>$(NetCoreAppCurrent)</TargetFramework>
<!-- Binplace properties -->
<BinPlaceForTargetVertical>false</BinPlaceForTargetVertical>
<BinPlaceNative>true</BinPlaceNative>
<BinPlaceRuntime>false</BinPlaceRuntime>
<CopyLocalLockFileAssemblies>true</CopyLocalLockFileAssemblies>
<UseLiveBuiltDotNetHost Condition="'$(TargetArchitecture)' == 's390x' or '$(TargetArchitecture)' == 'armv6'">true</UseLiveBuiltDotNetHost>
</PropertyGroup>
<PropertyGroup>
<GCStressDependsOnCoreDisTools>false</GCStressDependsOnCoreDisTools>
<GCStressDependsOnCoreDisTools Condition="'$(TargetOS)' == 'Windows' And ('$(TargetArchitecture)' == 'x64' Or '$(TargetArchitecture)' == 'x86')">true</GCStressDependsOnCoreDisTools>
<GCStressDependsOnCoreDisTools Condition="'$(TargetOS)' == 'Linux' And '$(TargetArchitecture)' == 'x64'">true</GCStressDependsOnCoreDisTools>
<CopyCoreDisToolsToCoreRoot>false</CopyCoreDisToolsToCoreRoot>
<CopyCoreDisToolsToCoreRoot Condition="$(GCStressDependsOnCoreDisTools) And '$(DotNetBuildFromSource)' != 'true'">true</CopyCoreDisToolsToCoreRoot>
</PropertyGroup>
<Import Project="$(RepositoryEngineeringDir)coredistools.targets" Condition="$(CopyCoreDisToolsToCoreRoot)" />
<ItemGroup Condition="'$(DotNetBuildFromSource)' != 'true'">
<PackageReference Include="Microsoft.DiaSymReader.Native"
Version="$(MicrosoftDiaSymReaderNativeVersion)" />
</ItemGroup>
<ItemGroup Condition="'$(TargetsMobile)' != 'true' and '$(UseLiveBuiltDotNetHost)' != 'true'">
<PackageReference Include="Microsoft.NETCore.DotNetHost"
Version="$(MicrosoftNETCoreDotNetHostVersion)" />
<PackageReference Include="Microsoft.NETCore.DotNetHostPolicy"
Version="$(MicrosoftNETCoreDotNetHostPolicyVersion)" />
</ItemGroup>
<!-- Setup the testing shared framework host -->
<Target Name="SetupTestingHost"
AfterTargets="AfterResolveReferences"
Condition="'$(TestNativeAot)' != 'true'">
<PropertyGroup>
<UseHardlink>true</UseHardlink>
<!-- workaround core-setup problem for hardlinking dotnet executable to testhost: core-setup #4742 -->
<UseHardlink Condition="$(PackageRID.StartsWith('freebsd'))">false</UseHardlink>
</PropertyGroup>
<!-- We do not need apphost.exe.
Exclude here so that when building with the 2.x SDK we don't place it in the test shared framework. -->
<ItemGroup>
<ReferenceCopyLocalPaths Remove="@(ReferenceCopyLocalPaths)" Condition="'%(Filename)' == 'apphost'" />
</ItemGroup>
<ItemGroup Condition="'$(UseLiveBuiltDotNetHost)' != 'true'">
<HostFxFile Include="@(ReferenceCopyLocalPaths)" Condition="'%(ReferenceCopyLocalPaths.Filename)' == 'hostfxr' or
'%(ReferenceCopyLocalPaths.Filename)' == 'libhostfxr'" />
<DotnetExe Include="@(ReferenceCopyLocalPaths)" Condition="'%(ReferenceCopyLocalPaths.Filename)' == 'dotnet'" />
</ItemGroup>
<ItemGroup Condition="'$(UseLiveBuiltDotNetHost)' == 'true'">
<CoreHostFiles Include="$(DotNetHostBinDir)*" />
<HostFxFile Include="@(CoreHostFiles)" Condition="'%(CoreHostFiles.Filename)' == 'hostfxr' or
'%(CoreHostFiles.Filename)' == 'libhostfxr'" />
<HostPolicyFile Include="@(CoreHostFiles)" Condition="'%(CoreHostFiles.Filename)' == 'hostpolicy' or
'%(CoreHostFiles.Filename)' == 'libhostpolicy'" />
<DotnetExe Include="@(CoreHostFiles)" Condition="'%(CoreHostFiles.Filename)' == 'dotnet'" />
</ItemGroup>
<Copy SourceFiles="@(HostFxFile)"
DestinationFolder="$(NetCoreAppCurrentTestHostPath)host\fxr\$(ProductVersion)"
SkipUnchangedFiles="true"
UseHardlinksIfPossible="$(UseHardlink)" />
<Copy SourceFiles="@(HostPolicyFile)"
DestinationFolder="$(NetCoreAppCurrentTestHostPath)shared\Microsoft.NETCore.App\$(ProductVersion)"
SkipUnchangedFiles="true"
UseHardlinksIfPossible="$(UseHardlink)" />
<Copy SourceFiles="@(DotnetExe)"
DestinationFolder="$(NetCoreAppCurrentTestHostPath)"
SkipUnchangedFiles="true"
UseHardlinksIfPossible="$(UseHardlink)" />
<Copy Condition="$(CopyCoreDisToolsToCoreRoot)"
SourceFiles="$(CoreDisToolsLibrary)"
DestinationFolder="$(NetCoreAppCurrentTestHostPath)shared\Microsoft.NETCore.App\$(ProductVersion)"
SkipUnchangedFiles="true"
UseHardlinksIfPossible="$(UseHardlink)" />
<Exec Command="chmod +x $(NetCoreAppCurrentTestHostPath)%(DotnetExe.Filename)%(DotnetExe.Extension)" Condition="'$(TargetOS)' != 'windows' and '$(OS)' != 'Windows_NT'"/>
</Target>
<Target Name="OverrideRuntimeCoreCLR"
DependsOnTargets="ResolveRuntimeFilesFromLocalBuild"
AfterTargets="AfterResolveReferences"
Condition="'$(RuntimeFlavor)' != 'Mono' and '$(TestNativeAot)' != 'true'">
<ItemGroup>
<!-- CoreRun is not used for testing anymore, but we still use it for benchmarking and profiling -->
<RuntimeFiles Include="$(CoreCLRArtifactsPath)\corerun*" />
<RuntimeFiles Include="$(CoreCLRArtifactsPath)\PDB\corerun*" />
<ReferenceCopyLocalPaths Include="@(RuntimeFiles)" />
</ItemGroup>
<ItemGroup Condition="'$(SwapNativeForIL)' == 'true'">
<CoreCLRILFiles Include="$(CoreCLRArtifactsPath)\IL\*.*" />
<ReferenceCopyLocalPaths Remove="@(ReferenceCopyLocalPaths)" Condition="'@(CoreCLRILFiles->'%(FileName)%(Extension)')' == '%(FileName)%(Extension)'" />
<ReferenceCopyLocalPaths Remove="@(ReferenceCopyLocalPaths)" Condition="'@(CoreCLRILFiles->'%(FileName).ni%(Extension)')' == '%(FileName)%(Extension)'" />
<ReferenceCopyLocalPaths Include="@(CoreCLRILFiles)" />
</ItemGroup>
<Error Condition="'$(SwapNativeForIL)' == 'true' and '@(CoreCLRILFiles)' == ''" Text="Could not locate CoreCLR IL files." />
</Target>
<Target Name="OverrideRuntimeMono"
DependsOnTargets="ResolveRuntimeFilesFromLocalBuild"
AfterTargets="AfterResolveReferences"
Condition="'$(RuntimeFlavor)' == 'Mono'">
<ItemGroup>
<ReferenceCopyLocalPaths Include="@(RuntimeFiles)" />
<!-- Setup runtime pack native. -->
<ReferenceCopyLocalPaths Include="@(MonoCrossFiles)"
DestinationSubDirectory="cross/%(RecursiveDir)" />
<ReferenceCopyLocalPaths Include="@(MonoIncludeFiles)"
DestinationSubDirectory="include/%(RecursiveDir)" />
</ItemGroup>
</Target>
</Project>
| 1 |
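The tests.proj fragment above relies on two externally defined properties, $(CopyCoreDisToolsToCoreRoot) and $(CoreDisToolsLibrary), coming from the imported coredistools.targets. That file is not included in this record, so the following is only a rough sketch of the shape such a targets file could take; all concrete values in it (file names, the artifacts path, the OS switch) are illustrative assumptions rather than the repository's actual content.

<Project>
  <!-- Illustrative sketch only; values below are assumptions. -->
  <PropertyGroup>
    <CoreDisToolsLibraryName>libcoredistools.so</CoreDisToolsLibraryName>
    <CoreDisToolsLibraryName Condition="'$(TargetOS)' == 'windows'">coredistools.dll</CoreDisToolsLibraryName>
    <CoreDisToolsLibraryName Condition="'$(TargetOS)' == 'OSX'">libcoredistools.dylib</CoreDisToolsLibraryName>
    <CoreDisToolsLibrary>$(ArtifactsDir)coredistools/$(CoreDisToolsLibraryName)</CoreDisToolsLibrary>
    <CopyCoreDisToolsToCoreRoot Condition="'$(CopyCoreDisToolsToCoreRoot)' == ''">false</CopyCoreDisToolsToCoreRoot>
  </PropertyGroup>
</Project>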
dotnet/runtime | 65,924 | Fix libraries GCStress pipeline | Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| BruceForstall | "2022-02-27T04:30:37Z" | "2022-03-01T18:20:06Z" | dfe08b3bea9a1897562635307ab4bb70bc31fada | 79026a542a8364b6783c5d212bb1841ac2c873a9 | Fix libraries GCStress pipeline. Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| ./src/libraries/sendtohelixhelp.proj | <Project InitialTargets="PrintHelixQueues;PrintBuildTargetFramework;BuildHelixWorkItems" Sdk="Microsoft.DotNet.Helix.Sdk">
<!-- This project uses the Helix SDK, documented at
https://github.com/dotnet/arcade/tree/master/src/Microsoft.DotNet.Helix/Sdk,
to send test jobs to Helix.
-->
<PropertyGroup>
<WindowsShell Condition="'$(TargetOS)' == 'windows' or '$(BrowserHost)' == 'windows'">true</WindowsShell>
<!-- Set Helix build to build number if available -->
<HelixBuild Condition="'$(HelixBuild)' == ''">$(BUILD_BUILDNUMBER)</HelixBuild>
<HelixBuild Condition="'$(HelixBuild)' == ''">default</HelixBuild>
<HelixConfiguration>$(Configuration)</HelixConfiguration>
<HelixArchitecture>$(TargetArchitecture)</HelixArchitecture>
<BuildHelixWorkItemsDependsOn>BuildHelixCommand</BuildHelixWorkItemsDependsOn>
<EnableDefaultBuildHelixWorkItems>true</EnableDefaultBuildHelixWorkItems>
</PropertyGroup>
<Import Project="$(MSBuildThisFileDirectory)sendtohelix-wasm.targets" Condition="'$(TargetOS)' == 'Browser'" />
<Import Project="$(MSBuildThisFileDirectory)sendtohelix-mobile.targets" Condition="'$(TargetsMobile)' == 'true' and '$(TargetOS)' != 'Browser'" />
<PropertyGroup Condition="'$(_workItemTimeout)' == ''">
<!-- Normal jobs have a 30 minute timeout for arm/arm64, and 15 minute timeout otherwise.
Stress modes can take considerably longer, so double those numbers. And GCStress is even slower.
-->
<_workItemTimeout Condition="
'$(Scenario)' == 'gcstress0x3' or
'$(Scenario)' == 'gcstress0xc' or
'$(Scenario)' == 'heapverify1' or
'$(Scenario)' == 'gcstress0xc_zapdisable' or
'$(Scenario)' == 'gcstress0xc_zapdisable_jitstress2' or
'$(Scenario)' == 'gcstress0xc_zapdisable_heapverify1' or
'$(Scenario)' == 'gcstress0xc_jitstress1' or
'$(Scenario)' == 'gcstress0xc_jitstress2' or
'$(Scenario)' == 'gcstress0xc_jitminopts_heapverify1'">01:30:00</_workItemTimeout>
<_workItemTimeout Condition="'$(_workItemTimeout)' == '' and ('$(TargetOS)' == 'iOSSimulator' or '$(TargetOS)' == 'tvOSSimulator' or '$(TargetOS)' == 'MacCatalyst' or '$(TargetOS)' == 'Android')">00:30:00</_workItemTimeout>
<_workItemTimeout Condition="'$(_workItemTimeout)' == '' and ('$(TargetOS)' == 'iOS' or '$(TargetOS)' == 'tvOS')">00:45:00</_workItemTimeout>
<_workItemTimeout Condition="'$(Scenario)' == '' and '$(_workItemTimeout)' == '' and ('$(TargetArchitecture)' == 'arm64' or '$(TargetArchitecture)' == 'arm')">00:45:00</_workItemTimeout>
<_workItemTimeout Condition="'$(Scenario)' != '' and '$(_workItemTimeout)' == '' and ('$(TargetArchitecture)' == 'arm64' or '$(TargetArchitecture)' == 'arm')">01:00:00</_workItemTimeout>
<_workItemTimeout Condition="'$(Scenario)' == '' and '$(_workItemTimeout)' == '' and '$(Outerloop)' == 'true'">00:20:00</_workItemTimeout>
<_workItemTimeout Condition="'$(Scenario)' == '' and '$(_workItemTimeout)' == ''">00:15:00</_workItemTimeout>
<_workItemTimeout Condition="'$(Scenario)' != '' and '$(_workItemTimeout)' == ''">00:30:00</_workItemTimeout>
</PropertyGroup>
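  <!-- Worked example of the fallthrough above when _workItemTimeout is not preset:
       Scenario=gcstress0xc matches the first condition and gets 01:30:00; arm/arm64 runs get
       00:45:00 without a scenario or 01:00:00 with one; remaining scenario runs get 00:30:00,
       outerloop runs 00:20:00, and everything else 00:15:00. -->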
<PropertyGroup>
<!-- The Helix runtime payload and the tests to run -->
<!-- TestArchiveRuntimeFile will be passed as a property by the calling project -->
<HelixCorrelationPayload Condition="'$(HelixCorrelationPayload)' == ''">$(TestArchiveRuntimeFile)</HelixCorrelationPayload>
<WorkItemArchiveWildCard Condition="'$(WorkItemArchiveWildCard)' == ''">$(TestArchiveTestsRoot)**/*.zip</WorkItemArchiveWildCard>
    <!-- This property is used to show the test results in Azure Dev Ops. By setting this property the
test run name will be displayed as $(BuildTargetFramework)-$(TargetOS)-$(Configuration)-$(TargetArchitecture)-$(HelixTargetQueue)
In the multi-scenario case, we append the scenario name to this test name prefix to distinguish the different scenario results.
-->
<TestRunNamePrefix>$(BuildTargetFramework)-$(TargetOS)-$(Configuration)-$(TargetArchitecture)-</TestRunNamePrefix>
<TestRunNamePrefix Condition="'$(TestRunNamePrefixSuffix)' != ''">$(TestRunNamePrefix)$(TestRunNamePrefixSuffix)-</TestRunNamePrefix>
<TestRunNamePrefix Condition="'$(Scenario)' != ''">$(TestRunNamePrefix)$(Scenario)-</TestRunNamePrefix>
<FailOnTestFailure Condition="'$(FailOnTestFailure)' == '' and '$(WaitForWorkItemCompletion)' != ''">$(WaitForWorkItemCompletion)</FailOnTestFailure>
<SdkForWorkloadTestingDirName Condition="'$(SdkForWorkloadTestingDirName)' == '' and '$(NeedsWorkload)' == 'true' and '$(TestUsingWorkloads)' == 'true'">dotnet-workload</SdkForWorkloadTestingDirName>
<SdkForWorkloadTestingDirName Condition="'$(SdkForWorkloadTestingDirName)' == '' and '$(NeedsWorkload)' == 'true' and '$(TestUsingWorkloads)' != 'true'">sdk-no-workload</SdkForWorkloadTestingDirName>
</PropertyGroup>
<PropertyGroup Condition="'$(HelixType)' == ''">
<!-- For PRs we want HelixType to be the same for all frameworks except package testing-->
<TestScope Condition="'$(TestScope)' == ''">innerloop</TestScope>
<HelixType>test/functional/cli/$(TestScope)/</HelixType>
</PropertyGroup>
<PropertyGroup Condition="'$(TargetOS)' == 'Browser' or '$(TargetOS)' == 'Android' or '$(TargetOS)' == 'iOS' or '$(TargetOS)' == 'iOSSimulator' or '$(TargetOS)' == 'tvOS' or '$(TargetOS)' == 'tvOSSimulator' or '$(TargetOS)' == 'MacCatalyst'">
<IncludeXHarnessCli>true</IncludeXHarnessCli>
<EnableXHarnessTelemetry>true</EnableXHarnessTelemetry>
</PropertyGroup>
<ItemGroup Condition="'$(MonoEnvOptions)' != ''">
<HelixPreCommand Condition="'$(WindowsShell)' == 'true'" Include="set MONO_ENV_OPTIONS='$(MonoEnvOptions)'" />
<HelixPreCommand Condition="'$(WindowsShell)' != 'true'" Include="export MONO_ENV_OPTIONS='$(MonoEnvOptions)'" />
</ItemGroup>
<ItemGroup Condition="'$(WindowsShell)' == 'true'">
<HelixPreCommand Include="taskkill.exe /f /im corerun.exe"/>
<HelixPostCommand Include="taskkill.exe /f /im corerun.exe"/>
</ItemGroup>
<PropertyGroup Condition="'$(NeedsWorkload)' == 'true'">
<NeedsDotNetSdk>false</NeedsDotNetSdk>
<IncludeXHarnessCli>true</IncludeXHarnessCli>
<EnableXHarnessTelemetry>true</EnableXHarnessTelemetry>
</PropertyGroup>
<PropertyGroup>
<!-- Set the name of the scenario file. Note that this is only used in invocations where $(Scenario) is set
(which is when this project is invoked to call the "CreateTestEnvFile" target).
-->
<TestEnvFileName></TestEnvFileName>
<TestEnvFileName Condition=" '$(Scenario)' != '' and '$(TargetOS)' == 'windows'">SetStressModes_$(Scenario).cmd</TestEnvFileName>
<TestEnvFileName Condition=" '$(Scenario)' != '' and '$(TargetOS)' != 'windows' and '$(TargetOS)' != 'Browser'">SetStressModes_$(Scenario).sh</TestEnvFileName>
</PropertyGroup>
<!-- HelixPreCommands is a set of commands run before the work item command. We use it here to inject
setting up the per-scenario environment.
-->
<ItemGroup Condition=" '$(TestEnvFileName)' != '' and '$(TargetOS)' == 'windows' ">
<HelixPreCommand Include="set __TestEnv=%HELIX_CORRELATION_PAYLOAD%\$(TestEnvFileName)" />
<HelixPreCommand Include="type %__TestEnv%" />
<HelixPreCommand Include="call %__TestEnv%" />
<!-- Display the interesting COMPlus variables that are set in the environment -->
<HelixPreCommand Include="set COMPlus" />
</ItemGroup>
<ItemGroup Condition=" '$(TestEnvFileName)' != '' and '$(TargetOS)' != 'windows' ">
<HelixPreCommand Include="export __TestEnv=$HELIX_CORRELATION_PAYLOAD/$(TestEnvFileName)" />
<HelixPreCommand Include="cat $__TestEnv" />
<HelixPreCommand Include=". $__TestEnv" /> <!-- Use "." not "source"; some clients appear to run scripts with "sh" not "bash" -->
<!-- Display the interesting COMPlus variables that are set in the environment -->
<HelixPreCommand Include="printenv | grep COMPlus" />
</ItemGroup>
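  <!-- Illustration only (the generated script is not part of this file): for the gcstress0xc
       scenario, SetStressModes_gcstress0xc.sh essentially just exports the stress settings, for
       example "export COMPlus_GCStress=0xC"; the pre-commands above source it via __TestEnv and
       then show the resulting COMPlus variables with printenv. -->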
<ItemGroup Condition="'$(NeedsWorkload)' == 'true'">
<HelixCommandPrefixItem Condition="'$(WindowsShell)' != 'true'" Include="PATH=$HELIX_CORRELATION_PAYLOAD/$(SdkForWorkloadTestingDirName):$PATH" />
<HelixCommandPrefixItem Condition="'$(WindowsShell)' == 'true'" Include="PATH=%HELIX_CORRELATION_PAYLOAD%\$(SdkForWorkloadTestingDirName)%3B%PATH%" />
<HelixCommandPrefixItem Condition="'$(WindowsShell)' != 'true'" Include="DOTNET_CLI_HOME=$HELIX_CORRELATION_PAYLOAD/$(SdkForWorkloadTestingDirName)" />
<!--<HelixCommandPrefixItem Condition="'$(WindowsShell)' == 'true'" Include="DOTNET_CLI_HOME=%HELIX_CORRELATION_PAYLOAD%\$(SdkForWorkloadTestingDirName)" />-->
<HelixCommandPrefixItem Condition="'$(WindowsShell)' != 'true'" Include="DOTNET_ROOT=$HELIX_CORRELATION_PAYLOAD/$(SdkForWorkloadTestingDirName)" />
<!--<HelixCommandPrefixItem Condition="'$(WindowsShell)' == 'true'" Include="DOTNET_ROOT=%HELIX_CORRELATION_PAYLOAD%\$(SdkForWorkloadTestingDirName)" />-->
<HelixCommandPrefixItem Condition="'$(WindowsShell)' != 'true'" Include="SDK_FOR_WORKLOAD_TESTING_PATH=%24{HELIX_CORRELATION_PAYLOAD}/$(SdkForWorkloadTestingDirName)" />
<HelixCommandPrefixItem Condition="'$(WindowsShell)' == 'true'" Include="SDK_FOR_WORKLOAD_TESTING_PATH=%HELIX_CORRELATION_PAYLOAD%\$(SdkForWorkloadTestingDirName)" />
<HelixCommandPrefixItem Include="DOTNET_CLI_TELEMETRY_OPTOUT=1" />
<HelixCommandPrefixItem Condition="'$(TestUsingWorkloads)' == 'true'" Include="TEST_USING_WORKLOADS=true" />
</ItemGroup>
<PropertyGroup Condition="'$(NeedsDotNetSdk)' == 'true'">
<IncludeDotNetCli>true</IncludeDotNetCli>
<DotNetCliPackageType>sdk</DotNetCliPackageType>
</PropertyGroup>
<PropertyGroup Condition="'$(UseDotNetCliVersionFromGlobalJson)' == 'true'">
<GlobalJsonContent>$([System.IO.File]::ReadAllText('$(RepoRoot)global.json'))</GlobalJsonContent>
<DotNetCliVersion>$([System.Text.RegularExpressions.Regex]::Match($(GlobalJsonContent), '(%3F<="dotnet": ").*(%3F=")'))</DotNetCliVersion>
</PropertyGroup>
<ItemGroup>
<HelixProperties Condition="'$(RuntimeFlavor)' != ''" Include="runtimeFlavor" Value="$(RuntimeFlavor)" />
<HelixProperties Condition="'$(Scenario)' != ''" Include="scenario" Value="$(Scenario)" />
</ItemGroup>
<!-- Ensure that all HelixPreCommand items are ready before this -->
<Target Name="BuildHelixCommand">
<PropertyGroup>
<HelixPreCommands>@(HelixPreCommand)</HelixPreCommands>
<HelixCommandPrefix Condition="'$(WindowsShell)' == 'true' and @(HelixCommandPrefixItem->Count()) > 0" >$(HelixCommandPrefix) @(HelixCommandPrefixItem -> 'set "%(Identity)"', ' & ')</HelixCommandPrefix>
<HelixCommandPrefix Condition="'$(WindowsShell)' != 'true' and @(HelixCommandPrefixItem->Count()) > 0 ">$(HelixCommandPrefix) @(HelixCommandPrefixItem, ' ')</HelixCommandPrefix>
<IncludeHelixCorrelationPayload Condition="'$(IncludeHelixCorrelationPayload)' == '' and '$(HelixCorrelationPayload)' != ''">true</IncludeHelixCorrelationPayload>
</PropertyGroup>
<PropertyGroup Condition="'$(HelixCommand)' == ''">
<HelixCommand Condition="'$(HelixCommandPrefix)' != '' and '$(WindowsShell)' != 'true'">$(HelixCommandPrefix) </HelixCommand>
<HelixCommand Condition="'$(HelixCommandPrefix)' != '' and '$(WindowsShell)' == 'true'">$(HelixCommandPrefix) & </HelixCommand>
<HelixCommand Condition="'$(InstallDevCerts)' == 'true' and '$(WindowsShell)' != 'true'">$(HelixCommand) dotnet dev-certs https && </HelixCommand>
      <!-- on windows `dotnet dev-certs https` shows a dialog, so instead install the certificate with powershell -->
<HelixCommand Condition="'$(InstallDevCerts)' == 'true' and '$(WindowsShell)' == 'true'">$(HelixCommand) powershell -command "New-SelfSignedCertificate -FriendlyName 'ASP.NET Core HTTPS development certificate' -DnsName @('localhost') -Subject 'CN = localhost' -KeyAlgorithm RSA -KeyLength 2048 -HashAlgorithm sha256 -CertStoreLocation 'Cert:\CurrentUser\My' -TextExtension @('2.5.29.37={text}1.3.6.1.5.5.7.3.1','1.3.6.1.4.1.311.84.1.1={hex}02','2.5.29.19={text}') -KeyUsage DigitalSignature,KeyEncipherment" && </HelixCommand>
<!--
For Windows we need to use "call", since the command is going to be called from a batch script created by Helix.
We "exit /b" at the end of RunTests.cmd. Helix runs some other commands after ours within the batch script,
so if we don't use "call", then we cause the parent script to exit, and anything after will not be executed.
-->
<HelixCommand Condition="'$(WindowsShell)' == 'true'">$(HelixCommand)call RunTests.cmd</HelixCommand>
<HelixCommand Condition="'$(WindowsShell)' == 'true' and '$(IncludeHelixCorrelationPayload)' == 'true'">$(HelixCommand) --runtime-path %HELIX_CORRELATION_PAYLOAD%</HelixCommand>
<HelixCommand Condition="'$(WindowsShell)' != 'true'">$(HelixCommand)./RunTests.sh</HelixCommand>
<HelixCommand Condition="'$(WindowsShell)' != 'true' and '$(IncludeHelixCorrelationPayload)' == 'true'">$(HelixCommand) --runtime-path "$HELIX_CORRELATION_PAYLOAD"</HelixCommand>
</PropertyGroup>
<!-- FIXME: is this used? -->
<PropertyGroup Condition="'$(RuntimeFlavor)' == 'Mono'">
<_MonoAotCrossCompilerPath>$([MSBuild]::NormalizePath($(MonoAotCrossDir), 'mono-aot-cross'))</_MonoAotCrossCompilerPath>
<_MonoAotCrossCompilerPath Condition="$([MSBuild]::IsOSPlatform('WINDOWS'))">$(_MonoAotCrossCompilerPath).exe</_MonoAotCrossCompilerPath>
</PropertyGroup>
<ItemGroup Condition="'$(RuntimeFlavor)' == 'Mono'">
<MonoAotCrossCompiler Include="$(_MonoAotCrossCompilerPath)" RuntimeIdentifier="$(TargetOS.ToLowerInvariant())-$(TargetArchitecture.ToLowerInvariant())" />
</ItemGroup>
</Target>
<!--
Create all the Helix data to start a set of jobs. Create a set of work items, one for each libraries
test assembly. All will have the same command line. Note that this target is listed in the
InitialTargets for this Project. This causes it to be invoked (and the Helix data created,
such as the HelixWorkItem item group) before Helix "Test" target is invoked (as a normal target).
-->
<Target Name="BuildHelixWorkItems" DependsOnTargets="$(BuildHelixWorkItemsDependsOn)">
<Message Condition="'$(Scenario)' == ''" Importance="High" Text="Building Helix work items" />
<Message Condition="'$(Scenario)' != ''" Importance="High" Text="Building Helix work items for scenario $(Scenario)" />
<Message Importance="High" Text="Using TestRunNamePrefix: $(TestRunNamePrefix)" />
<Message Condition="'$(HelixCorrelationPayload)' != ''" Importance="High" Text="Using HelixCorrelationPayload: $(HelixCorrelationPayload)" />
<Message Importance="High" Text="Using HelixCommand: $(HelixCommand)" />
<Message Importance="High" Text="Using HelixType: $(HelixType)" />
<Message Importance="High" Text="Using WorkItemArchiveWildCard: $(WorkItemArchiveWildCard)" />
<Message Importance="High" Text="Using Timeout: $(_workItemTimeout)" />
<PropertyGroup Condition="'$(RuntimeFlavor)' == 'CoreCLR' and '$(BUILD_BUILDID)' != ''">
<HelixPostCommands Condition="'$(TargetOS)' == 'windows'">
$(HelixPostCommands);
%HELIX_PYTHONPATH% %HELIX_CORRELATION_PAYLOAD%\gen-debug-dump-docs.py -buildid $(BUILD_BUILDID) -workitem %HELIX_WORKITEM_FRIENDLYNAME% -jobid %HELIX_CORRELATION_ID% -outdir %HELIX_WORKITEM_UPLOAD_ROOT% -templatedir %HELIX_CORRELATION_PAYLOAD% -dumpdir %HELIX_DUMP_FOLDER% -productver $(ProductVersion)
</HelixPostCommands>
<HelixPostCommands Condition="'$(TargetOS)' != 'windows'">
$(HelixPostCommands);
$HELIX_PYTHONPATH $HELIX_CORRELATION_PAYLOAD/gen-debug-dump-docs.py -buildid $(BUILD_BUILDID) -workitem $HELIX_WORKITEM_FRIENDLYNAME -jobid $HELIX_CORRELATION_ID -outdir $HELIX_WORKITEM_UPLOAD_ROOT -templatedir $HELIX_CORRELATION_PAYLOAD -dumpdir $HELIX_DUMP_FOLDER -productver $(ProductVersion)
</HelixPostCommands>
</PropertyGroup>
<Error Condition="'$(NeedsWorkload)' == 'true' and '$(TestUsingWorkloads)' == 'true' and ('$(SdkWithWorkloadForTestingPath)' == '' or !Exists($(SdkWithWorkloadForTestingPath)))"
Text="Could not find workload at %24(SdkWithWorkloadForTestingPath)=$(SdkWithWorkloadForTestingPath)" />
<Error Condition="'$(NeedsWorkload)' == 'true' and '$(TestUsingWorkloads)' != 'true' and ('$(SdkWithNoWorkloadForTestingPath)' == '' or !Exists($(SdkWithNoWorkloadForTestingPath)))"
Text="Could not find workload at %24(SdkWithNoWorkloadForTestingPath)=$(SdkWithNoWorkloadForTestingPath)" />
<ItemGroup Condition="'$(NeedsWorkload)' == 'true'">
<HelixCorrelationPayload Include="$(SdkWithWorkloadForTestingPath)" Destination="$(SdkForWorkloadTestingDirName)" Condition="'$(TestUsingWorkloads)' == 'true'" />
<HelixCorrelationPayload Include="$(SdkWithNoWorkloadForTestingPath)" Destination="$(SdkForWorkloadTestingDirName)" Condition="'$(TestUsingWorkloads)' != 'true'" />
<HelixCorrelationPayload Include="$(MicrosoftNetCoreAppRefPackDir)" Destination="microsoft.netcore.app.ref" />
</ItemGroup>
<ItemGroup Condition="'$(EnableDefaultBuildHelixWorkItems)' == 'true'">
<HelixCorrelationPayload Include="$(HelixCorrelationPayload)"
Condition="'$(IncludeHelixCorrelationPayload)' == 'true'"
AsArchive="$(HelixCorrelationPayload.EndsWith('.zip'))" />
<_DefaultWorkItems Include="$(WorkItemArchiveWildCard)" Exclude="$(HelixCorrelationPayload)" />
<HelixWorkItem Include="@(_DefaultWorkItems -> '$(WorkItemPrefix)%(FileName)')">
<PayloadArchive>%(Identity)</PayloadArchive>
<Command>$(HelixCommand)</Command>
<Timeout>$(_workItemTimeout)</Timeout>
</HelixWorkItem>
</ItemGroup>
<Message Condition="'$(Scenario)' != ''" Importance="High" Text="Done building Helix work items for scenario $(Scenario). Work item count: @(HelixWorkItem->Count())" />
<Message Condition="'$(Scenario)' == '' and '$(TargetOS)' != 'Android' and '$(TargetOS)' != 'iOS' and '$(TargetOS)' != 'iOSSimulator' and '$(TargetOS)' != 'tvOS' and '$(TargetOS)' != 'tvOSSimulator' and '$(TargetOS)' != 'MacCatalyst'" Importance="High" Text="Done building Helix work items. Work item count: @(HelixWorkItem->Count())" />
<Message Text="HelixCorrelationPayload: %(HelixCorrelationPayload.Identity)" Condition="'$(HelixDryRun)' == 'true'" Importance="High" />
<Message Text="HelixWorkItem: %(HelixWorkItem.Identity), Command: %(HelixWorkItem.Command), PreCommands: %(HelixWorkItem.PreCommands) with PayloadArchive: %(HelixWorkItem.PayloadArchive)" Condition="'$(HelixDryRun)' == 'true'" Importance="High" />
<Error Condition="@(XHarnessApkToTest->Count()) == 0 and @(XHarnessAppBundleToTest->Count()) == 0 and @(HelixWorkItem->Count()) == 0"
Text="No helix work items, or APKs, or AppBundles found to test" />
<Error Condition="'%(HelixWorkItem.Identity)' != '' and ('%(HelixWorkItem.PayloadArchive)' == '' or !Exists(%(HelixWorkItem.PayloadArchive)))"
Text="Missing PayloadArchive for @(HelixWorkItem)" />
<Error Text="Stopping the build for dry run" Condition="'$(HelixDryRun)' == 'true'" />
</Target>
<Target Name="PrintHelixQueues">
<Message Importance="High" Text="Using Queues: $(HelixTargetQueues)" />
<Message Condition="'$(Scenario)' == 'BuildWasmApps'" Importance="High"
Text="Scenario: $(Scenario), TestUsingWorkloads: $(TestUsingWorkloads)" />
</Target>
<Target Name="PrintBuildTargetFramework">
<Message Importance="High" Text="Build TargetFramework: $(BuildTargetFramework)" />
</Target>
</Project>
| <Project InitialTargets="PrintHelixQueues;PrintBuildTargetFramework;BuildHelixWorkItems" Sdk="Microsoft.DotNet.Helix.Sdk">
<!-- This project uses the Helix SDK, documented at
https://github.com/dotnet/arcade/tree/master/src/Microsoft.DotNet.Helix/Sdk,
to send test jobs to Helix.
-->
<PropertyGroup>
<WindowsShell Condition="'$(TargetOS)' == 'windows' or '$(BrowserHost)' == 'windows'">true</WindowsShell>
<!-- Set Helix build to build number if available -->
<HelixBuild Condition="'$(HelixBuild)' == ''">$(BUILD_BUILDNUMBER)</HelixBuild>
<HelixBuild Condition="'$(HelixBuild)' == ''">default</HelixBuild>
<HelixConfiguration>$(Configuration)</HelixConfiguration>
<HelixArchitecture>$(TargetArchitecture)</HelixArchitecture>
<BuildHelixWorkItemsDependsOn>BuildHelixCommand</BuildHelixWorkItemsDependsOn>
<EnableDefaultBuildHelixWorkItems>true</EnableDefaultBuildHelixWorkItems>
</PropertyGroup>
<Import Project="$(MSBuildThisFileDirectory)sendtohelix-wasm.targets" Condition="'$(TargetOS)' == 'Browser'" />
<Import Project="$(MSBuildThisFileDirectory)sendtohelix-mobile.targets" Condition="'$(TargetsMobile)' == 'true' and '$(TargetOS)' != 'Browser'" />
<PropertyGroup Condition="'$(_workItemTimeout)' == ''">
<!-- Normal jobs have a 30 minute timeout for arm/arm64, and 15 minute timeout otherwise.
Stress modes can take considerably longer, so double those numbers. And GCStress is even slower.
-->
<_workItemTimeout Condition="
'$(Scenario)' == 'gcstress0x3' or
'$(Scenario)' == 'gcstress0xc' or
'$(Scenario)' == 'heapverify1' or
'$(Scenario)' == 'gcstress0xc_zapdisable' or
'$(Scenario)' == 'gcstress0xc_zapdisable_jitstress2' or
'$(Scenario)' == 'gcstress0xc_zapdisable_heapverify1' or
'$(Scenario)' == 'gcstress0xc_jitstress1' or
'$(Scenario)' == 'gcstress0xc_jitstress2' or
'$(Scenario)' == 'gcstress0xc_jitminopts_heapverify1'">06:00:00</_workItemTimeout>
<_workItemTimeout Condition="'$(_workItemTimeout)' == '' and ('$(TargetOS)' == 'iOSSimulator' or '$(TargetOS)' == 'tvOSSimulator' or '$(TargetOS)' == 'MacCatalyst' or '$(TargetOS)' == 'Android')">00:30:00</_workItemTimeout>
<_workItemTimeout Condition="'$(_workItemTimeout)' == '' and ('$(TargetOS)' == 'iOS' or '$(TargetOS)' == 'tvOS')">00:45:00</_workItemTimeout>
<_workItemTimeout Condition="'$(Scenario)' == '' and '$(_workItemTimeout)' == '' and ('$(TargetArchitecture)' == 'arm64' or '$(TargetArchitecture)' == 'arm')">00:45:00</_workItemTimeout>
<_workItemTimeout Condition="'$(Scenario)' != '' and '$(_workItemTimeout)' == '' and ('$(TargetArchitecture)' == 'arm64' or '$(TargetArchitecture)' == 'arm')">01:00:00</_workItemTimeout>
<_workItemTimeout Condition="'$(Scenario)' == '' and '$(_workItemTimeout)' == '' and '$(Outerloop)' == 'true'">00:20:00</_workItemTimeout>
<_workItemTimeout Condition="'$(Scenario)' == '' and '$(_workItemTimeout)' == ''">00:15:00</_workItemTimeout>
<_workItemTimeout Condition="'$(Scenario)' != '' and '$(_workItemTimeout)' == ''">00:30:00</_workItemTimeout>
</PropertyGroup>
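  <!-- Same fallthrough as in the previous copy of this file, except that the GCStress and
       heapverify scenarios listed above now resolve to a 06:00:00 work-item timeout instead of
       01:30:00; every other case is unchanged. -->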
<PropertyGroup>
<!-- The Helix runtime payload and the tests to run -->
<!-- TestArchiveRuntimeFile will be passed as a property by the calling project -->
<HelixCorrelationPayload Condition="'$(HelixCorrelationPayload)' == ''">$(TestArchiveRuntimeFile)</HelixCorrelationPayload>
<WorkItemArchiveWildCard Condition="'$(WorkItemArchiveWildCard)' == ''">$(TestArchiveTestsRoot)**/*.zip</WorkItemArchiveWildCard>
    <!-- This property is used to show the test results in Azure Dev Ops. By setting this property the
test run name will be displayed as $(BuildTargetFramework)-$(TargetOS)-$(Configuration)-$(TargetArchitecture)-$(HelixTargetQueue)
In the multi-scenario case, we append the scenario name to this test name prefix to distinguish the different scenario results.
-->
<TestRunNamePrefix>$(BuildTargetFramework)-$(TargetOS)-$(Configuration)-$(TargetArchitecture)-</TestRunNamePrefix>
<TestRunNamePrefix Condition="'$(TestRunNamePrefixSuffix)' != ''">$(TestRunNamePrefix)$(TestRunNamePrefixSuffix)-</TestRunNamePrefix>
<TestRunNamePrefix Condition="'$(Scenario)' != ''">$(TestRunNamePrefix)$(Scenario)-</TestRunNamePrefix>
<FailOnTestFailure Condition="'$(FailOnTestFailure)' == '' and '$(WaitForWorkItemCompletion)' != ''">$(WaitForWorkItemCompletion)</FailOnTestFailure>
<SdkForWorkloadTestingDirName Condition="'$(SdkForWorkloadTestingDirName)' == '' and '$(NeedsWorkload)' == 'true' and '$(TestUsingWorkloads)' == 'true'">dotnet-workload</SdkForWorkloadTestingDirName>
<SdkForWorkloadTestingDirName Condition="'$(SdkForWorkloadTestingDirName)' == '' and '$(NeedsWorkload)' == 'true' and '$(TestUsingWorkloads)' != 'true'">sdk-no-workload</SdkForWorkloadTestingDirName>
</PropertyGroup>
<PropertyGroup Condition="'$(HelixType)' == ''">
<!-- For PRs we want HelixType to be the same for all frameworks except package testing-->
<TestScope Condition="'$(TestScope)' == ''">innerloop</TestScope>
<HelixType>test/functional/cli/$(TestScope)/</HelixType>
</PropertyGroup>
<PropertyGroup Condition="'$(TargetOS)' == 'Browser' or '$(TargetOS)' == 'Android' or '$(TargetOS)' == 'iOS' or '$(TargetOS)' == 'iOSSimulator' or '$(TargetOS)' == 'tvOS' or '$(TargetOS)' == 'tvOSSimulator' or '$(TargetOS)' == 'MacCatalyst'">
<IncludeXHarnessCli>true</IncludeXHarnessCli>
<EnableXHarnessTelemetry>true</EnableXHarnessTelemetry>
</PropertyGroup>
<ItemGroup Condition="'$(MonoEnvOptions)' != ''">
<HelixPreCommand Condition="'$(WindowsShell)' == 'true'" Include="set MONO_ENV_OPTIONS='$(MonoEnvOptions)'" />
<HelixPreCommand Condition="'$(WindowsShell)' != 'true'" Include="export MONO_ENV_OPTIONS='$(MonoEnvOptions)'" />
</ItemGroup>
<ItemGroup Condition="'$(WindowsShell)' == 'true'">
<HelixPreCommand Include="taskkill.exe /f /im corerun.exe"/>
<HelixPostCommand Include="taskkill.exe /f /im corerun.exe"/>
</ItemGroup>
<PropertyGroup Condition="'$(NeedsWorkload)' == 'true'">
<NeedsDotNetSdk>false</NeedsDotNetSdk>
<IncludeXHarnessCli>true</IncludeXHarnessCli>
<EnableXHarnessTelemetry>true</EnableXHarnessTelemetry>
</PropertyGroup>
<PropertyGroup>
<!-- Set the name of the scenario file. Note that this is only used in invocations where $(Scenario) is set
(which is when this project is invoked to call the "CreateTestEnvFile" target).
-->
<TestEnvFileName></TestEnvFileName>
<TestEnvFileName Condition=" '$(Scenario)' != '' and '$(TargetOS)' == 'windows'">SetStressModes_$(Scenario).cmd</TestEnvFileName>
<TestEnvFileName Condition=" '$(Scenario)' != '' and '$(TargetOS)' != 'windows' and '$(TargetOS)' != 'Browser'">SetStressModes_$(Scenario).sh</TestEnvFileName>
</PropertyGroup>
<!-- HelixPreCommands is a set of commands run before the work item command. We use it here to inject
setting up the per-scenario environment.
-->
<ItemGroup Condition=" '$(TestEnvFileName)' != '' and '$(TargetOS)' == 'windows' ">
<HelixPreCommand Include="set __TestEnv=%HELIX_CORRELATION_PAYLOAD%\$(TestEnvFileName)" />
<HelixPreCommand Include="type %__TestEnv%" />
<HelixPreCommand Include="call %__TestEnv%" />
<!-- Display the interesting COMPlus variables that are set in the environment -->
<HelixPreCommand Include="set COMPlus" />
</ItemGroup>
<ItemGroup Condition=" '$(TestEnvFileName)' != '' and '$(TargetOS)' != 'windows' ">
<HelixPreCommand Include="export __TestEnv=$HELIX_CORRELATION_PAYLOAD/$(TestEnvFileName)" />
<HelixPreCommand Include="cat $__TestEnv" />
<HelixPreCommand Include=". $__TestEnv" /> <!-- Use "." not "source"; some clients appear to run scripts with "sh" not "bash" -->
<!-- Display the interesting COMPlus variables that are set in the environment -->
<HelixPreCommand Include="printenv | grep COMPlus" />
</ItemGroup>
<ItemGroup Condition="'$(NeedsWorkload)' == 'true'">
<HelixCommandPrefixItem Condition="'$(WindowsShell)' != 'true'" Include="PATH=$HELIX_CORRELATION_PAYLOAD/$(SdkForWorkloadTestingDirName):$PATH" />
<HelixCommandPrefixItem Condition="'$(WindowsShell)' == 'true'" Include="PATH=%HELIX_CORRELATION_PAYLOAD%\$(SdkForWorkloadTestingDirName)%3B%PATH%" />
<HelixCommandPrefixItem Condition="'$(WindowsShell)' != 'true'" Include="DOTNET_CLI_HOME=$HELIX_CORRELATION_PAYLOAD/$(SdkForWorkloadTestingDirName)" />
<!--<HelixCommandPrefixItem Condition="'$(WindowsShell)' == 'true'" Include="DOTNET_CLI_HOME=%HELIX_CORRELATION_PAYLOAD%\$(SdkForWorkloadTestingDirName)" />-->
<HelixCommandPrefixItem Condition="'$(WindowsShell)' != 'true'" Include="DOTNET_ROOT=$HELIX_CORRELATION_PAYLOAD/$(SdkForWorkloadTestingDirName)" />
<!--<HelixCommandPrefixItem Condition="'$(WindowsShell)' == 'true'" Include="DOTNET_ROOT=%HELIX_CORRELATION_PAYLOAD%\$(SdkForWorkloadTestingDirName)" />-->
<HelixCommandPrefixItem Condition="'$(WindowsShell)' != 'true'" Include="SDK_FOR_WORKLOAD_TESTING_PATH=%24{HELIX_CORRELATION_PAYLOAD}/$(SdkForWorkloadTestingDirName)" />
<HelixCommandPrefixItem Condition="'$(WindowsShell)' == 'true'" Include="SDK_FOR_WORKLOAD_TESTING_PATH=%HELIX_CORRELATION_PAYLOAD%\$(SdkForWorkloadTestingDirName)" />
<HelixCommandPrefixItem Include="DOTNET_CLI_TELEMETRY_OPTOUT=1" />
<HelixCommandPrefixItem Condition="'$(TestUsingWorkloads)' == 'true'" Include="TEST_USING_WORKLOADS=true" />
</ItemGroup>
<PropertyGroup Condition="'$(NeedsDotNetSdk)' == 'true'">
<IncludeDotNetCli>true</IncludeDotNetCli>
<DotNetCliPackageType>sdk</DotNetCliPackageType>
</PropertyGroup>
<PropertyGroup Condition="'$(UseDotNetCliVersionFromGlobalJson)' == 'true'">
<GlobalJsonContent>$([System.IO.File]::ReadAllText('$(RepoRoot)global.json'))</GlobalJsonContent>
<DotNetCliVersion>$([System.Text.RegularExpressions.Regex]::Match($(GlobalJsonContent), '(%3F<="dotnet": ").*(%3F=")'))</DotNetCliVersion>
</PropertyGroup>
<ItemGroup>
<HelixProperties Condition="'$(RuntimeFlavor)' != ''" Include="runtimeFlavor" Value="$(RuntimeFlavor)" />
<HelixProperties Condition="'$(Scenario)' != ''" Include="scenario" Value="$(Scenario)" />
</ItemGroup>
<!-- Ensure that all HelixPreCommand items are ready before this -->
<Target Name="BuildHelixCommand">
<PropertyGroup>
<HelixPreCommands>@(HelixPreCommand)</HelixPreCommands>
<HelixCommandPrefix Condition="'$(WindowsShell)' == 'true' and @(HelixCommandPrefixItem->Count()) > 0" >$(HelixCommandPrefix) @(HelixCommandPrefixItem -> 'set "%(Identity)"', ' & ')</HelixCommandPrefix>
<HelixCommandPrefix Condition="'$(WindowsShell)' != 'true' and @(HelixCommandPrefixItem->Count()) > 0 ">$(HelixCommandPrefix) @(HelixCommandPrefixItem, ' ')</HelixCommandPrefix>
<IncludeHelixCorrelationPayload Condition="'$(IncludeHelixCorrelationPayload)' == '' and '$(HelixCorrelationPayload)' != ''">true</IncludeHelixCorrelationPayload>
</PropertyGroup>
<PropertyGroup Condition="'$(HelixCommand)' == ''">
<HelixCommand Condition="'$(HelixCommandPrefix)' != '' and '$(WindowsShell)' != 'true'">$(HelixCommandPrefix) </HelixCommand>
<HelixCommand Condition="'$(HelixCommandPrefix)' != '' and '$(WindowsShell)' == 'true'">$(HelixCommandPrefix) & </HelixCommand>
<HelixCommand Condition="'$(InstallDevCerts)' == 'true' and '$(WindowsShell)' != 'true'">$(HelixCommand) dotnet dev-certs https && </HelixCommand>
      <!-- on windows `dotnet dev-certs https` shows a dialog, so instead install the certificate with powershell -->
<HelixCommand Condition="'$(InstallDevCerts)' == 'true' and '$(WindowsShell)' == 'true'">$(HelixCommand) powershell -command "New-SelfSignedCertificate -FriendlyName 'ASP.NET Core HTTPS development certificate' -DnsName @('localhost') -Subject 'CN = localhost' -KeyAlgorithm RSA -KeyLength 2048 -HashAlgorithm sha256 -CertStoreLocation 'Cert:\CurrentUser\My' -TextExtension @('2.5.29.37={text}1.3.6.1.5.5.7.3.1','1.3.6.1.4.1.311.84.1.1={hex}02','2.5.29.19={text}') -KeyUsage DigitalSignature,KeyEncipherment" && </HelixCommand>
<!--
For Windows we need to use "call", since the command is going to be called from a batch script created by Helix.
We "exit /b" at the end of RunTests.cmd. Helix runs some other commands after ours within the batch script,
so if we don't use "call", then we cause the parent script to exit, and anything after will not be executed.
-->
<HelixCommand Condition="'$(WindowsShell)' == 'true'">$(HelixCommand)call RunTests.cmd</HelixCommand>
<HelixCommand Condition="'$(WindowsShell)' == 'true' and '$(IncludeHelixCorrelationPayload)' == 'true'">$(HelixCommand) --runtime-path %HELIX_CORRELATION_PAYLOAD%</HelixCommand>
<HelixCommand Condition="'$(WindowsShell)' != 'true'">$(HelixCommand)./RunTests.sh</HelixCommand>
<HelixCommand Condition="'$(WindowsShell)' != 'true' and '$(IncludeHelixCorrelationPayload)' == 'true'">$(HelixCommand) --runtime-path "$HELIX_CORRELATION_PAYLOAD"</HelixCommand>
</PropertyGroup>
<!-- FIXME: is this used? -->
<PropertyGroup Condition="'$(RuntimeFlavor)' == 'Mono'">
<_MonoAotCrossCompilerPath>$([MSBuild]::NormalizePath($(MonoAotCrossDir), 'mono-aot-cross'))</_MonoAotCrossCompilerPath>
<_MonoAotCrossCompilerPath Condition="$([MSBuild]::IsOSPlatform('WINDOWS'))">$(_MonoAotCrossCompilerPath).exe</_MonoAotCrossCompilerPath>
</PropertyGroup>
<ItemGroup Condition="'$(RuntimeFlavor)' == 'Mono'">
<MonoAotCrossCompiler Include="$(_MonoAotCrossCompilerPath)" RuntimeIdentifier="$(TargetOS.ToLowerInvariant())-$(TargetArchitecture.ToLowerInvariant())" />
</ItemGroup>
</Target>
<!--
Create all the Helix data to start a set of jobs. Create a set of work items, one for each libraries
test assembly. All will have the same command line. Note that this target is listed in the
InitialTargets for this Project. This causes it to be invoked (and the Helix data created,
such as the HelixWorkItem item group) before Helix "Test" target is invoked (as a normal target).
-->
<Target Name="BuildHelixWorkItems" DependsOnTargets="$(BuildHelixWorkItemsDependsOn)">
<Message Condition="'$(Scenario)' == ''" Importance="High" Text="Building Helix work items" />
<Message Condition="'$(Scenario)' != ''" Importance="High" Text="Building Helix work items for scenario $(Scenario)" />
<Message Importance="High" Text="Using TestRunNamePrefix: $(TestRunNamePrefix)" />
<Message Condition="'$(HelixCorrelationPayload)' != ''" Importance="High" Text="Using HelixCorrelationPayload: $(HelixCorrelationPayload)" />
<Message Importance="High" Text="Using HelixCommand: $(HelixCommand)" />
<Message Importance="High" Text="Using HelixType: $(HelixType)" />
<Message Importance="High" Text="Using WorkItemArchiveWildCard: $(WorkItemArchiveWildCard)" />
<Message Importance="High" Text="Using Timeout: $(_workItemTimeout)" />
<PropertyGroup Condition="'$(RuntimeFlavor)' == 'CoreCLR' and '$(BUILD_BUILDID)' != ''">
<HelixPostCommands Condition="'$(TargetOS)' == 'windows'">
$(HelixPostCommands);
%HELIX_PYTHONPATH% %HELIX_CORRELATION_PAYLOAD%\gen-debug-dump-docs.py -buildid $(BUILD_BUILDID) -workitem %HELIX_WORKITEM_FRIENDLYNAME% -jobid %HELIX_CORRELATION_ID% -outdir %HELIX_WORKITEM_UPLOAD_ROOT% -templatedir %HELIX_CORRELATION_PAYLOAD% -dumpdir %HELIX_DUMP_FOLDER% -productver $(ProductVersion)
</HelixPostCommands>
<HelixPostCommands Condition="'$(TargetOS)' != 'windows'">
$(HelixPostCommands);
$HELIX_PYTHONPATH $HELIX_CORRELATION_PAYLOAD/gen-debug-dump-docs.py -buildid $(BUILD_BUILDID) -workitem $HELIX_WORKITEM_FRIENDLYNAME -jobid $HELIX_CORRELATION_ID -outdir $HELIX_WORKITEM_UPLOAD_ROOT -templatedir $HELIX_CORRELATION_PAYLOAD -dumpdir $HELIX_DUMP_FOLDER -productver $(ProductVersion)
</HelixPostCommands>
</PropertyGroup>
<Error Condition="'$(NeedsWorkload)' == 'true' and '$(TestUsingWorkloads)' == 'true' and ('$(SdkWithWorkloadForTestingPath)' == '' or !Exists($(SdkWithWorkloadForTestingPath)))"
Text="Could not find workload at %24(SdkWithWorkloadForTestingPath)=$(SdkWithWorkloadForTestingPath)" />
<Error Condition="'$(NeedsWorkload)' == 'true' and '$(TestUsingWorkloads)' != 'true' and ('$(SdkWithNoWorkloadForTestingPath)' == '' or !Exists($(SdkWithNoWorkloadForTestingPath)))"
Text="Could not find workload at %24(SdkWithNoWorkloadForTestingPath)=$(SdkWithNoWorkloadForTestingPath)" />
<ItemGroup Condition="'$(NeedsWorkload)' == 'true'">
<HelixCorrelationPayload Include="$(SdkWithWorkloadForTestingPath)" Destination="$(SdkForWorkloadTestingDirName)" Condition="'$(TestUsingWorkloads)' == 'true'" />
<HelixCorrelationPayload Include="$(SdkWithNoWorkloadForTestingPath)" Destination="$(SdkForWorkloadTestingDirName)" Condition="'$(TestUsingWorkloads)' != 'true'" />
<HelixCorrelationPayload Include="$(MicrosoftNetCoreAppRefPackDir)" Destination="microsoft.netcore.app.ref" />
</ItemGroup>
<ItemGroup Condition="'$(EnableDefaultBuildHelixWorkItems)' == 'true'">
<HelixCorrelationPayload Include="$(HelixCorrelationPayload)"
Condition="'$(IncludeHelixCorrelationPayload)' == 'true'"
AsArchive="$(HelixCorrelationPayload.EndsWith('.zip'))" />
<_DefaultWorkItems Include="$(WorkItemArchiveWildCard)" Exclude="$(HelixCorrelationPayload)" />
<HelixWorkItem Include="@(_DefaultWorkItems -> '$(WorkItemPrefix)%(FileName)')">
<PayloadArchive>%(Identity)</PayloadArchive>
<Command>$(HelixCommand)</Command>
<Timeout>$(_workItemTimeout)</Timeout>
</HelixWorkItem>
</ItemGroup>
<Message Condition="'$(Scenario)' != ''" Importance="High" Text="Done building Helix work items for scenario $(Scenario). Work item count: @(HelixWorkItem->Count())" />
<Message Condition="'$(Scenario)' == '' and '$(TargetOS)' != 'Android' and '$(TargetOS)' != 'iOS' and '$(TargetOS)' != 'iOSSimulator' and '$(TargetOS)' != 'tvOS' and '$(TargetOS)' != 'tvOSSimulator' and '$(TargetOS)' != 'MacCatalyst'" Importance="High" Text="Done building Helix work items. Work item count: @(HelixWorkItem->Count())" />
<Message Text="HelixCorrelationPayload: %(HelixCorrelationPayload.Identity)" Condition="'$(HelixDryRun)' == 'true'" Importance="High" />
<Message Text="HelixWorkItem: %(HelixWorkItem.Identity), Command: %(HelixWorkItem.Command), PreCommands: %(HelixWorkItem.PreCommands) with PayloadArchive: %(HelixWorkItem.PayloadArchive)" Condition="'$(HelixDryRun)' == 'true'" Importance="High" />
<Error Condition="@(XHarnessApkToTest->Count()) == 0 and @(XHarnessAppBundleToTest->Count()) == 0 and @(HelixWorkItem->Count()) == 0"
Text="No helix work items, or APKs, or AppBundles found to test" />
<Error Condition="'%(HelixWorkItem.Identity)' != '' and ('%(HelixWorkItem.PayloadArchive)' == '' or !Exists(%(HelixWorkItem.PayloadArchive)))"
Text="Missing PayloadArchive for @(HelixWorkItem)" />
<Error Text="Stopping the build for dry run" Condition="'$(HelixDryRun)' == 'true'" />
</Target>
<Target Name="PrintHelixQueues">
<Message Importance="High" Text="Using Queues: $(HelixTargetQueues)" />
<Message Condition="'$(Scenario)' == 'BuildWasmApps'" Importance="High"
Text="Scenario: $(Scenario), TestUsingWorkloads: $(TestUsingWorkloads)" />
</Target>
<Target Name="PrintBuildTargetFramework">
<Message Importance="High" Text="Build TargetFramework: $(BuildTargetFramework)" />
</Target>
</Project>
| 1 |
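For context, sendtohelixhelp.proj above is designed to be invoked once per stress scenario by an outer project, with $(Scenario) passed in as a property and the Helix SDK's "Test" target doing the submission. The fragment below is a hedged sketch of such a driver; the ProjectsToInvoke item name is an assumption, while the scenario values and the "Test" target come from the file above.

<Project>
  <ItemGroup>
    <!-- One invocation per scenario; Scenario flows into the timeout and SetStressModes logic above. -->
    <ProjectsToInvoke Include="sendtohelixhelp.proj" AdditionalProperties="Scenario=gcstress0xc" />
    <ProjectsToInvoke Include="sendtohelixhelp.proj" AdditionalProperties="Scenario=heapverify1" />
  </ItemGroup>
  <Target Name="RunStressScenarios">
    <MSBuild Projects="@(ProjectsToInvoke)" Targets="Test" BuildInParallel="false" />
  </Target>
</Project>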
dotnet/runtime | 65,924 | Fix libraries GCStress pipeline | Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| BruceForstall | "2022-02-27T04:30:37Z" | "2022-03-01T18:20:06Z" | dfe08b3bea9a1897562635307ab4bb70bc31fada | 79026a542a8364b6783c5d212bb1841ac2c873a9 | Fix libraries GCStress pipeline. Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| ./src/tests/Loader/classloader/TSAmbiguities/CollapsedMethods/InterfaceImplementation/HelloWorld.csproj | <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<AllowUnsafeBlocks>true</AllowUnsafeBlocks>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<ItemGroup>
<Compile Include="HelloWorld.cs" />
</ItemGroup>
</Project>
| <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<AllowUnsafeBlocks>true</AllowUnsafeBlocks>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<ItemGroup>
<Compile Include="HelloWorld.cs" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime | 65,924 | Fix libraries GCStress pipeline | Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| BruceForstall | "2022-02-27T04:30:37Z" | "2022-03-01T18:20:06Z" | dfe08b3bea9a1897562635307ab4bb70bc31fada | 79026a542a8364b6783c5d212bb1841ac2c873a9 | Fix libraries GCStress pipeline. Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| ./src/tests/CoreMangLib/system/enum/EnumIConvertibleToSingle.csproj | <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<AllowUnsafeBlocks>true</AllowUnsafeBlocks>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<ItemGroup>
<Compile Include="enumiconvertibletosingle.cs" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="$(TestSourceDir)Common/CoreCLRTestLibrary/CoreCLRTestLibrary.csproj" />
</ItemGroup>
</Project>
| <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<AllowUnsafeBlocks>true</AllowUnsafeBlocks>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<ItemGroup>
<Compile Include="enumiconvertibletosingle.cs" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="$(TestSourceDir)Common/CoreCLRTestLibrary/CoreCLRTestLibrary.csproj" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime | 65,924 | Fix libraries GCStress pipeline | Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| BruceForstall | "2022-02-27T04:30:37Z" | "2022-03-01T18:20:06Z" | dfe08b3bea9a1897562635307ab4bb70bc31fada | 79026a542a8364b6783c5d212bb1841ac2c873a9 | Fix libraries GCStress pipeline. Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| ./src/libraries/System.Net.WebHeaderCollection/ref/System.Net.WebHeaderCollection.csproj | <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<TargetFramework>$(NetCoreAppCurrent)</TargetFramework>
<Nullable>enable</Nullable>
<!-- Nullability of parameter 'name' doesn't match overridden member -->
<NoWarn>$(NoWarn);CS8765</NoWarn>
</PropertyGroup>
<ItemGroup>
<Compile Include="System.Net.WebHeaderCollection.cs" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\..\System.Runtime\ref\System.Runtime.csproj" />
<ProjectReference Include="..\..\System.Collections.Specialized\ref\System.Collections.Specialized.csproj" />
</ItemGroup>
</Project>
| <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<TargetFramework>$(NetCoreAppCurrent)</TargetFramework>
<Nullable>enable</Nullable>
<!-- Nullability of parameter 'name' doesn't match overridden member -->
<NoWarn>$(NoWarn);CS8765</NoWarn>
</PropertyGroup>
<ItemGroup>
<Compile Include="System.Net.WebHeaderCollection.cs" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\..\System.Runtime\ref\System.Runtime.csproj" />
<ProjectReference Include="..\..\System.Collections.Specialized\ref\System.Collections.Specialized.csproj" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime | 65,924 | Fix libraries GCStress pipeline | Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| BruceForstall | "2022-02-27T04:30:37Z" | "2022-03-01T18:20:06Z" | dfe08b3bea9a1897562635307ab4bb70bc31fada | 79026a542a8364b6783c5d212bb1841ac2c873a9 | Fix libraries GCStress pipeline. Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| ./src/tests/JIT/jit64/valuetypes/nullable/castclass/generics/castclass-generics029.csproj | <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<DebugType>PdbOnly</DebugType>
</PropertyGroup>
<ItemGroup>
<Compile Include="castclass-generics029.cs" />
<Compile Include="..\structdef.cs" />
</ItemGroup>
</Project>
| <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<DebugType>PdbOnly</DebugType>
</PropertyGroup>
<ItemGroup>
<Compile Include="castclass-generics029.cs" />
<Compile Include="..\structdef.cs" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime | 65,924 | Fix libraries GCStress pipeline | Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| BruceForstall | "2022-02-27T04:30:37Z" | "2022-03-01T18:20:06Z" | dfe08b3bea9a1897562635307ab4bb70bc31fada | 79026a542a8364b6783c5d212bb1841ac2c873a9 | Fix libraries GCStress pipeline. Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| ./src/tests/GC/Scenarios/GCSimulator/GCSimulator_171.csproj | <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<AllowUnsafeBlocks>true</AllowUnsafeBlocks>
<GCStressIncompatible>true</GCStressIncompatible>
<CLRTestExecutionArguments>-t 1 -tp 0 -dz 17 -sdz 8517 -dc 10000 -sdc 5000 -lt 5 -dp 0.8 -dw 0.8</CLRTestExecutionArguments>
<IsGCSimulatorTest>true</IsGCSimulatorTest>
<CLRTestProjectToRun>GCSimulator.csproj</CLRTestProjectToRun>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<ItemGroup>
<Compile Include="GCSimulator.cs" />
<Compile Include="lifetimefx.cs" />
</ItemGroup>
</Project>
| <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<AllowUnsafeBlocks>true</AllowUnsafeBlocks>
<GCStressIncompatible>true</GCStressIncompatible>
<CLRTestExecutionArguments>-t 1 -tp 0 -dz 17 -sdz 8517 -dc 10000 -sdc 5000 -lt 5 -dp 0.8 -dw 0.8</CLRTestExecutionArguments>
<IsGCSimulatorTest>true</IsGCSimulatorTest>
<CLRTestProjectToRun>GCSimulator.csproj</CLRTestProjectToRun>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<ItemGroup>
<Compile Include="GCSimulator.cs" />
<Compile Include="lifetimefx.cs" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime | 65,924 | Fix libraries GCStress pipeline | Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| BruceForstall | "2022-02-27T04:30:37Z" | "2022-03-01T18:20:06Z" | dfe08b3bea9a1897562635307ab4bb70bc31fada | 79026a542a8364b6783c5d212bb1841ac2c873a9 | Fix libraries GCStress pipeline. Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| ./src/tests/JIT/Regression/CLR-x86-JIT/V1-M11-Beta1/b42009/b42009.csproj | <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<DebugType>PdbOnly</DebugType>
</PropertyGroup>
<ItemGroup>
<Compile Include="$(MSBuildProjectName).cs" />
</ItemGroup>
</Project>
| <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<DebugType>PdbOnly</DebugType>
</PropertyGroup>
<ItemGroup>
<Compile Include="$(MSBuildProjectName).cs" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime | 65,924 | Fix libraries GCStress pipeline | Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| BruceForstall | "2022-02-27T04:30:37Z" | "2022-03-01T18:20:06Z" | dfe08b3bea9a1897562635307ab4bb70bc31fada | 79026a542a8364b6783c5d212bb1841ac2c873a9 | Fix libraries GCStress pipeline. Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| ./src/tests/JIT/Methodical/VT/etc/hanoi_ro.csproj | <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<DebugType>None</DebugType>
<Optimize>True</Optimize>
</PropertyGroup>
<ItemGroup>
<Compile Include="hanoi.cs" />
</ItemGroup>
</Project>
| <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<DebugType>None</DebugType>
<Optimize>True</Optimize>
</PropertyGroup>
<ItemGroup>
<Compile Include="hanoi.cs" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime | 65,924 | Fix libraries GCStress pipeline | Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| BruceForstall | "2022-02-27T04:30:37Z" | "2022-03-01T18:20:06Z" | dfe08b3bea9a1897562635307ab4bb70bc31fada | 79026a542a8364b6783c5d212bb1841ac2c873a9 | Fix libraries GCStress pipeline. Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| ./src/tests/JIT/Methodical/Arrays/misc/selfref_d.csproj | <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<DebugType>Full</DebugType>
<Optimize>False</Optimize>
</PropertyGroup>
<ItemGroup>
<Compile Include="selfref.cs" />
</ItemGroup>
</Project>
| <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<DebugType>Full</DebugType>
<Optimize>False</Optimize>
</PropertyGroup>
<ItemGroup>
<Compile Include="selfref.cs" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime | 65,924 | Fix libraries GCStress pipeline | Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| BruceForstall | "2022-02-27T04:30:37Z" | "2022-03-01T18:20:06Z" | dfe08b3bea9a1897562635307ab4bb70bc31fada | 79026a542a8364b6783c5d212bb1841ac2c873a9 | Fix libraries GCStress pipeline. Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| ./src/tests/GC/Scenarios/GCSimulator/GCSimulator_282.csproj | <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<AllowUnsafeBlocks>true</AllowUnsafeBlocks>
<GCStressIncompatible>true</GCStressIncompatible>
<CLRTestExecutionArguments>-t 3 -tp 0 -dz 17 -sdz 8500 -dc 10000 -sdc 5000 -lt 4 -dp 0.0 -dw 0.4</CLRTestExecutionArguments>
<IsGCSimulatorTest>true</IsGCSimulatorTest>
<CLRTestProjectToRun>GCSimulator.csproj</CLRTestProjectToRun>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<ItemGroup>
<Compile Include="GCSimulator.cs" />
<Compile Include="lifetimefx.cs" />
</ItemGroup>
</Project>
| <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<AllowUnsafeBlocks>true</AllowUnsafeBlocks>
<GCStressIncompatible>true</GCStressIncompatible>
<CLRTestExecutionArguments>-t 3 -tp 0 -dz 17 -sdz 8500 -dc 10000 -sdc 5000 -lt 4 -dp 0.0 -dw 0.4</CLRTestExecutionArguments>
<IsGCSimulatorTest>true</IsGCSimulatorTest>
<CLRTestProjectToRun>GCSimulator.csproj</CLRTestProjectToRun>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<ItemGroup>
<Compile Include="GCSimulator.cs" />
<Compile Include="lifetimefx.cs" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime | 65,924 | Fix libraries GCStress pipeline | Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| BruceForstall | "2022-02-27T04:30:37Z" | "2022-03-01T18:20:06Z" | dfe08b3bea9a1897562635307ab4bb70bc31fada | 79026a542a8364b6783c5d212bb1841ac2c873a9 | Fix libraries GCStress pipeline. Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| ./src/libraries/Microsoft.Extensions.Configuration.FileExtensions/tests/Microsoft.Extensions.Configuration.FileExtensions.Tests.csproj | <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<TargetFrameworks>$(NetCoreAppCurrent);$(NetFrameworkMinimum)</TargetFrameworks>
<EnableDefaultItems>true</EnableDefaultItems>
</PropertyGroup>
<ItemGroup>
<Compile Include="$(CommonTestPath)Extensions\ConfigurationRootTest.cs" Link="Common\Extensions\ConfigurationRootTest.cs" />
<TrimmerRootDescriptor Include="$(ILLinkDescriptorsPath)ILLink.Descriptors.Castle.xml" />
</ItemGroup>
<ItemGroup>
<PackageReference Include="Moq" Version="$(MoqVersion)" />
<ProjectReference Include="..\src\Microsoft.Extensions.Configuration.FileExtensions.csproj" />
</ItemGroup>
</Project>
| <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<TargetFrameworks>$(NetCoreAppCurrent);$(NetFrameworkMinimum)</TargetFrameworks>
<EnableDefaultItems>true</EnableDefaultItems>
</PropertyGroup>
<ItemGroup>
<Compile Include="$(CommonTestPath)Extensions\ConfigurationRootTest.cs" Link="Common\Extensions\ConfigurationRootTest.cs" />
<TrimmerRootDescriptor Include="$(ILLinkDescriptorsPath)ILLink.Descriptors.Castle.xml" />
</ItemGroup>
<ItemGroup>
<PackageReference Include="Moq" Version="$(MoqVersion)" />
<ProjectReference Include="..\src\Microsoft.Extensions.Configuration.FileExtensions.csproj" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime | 65,924 | Fix libraries GCStress pipeline | Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| BruceForstall | "2022-02-27T04:30:37Z" | "2022-03-01T18:20:06Z" | dfe08b3bea9a1897562635307ab4bb70bc31fada | 79026a542a8364b6783c5d212bb1841ac2c873a9 | Fix libraries GCStress pipeline. Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| ./src/tests/JIT/Methodical/Boxing/xlang/sin_cs_il_sinlib_r.csproj | <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<!-- Set to 'Full' if the Debug? column is marked in the spreadsheet. Leave blank otherwise. -->
<DebugType>None</DebugType>
<Optimize />
<NoWarn>$(NoWarn),8002</NoWarn>
</PropertyGroup>
<ItemGroup>
<Compile Include="sin_il.cs" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="sinlib_il.ilproj" />
</ItemGroup>
</Project>
| <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<!-- Set to 'Full' if the Debug? column is marked in the spreadsheet. Leave blank otherwise. -->
<DebugType>None</DebugType>
<Optimize />
<NoWarn>$(NoWarn),8002</NoWarn>
</PropertyGroup>
<ItemGroup>
<Compile Include="sin_il.cs" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="sinlib_il.ilproj" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime | 65,924 | Fix libraries GCStress pipeline | Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| BruceForstall | "2022-02-27T04:30:37Z" | "2022-03-01T18:20:06Z" | dfe08b3bea9a1897562635307ab4bb70bc31fada | 79026a542a8364b6783c5d212bb1841ac2c873a9 | Fix libraries GCStress pipeline. Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| ./src/tests/JIT/HardwareIntrinsics/Arm/AdvSimd/AdvSimd_Part0_r.csproj | <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<AllowUnsafeBlocks>true</AllowUnsafeBlocks>
</PropertyGroup>
<PropertyGroup>
<DebugType>Embedded</DebugType>
<Optimize />
</PropertyGroup>
<ItemGroup>
<Compile Include="Abs.Vector64.Int16.cs" />
<Compile Include="Abs.Vector64.Int32.cs" />
<Compile Include="Abs.Vector64.SByte.cs" />
<Compile Include="Abs.Vector64.Single.cs" />
<Compile Include="Abs.Vector128.Int16.cs" />
<Compile Include="Abs.Vector128.Int32.cs" />
<Compile Include="Abs.Vector128.SByte.cs" />
<Compile Include="Abs.Vector128.Single.cs" />
<Compile Include="AbsSaturate.Vector64.Int16.cs" />
<Compile Include="AbsSaturate.Vector64.Int32.cs" />
<Compile Include="AbsSaturate.Vector64.SByte.cs" />
<Compile Include="AbsSaturate.Vector128.Int16.cs" />
<Compile Include="AbsSaturate.Vector128.Int32.cs" />
<Compile Include="AbsSaturate.Vector128.SByte.cs" />
<Compile Include="AbsScalar.Vector64.Double.cs" />
<Compile Include="AbsScalar.Vector64.Single.cs" />
<Compile Include="AbsoluteCompareGreaterThan.Vector64.Single.cs" />
<Compile Include="AbsoluteCompareGreaterThan.Vector128.Single.cs" />
<Compile Include="AbsoluteCompareGreaterThanOrEqual.Vector64.Single.cs" />
<Compile Include="AbsoluteCompareGreaterThanOrEqual.Vector128.Single.cs" />
<Compile Include="AbsoluteCompareLessThan.Vector64.Single.cs" />
<Compile Include="AbsoluteCompareLessThan.Vector128.Single.cs" />
<Compile Include="AbsoluteCompareLessThanOrEqual.Vector64.Single.cs" />
<Compile Include="AbsoluteCompareLessThanOrEqual.Vector128.Single.cs" />
<Compile Include="AbsoluteDifference.Vector64.Byte.cs" />
<Compile Include="AbsoluteDifference.Vector64.Int16.cs" />
<Compile Include="AbsoluteDifference.Vector64.Int32.cs" />
<Compile Include="AbsoluteDifference.Vector64.SByte.cs" />
<Compile Include="AbsoluteDifference.Vector64.Single.cs" />
<Compile Include="AbsoluteDifference.Vector64.UInt16.cs" />
<Compile Include="AbsoluteDifference.Vector64.UInt32.cs" />
<Compile Include="AbsoluteDifference.Vector128.Byte.cs" />
<Compile Include="AbsoluteDifference.Vector128.Int16.cs" />
<Compile Include="AbsoluteDifference.Vector128.Int32.cs" />
<Compile Include="AbsoluteDifference.Vector128.SByte.cs" />
<Compile Include="AbsoluteDifference.Vector128.Single.cs" />
<Compile Include="AbsoluteDifference.Vector128.UInt16.cs" />
<Compile Include="AbsoluteDifference.Vector128.UInt32.cs" />
<Compile Include="AbsoluteDifferenceAdd.Vector64.Byte.cs" />
<Compile Include="AbsoluteDifferenceAdd.Vector64.Int16.cs" />
<Compile Include="AbsoluteDifferenceAdd.Vector64.Int32.cs" />
<Compile Include="AbsoluteDifferenceAdd.Vector64.SByte.cs" />
<Compile Include="AbsoluteDifferenceAdd.Vector64.UInt16.cs" />
<Compile Include="AbsoluteDifferenceAdd.Vector64.UInt32.cs" />
<Compile Include="AbsoluteDifferenceAdd.Vector128.Byte.cs" />
<Compile Include="AbsoluteDifferenceAdd.Vector128.Int16.cs" />
<Compile Include="AbsoluteDifferenceAdd.Vector128.Int32.cs" />
<Compile Include="AbsoluteDifferenceAdd.Vector128.SByte.cs" />
<Compile Include="AbsoluteDifferenceAdd.Vector128.UInt16.cs" />
<Compile Include="AbsoluteDifferenceAdd.Vector128.UInt32.cs" />
<Compile Include="AbsoluteDifferenceWideningLower.Vector64.Byte.cs" />
<Compile Include="AbsoluteDifferenceWideningLower.Vector64.Int16.cs" />
<Compile Include="AbsoluteDifferenceWideningLower.Vector64.Int32.cs" />
<Compile Include="AbsoluteDifferenceWideningLower.Vector64.SByte.cs" />
<Compile Include="AbsoluteDifferenceWideningLower.Vector64.UInt16.cs" />
<Compile Include="AbsoluteDifferenceWideningLower.Vector64.UInt32.cs" />
<Compile Include="AbsoluteDifferenceWideningLowerAndAdd.Vector64.Byte.cs" />
<Compile Include="AbsoluteDifferenceWideningLowerAndAdd.Vector64.Int16.cs" />
<Compile Include="AbsoluteDifferenceWideningLowerAndAdd.Vector64.Int32.cs" />
<Compile Include="AbsoluteDifferenceWideningLowerAndAdd.Vector64.SByte.cs" />
<Compile Include="AbsoluteDifferenceWideningLowerAndAdd.Vector64.UInt16.cs" />
<Compile Include="AbsoluteDifferenceWideningLowerAndAdd.Vector64.UInt32.cs" />
<Compile Include="AbsoluteDifferenceWideningUpper.Vector128.Byte.cs" />
<Compile Include="AbsoluteDifferenceWideningUpper.Vector128.Int16.cs" />
<Compile Include="AbsoluteDifferenceWideningUpper.Vector128.Int32.cs" />
<Compile Include="AbsoluteDifferenceWideningUpper.Vector128.SByte.cs" />
<Compile Include="AbsoluteDifferenceWideningUpper.Vector128.UInt16.cs" />
<Compile Include="AbsoluteDifferenceWideningUpper.Vector128.UInt32.cs" />
<Compile Include="AbsoluteDifferenceWideningUpperAndAdd.Vector128.Byte.cs" />
<Compile Include="AbsoluteDifferenceWideningUpperAndAdd.Vector128.Int16.cs" />
<Compile Include="AbsoluteDifferenceWideningUpperAndAdd.Vector128.Int32.cs" />
<Compile Include="AbsoluteDifferenceWideningUpperAndAdd.Vector128.SByte.cs" />
<Compile Include="AbsoluteDifferenceWideningUpperAndAdd.Vector128.UInt16.cs" />
<Compile Include="AbsoluteDifferenceWideningUpperAndAdd.Vector128.UInt32.cs" />
<Compile Include="Add.Vector64.Byte.cs" />
<Compile Include="Add.Vector64.Int16.cs" />
<Compile Include="Add.Vector64.Int32.cs" />
<Compile Include="Add.Vector64.SByte.cs" />
<Compile Include="Add.Vector64.Single.cs" />
<Compile Include="Add.Vector64.UInt16.cs" />
<Compile Include="Add.Vector64.UInt32.cs" />
<Compile Include="Add.Vector128.Byte.cs" />
<Compile Include="Add.Vector128.Int16.cs" />
<Compile Include="Add.Vector128.Int32.cs" />
<Compile Include="Add.Vector128.Int64.cs" />
<Compile Include="Add.Vector128.SByte.cs" />
<Compile Include="Add.Vector128.Single.cs" />
<Compile Include="Add.Vector128.UInt16.cs" />
<Compile Include="Add.Vector128.UInt32.cs" />
<Compile Include="Add.Vector128.UInt64.cs" />
<Compile Include="AddHighNarrowingLower.Vector64.Byte.cs" />
<Compile Include="AddHighNarrowingLower.Vector64.Int16.cs" />
<Compile Include="AddHighNarrowingLower.Vector64.Int32.cs" />
<Compile Include="AddHighNarrowingLower.Vector64.SByte.cs" />
<Compile Include="AddHighNarrowingLower.Vector64.UInt16.cs" />
<Compile Include="AddHighNarrowingLower.Vector64.UInt32.cs" />
<Compile Include="AddHighNarrowingUpper.Vector128.Byte.cs" />
<Compile Include="AddHighNarrowingUpper.Vector128.Int16.cs" />
<Compile Include="AddHighNarrowingUpper.Vector128.Int32.cs" />
<Compile Include="AddHighNarrowingUpper.Vector128.SByte.cs" />
<Compile Include="Program.AdvSimd_Part0.cs" />
<Compile Include="..\Shared\Helpers.cs" />
<Compile Include="..\Shared\Program.cs" />
</ItemGroup>
</Project>
| <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<AllowUnsafeBlocks>true</AllowUnsafeBlocks>
</PropertyGroup>
<PropertyGroup>
<DebugType>Embedded</DebugType>
<Optimize />
</PropertyGroup>
<ItemGroup>
<Compile Include="Abs.Vector64.Int16.cs" />
<Compile Include="Abs.Vector64.Int32.cs" />
<Compile Include="Abs.Vector64.SByte.cs" />
<Compile Include="Abs.Vector64.Single.cs" />
<Compile Include="Abs.Vector128.Int16.cs" />
<Compile Include="Abs.Vector128.Int32.cs" />
<Compile Include="Abs.Vector128.SByte.cs" />
<Compile Include="Abs.Vector128.Single.cs" />
<Compile Include="AbsSaturate.Vector64.Int16.cs" />
<Compile Include="AbsSaturate.Vector64.Int32.cs" />
<Compile Include="AbsSaturate.Vector64.SByte.cs" />
<Compile Include="AbsSaturate.Vector128.Int16.cs" />
<Compile Include="AbsSaturate.Vector128.Int32.cs" />
<Compile Include="AbsSaturate.Vector128.SByte.cs" />
<Compile Include="AbsScalar.Vector64.Double.cs" />
<Compile Include="AbsScalar.Vector64.Single.cs" />
<Compile Include="AbsoluteCompareGreaterThan.Vector64.Single.cs" />
<Compile Include="AbsoluteCompareGreaterThan.Vector128.Single.cs" />
<Compile Include="AbsoluteCompareGreaterThanOrEqual.Vector64.Single.cs" />
<Compile Include="AbsoluteCompareGreaterThanOrEqual.Vector128.Single.cs" />
<Compile Include="AbsoluteCompareLessThan.Vector64.Single.cs" />
<Compile Include="AbsoluteCompareLessThan.Vector128.Single.cs" />
<Compile Include="AbsoluteCompareLessThanOrEqual.Vector64.Single.cs" />
<Compile Include="AbsoluteCompareLessThanOrEqual.Vector128.Single.cs" />
<Compile Include="AbsoluteDifference.Vector64.Byte.cs" />
<Compile Include="AbsoluteDifference.Vector64.Int16.cs" />
<Compile Include="AbsoluteDifference.Vector64.Int32.cs" />
<Compile Include="AbsoluteDifference.Vector64.SByte.cs" />
<Compile Include="AbsoluteDifference.Vector64.Single.cs" />
<Compile Include="AbsoluteDifference.Vector64.UInt16.cs" />
<Compile Include="AbsoluteDifference.Vector64.UInt32.cs" />
<Compile Include="AbsoluteDifference.Vector128.Byte.cs" />
<Compile Include="AbsoluteDifference.Vector128.Int16.cs" />
<Compile Include="AbsoluteDifference.Vector128.Int32.cs" />
<Compile Include="AbsoluteDifference.Vector128.SByte.cs" />
<Compile Include="AbsoluteDifference.Vector128.Single.cs" />
<Compile Include="AbsoluteDifference.Vector128.UInt16.cs" />
<Compile Include="AbsoluteDifference.Vector128.UInt32.cs" />
<Compile Include="AbsoluteDifferenceAdd.Vector64.Byte.cs" />
<Compile Include="AbsoluteDifferenceAdd.Vector64.Int16.cs" />
<Compile Include="AbsoluteDifferenceAdd.Vector64.Int32.cs" />
<Compile Include="AbsoluteDifferenceAdd.Vector64.SByte.cs" />
<Compile Include="AbsoluteDifferenceAdd.Vector64.UInt16.cs" />
<Compile Include="AbsoluteDifferenceAdd.Vector64.UInt32.cs" />
<Compile Include="AbsoluteDifferenceAdd.Vector128.Byte.cs" />
<Compile Include="AbsoluteDifferenceAdd.Vector128.Int16.cs" />
<Compile Include="AbsoluteDifferenceAdd.Vector128.Int32.cs" />
<Compile Include="AbsoluteDifferenceAdd.Vector128.SByte.cs" />
<Compile Include="AbsoluteDifferenceAdd.Vector128.UInt16.cs" />
<Compile Include="AbsoluteDifferenceAdd.Vector128.UInt32.cs" />
<Compile Include="AbsoluteDifferenceWideningLower.Vector64.Byte.cs" />
<Compile Include="AbsoluteDifferenceWideningLower.Vector64.Int16.cs" />
<Compile Include="AbsoluteDifferenceWideningLower.Vector64.Int32.cs" />
<Compile Include="AbsoluteDifferenceWideningLower.Vector64.SByte.cs" />
<Compile Include="AbsoluteDifferenceWideningLower.Vector64.UInt16.cs" />
<Compile Include="AbsoluteDifferenceWideningLower.Vector64.UInt32.cs" />
<Compile Include="AbsoluteDifferenceWideningLowerAndAdd.Vector64.Byte.cs" />
<Compile Include="AbsoluteDifferenceWideningLowerAndAdd.Vector64.Int16.cs" />
<Compile Include="AbsoluteDifferenceWideningLowerAndAdd.Vector64.Int32.cs" />
<Compile Include="AbsoluteDifferenceWideningLowerAndAdd.Vector64.SByte.cs" />
<Compile Include="AbsoluteDifferenceWideningLowerAndAdd.Vector64.UInt16.cs" />
<Compile Include="AbsoluteDifferenceWideningLowerAndAdd.Vector64.UInt32.cs" />
<Compile Include="AbsoluteDifferenceWideningUpper.Vector128.Byte.cs" />
<Compile Include="AbsoluteDifferenceWideningUpper.Vector128.Int16.cs" />
<Compile Include="AbsoluteDifferenceWideningUpper.Vector128.Int32.cs" />
<Compile Include="AbsoluteDifferenceWideningUpper.Vector128.SByte.cs" />
<Compile Include="AbsoluteDifferenceWideningUpper.Vector128.UInt16.cs" />
<Compile Include="AbsoluteDifferenceWideningUpper.Vector128.UInt32.cs" />
<Compile Include="AbsoluteDifferenceWideningUpperAndAdd.Vector128.Byte.cs" />
<Compile Include="AbsoluteDifferenceWideningUpperAndAdd.Vector128.Int16.cs" />
<Compile Include="AbsoluteDifferenceWideningUpperAndAdd.Vector128.Int32.cs" />
<Compile Include="AbsoluteDifferenceWideningUpperAndAdd.Vector128.SByte.cs" />
<Compile Include="AbsoluteDifferenceWideningUpperAndAdd.Vector128.UInt16.cs" />
<Compile Include="AbsoluteDifferenceWideningUpperAndAdd.Vector128.UInt32.cs" />
<Compile Include="Add.Vector64.Byte.cs" />
<Compile Include="Add.Vector64.Int16.cs" />
<Compile Include="Add.Vector64.Int32.cs" />
<Compile Include="Add.Vector64.SByte.cs" />
<Compile Include="Add.Vector64.Single.cs" />
<Compile Include="Add.Vector64.UInt16.cs" />
<Compile Include="Add.Vector64.UInt32.cs" />
<Compile Include="Add.Vector128.Byte.cs" />
<Compile Include="Add.Vector128.Int16.cs" />
<Compile Include="Add.Vector128.Int32.cs" />
<Compile Include="Add.Vector128.Int64.cs" />
<Compile Include="Add.Vector128.SByte.cs" />
<Compile Include="Add.Vector128.Single.cs" />
<Compile Include="Add.Vector128.UInt16.cs" />
<Compile Include="Add.Vector128.UInt32.cs" />
<Compile Include="Add.Vector128.UInt64.cs" />
<Compile Include="AddHighNarrowingLower.Vector64.Byte.cs" />
<Compile Include="AddHighNarrowingLower.Vector64.Int16.cs" />
<Compile Include="AddHighNarrowingLower.Vector64.Int32.cs" />
<Compile Include="AddHighNarrowingLower.Vector64.SByte.cs" />
<Compile Include="AddHighNarrowingLower.Vector64.UInt16.cs" />
<Compile Include="AddHighNarrowingLower.Vector64.UInt32.cs" />
<Compile Include="AddHighNarrowingUpper.Vector128.Byte.cs" />
<Compile Include="AddHighNarrowingUpper.Vector128.Int16.cs" />
<Compile Include="AddHighNarrowingUpper.Vector128.Int32.cs" />
<Compile Include="AddHighNarrowingUpper.Vector128.SByte.cs" />
<Compile Include="Program.AdvSimd_Part0.cs" />
<Compile Include="..\Shared\Helpers.cs" />
<Compile Include="..\Shared\Program.cs" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime | 65,924 | Fix libraries GCStress pipeline | Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| BruceForstall | "2022-02-27T04:30:37Z" | "2022-03-01T18:20:06Z" | dfe08b3bea9a1897562635307ab4bb70bc31fada | 79026a542a8364b6783c5d212bb1841ac2c873a9 | Fix libraries GCStress pipeline. Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| ./src/tests/Interop/PInvoke/Array/MarshalArrayAsParam/AsDefault/AsDefaultTest.csproj | <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
</PropertyGroup>
<ItemGroup>
<Compile Include="*.cs" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\LPArrayNative\CMakeLists.txt" />
</ItemGroup>
</Project>
| <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
</PropertyGroup>
<ItemGroup>
<Compile Include="*.cs" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\LPArrayNative\CMakeLists.txt" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime | 65,924 | Fix libraries GCStress pipeline | Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| BruceForstall | "2022-02-27T04:30:37Z" | "2022-03-01T18:20:06Z" | dfe08b3bea9a1897562635307ab4bb70bc31fada | 79026a542a8364b6783c5d212bb1841ac2c873a9 | Fix libraries GCStress pipeline. Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| ./src/libraries/Microsoft.Extensions.HostFactoryResolver/tests/NoSpecialEntryPointPatternBuildsThenThrows/NoSpecialEntryPointPatternBuildsThenThrows.csproj | <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<TargetFrameworks>$(NetCoreAppCurrent);$(NetFrameworkMinimum)</TargetFrameworks>
<EnableDefaultItems>true</EnableDefaultItems>
<OutputType>Exe</OutputType>
</PropertyGroup>
<ItemGroup>
<ProjectReference Include="..\MockHostTypes\MockHostTypes.csproj" />
</ItemGroup>
</Project>
| <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<TargetFrameworks>$(NetCoreAppCurrent);$(NetFrameworkMinimum)</TargetFrameworks>
<EnableDefaultItems>true</EnableDefaultItems>
<OutputType>Exe</OutputType>
</PropertyGroup>
<ItemGroup>
<ProjectReference Include="..\MockHostTypes\MockHostTypes.csproj" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime | 65,924 | Fix libraries GCStress pipeline | Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| BruceForstall | "2022-02-27T04:30:37Z" | "2022-03-01T18:20:06Z" | dfe08b3bea9a1897562635307ab4bb70bc31fada | 79026a542a8364b6783c5d212bb1841ac2c873a9 | Fix libraries GCStress pipeline. Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| ./src/tests/nativeaot/SmokeTests/DynamicGenerics/DynamicGenerics.csproj | <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestKind>BuildAndRun</CLRTestKind>
<CLRTestPriority>0</CLRTestPriority>
<AllowUnsafeBlocks>true</AllowUnsafeBlocks>
    <!-- There are just too many of these warnings -->
<SuppressTrimAnalysisWarnings>true</SuppressTrimAnalysisWarnings>
<NoWarn>$(NoWarn);IL3050</NoWarn>
<!-- Look for MULTIMODULE_BUILD #define for the more specific incompatible parts -->
<CLRTestTargetUnsupported Condition="'$(IlcMultiModule)' == 'true'">true</CLRTestTargetUnsupported>
</PropertyGroup>
<ItemGroup>
<RdXmlFile Include="rd.xml" />
</ItemGroup>
<ItemGroup>
<Compile Include="*.cs" />
<Compile Include="Internal\*.cs" />
<Compile Remove="partial_universal_generics.cs" />
<Compile Remove="universal_generics.cs" />
<Compile Remove="UniversalConstrainedCalls.cs" />
<Compile Remove="fieldlayout.cs" />
<Compile Remove="B279085.cs" />
</ItemGroup>
</Project>
| <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestKind>BuildAndRun</CLRTestKind>
<CLRTestPriority>0</CLRTestPriority>
<AllowUnsafeBlocks>true</AllowUnsafeBlocks>
    <!-- There are just too many of these warnings -->
<SuppressTrimAnalysisWarnings>true</SuppressTrimAnalysisWarnings>
<NoWarn>$(NoWarn);IL3050</NoWarn>
<!-- Look for MULTIMODULE_BUILD #define for the more specific incompatible parts -->
<CLRTestTargetUnsupported Condition="'$(IlcMultiModule)' == 'true'">true</CLRTestTargetUnsupported>
</PropertyGroup>
<ItemGroup>
<RdXmlFile Include="rd.xml" />
</ItemGroup>
<ItemGroup>
<Compile Include="*.cs" />
<Compile Include="Internal\*.cs" />
<Compile Remove="partial_universal_generics.cs" />
<Compile Remove="universal_generics.cs" />
<Compile Remove="UniversalConstrainedCalls.cs" />
<Compile Remove="fieldlayout.cs" />
<Compile Remove="B279085.cs" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime | 65,924 | Fix libraries GCStress pipeline | Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| BruceForstall | "2022-02-27T04:30:37Z" | "2022-03-01T18:20:06Z" | dfe08b3bea9a1897562635307ab4bb70bc31fada | 79026a542a8364b6783c5d212bb1841ac2c873a9 | Fix libraries GCStress pipeline. Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| ./src/tests/JIT/Methodical/eh/basics/throwincatch_do.csproj | <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<AllowUnsafeBlocks>true</AllowUnsafeBlocks>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<DebugType>Full</DebugType>
<Optimize>True</Optimize>
</PropertyGroup>
<ItemGroup>
<Compile Include="throwincatch.cs" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\..\..\common\eh_common.csproj" />
</ItemGroup>
</Project>
| <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<AllowUnsafeBlocks>true</AllowUnsafeBlocks>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<DebugType>Full</DebugType>
<Optimize>True</Optimize>
</PropertyGroup>
<ItemGroup>
<Compile Include="throwincatch.cs" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\..\..\common\eh_common.csproj" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime | 65,924 | Fix libraries GCStress pipeline | Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| BruceForstall | "2022-02-27T04:30:37Z" | "2022-03-01T18:20:06Z" | dfe08b3bea9a1897562635307ab4bb70bc31fada | 79026a542a8364b6783c5d212bb1841ac2c873a9 | Fix libraries GCStress pipeline. Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| ./src/tests/Loader/classloader/generics/Misc/TestWithManyParams.csproj | <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<AllowUnsafeBlocks>true</AllowUnsafeBlocks>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<ItemGroup>
<Compile Include="TestWithManyParams.cs" />
</ItemGroup>
</Project>
| <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<AllowUnsafeBlocks>true</AllowUnsafeBlocks>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<ItemGroup>
<Compile Include="TestWithManyParams.cs" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime | 65,924 | Fix libraries GCStress pipeline | Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| BruceForstall | "2022-02-27T04:30:37Z" | "2022-03-01T18:20:06Z" | dfe08b3bea9a1897562635307ab4bb70bc31fada | 79026a542a8364b6783c5d212bb1841ac2c873a9 | Fix libraries GCStress pipeline. Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| ./src/tests/baseservices/threading/regressions/beta1/347011.csproj | <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<ItemGroup>
<Compile Include="347011.cs" />
</ItemGroup>
</Project>
| <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<ItemGroup>
<Compile Include="347011.cs" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime | 65,924 | Fix libraries GCStress pipeline | Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| BruceForstall | "2022-02-27T04:30:37Z" | "2022-03-01T18:20:06Z" | dfe08b3bea9a1897562635307ab4bb70bc31fada | 79026a542a8364b6783c5d212bb1841ac2c873a9 | Fix libraries GCStress pipeline. Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| ./src/tests/Loader/classloader/TypeGeneratorTests/TypeGeneratorTest774/Generated774.il | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
.assembly extern mscorlib { .publickeytoken = (B7 7A 5C 56 19 34 E0 89 ) .ver 4:0:0:0 }
.assembly extern TestFramework { .publickeytoken = ( B0 3F 5F 7F 11 D5 0A 3A ) }
//TYPES IN FORWARDER ASSEMBLIES:
//TEST ASSEMBLY:
.assembly Generated774 { .hash algorithm 0x00008004 }
.assembly extern xunit.core {}
.class public BaseClass0
{
.method public hidebysig specialname rtspecialname instance void .ctor() cil managed {
ldarg.0
call instance void [mscorlib]System.Object::.ctor()
ret
}
}
.class public BaseClass1
extends BaseClass0
{
.method public hidebysig specialname rtspecialname instance void .ctor() cil managed {
ldarg.0
call instance void BaseClass0::.ctor()
ret
}
}
.class public G3_C1246`1<T0>
extends class G2_C293`2<class BaseClass1,class BaseClass0>
implements class IBase1`1<!T0>
{
.method public hidebysig virtual instance string Method4() cil managed noinlining {
ldstr "G3_C1246::Method4.15087()"
ret
}
.method public hidebysig newslot virtual instance string 'IBase1<T0>.Method4'() cil managed noinlining {
.override method instance string class IBase1`1<!T0>::Method4()
ldstr "G3_C1246::Method4.MI.15088()"
ret
}
.method public hidebysig virtual instance string Method5() cil managed noinlining {
ldstr "G3_C1246::Method5.15089()"
ret
}
.method public hidebysig newslot virtual instance string 'IBase1<T0>.Method5'() cil managed noinlining {
.override method instance string class IBase1`1<!T0>::Method5()
ldstr "G3_C1246::Method5.MI.15090()"
ret
}
.method public hidebysig newslot virtual instance string Method6<M0>() cil managed noinlining {
ldstr "G3_C1246::Method6.15091<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot virtual instance string 'IBase1<T0>.Method6'<M0>() cil managed noinlining {
.override method instance string class IBase1`1<!T0>::Method6<[1]>()
ldstr "G3_C1246::Method6.MI.15092<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot virtual instance string ClassMethod3870() cil managed noinlining {
ldstr "G3_C1246::ClassMethod3870.15093()"
ret
}
.method public hidebysig newslot virtual instance string ClassMethod3871<M0>() cil managed noinlining {
ldstr "G3_C1246::ClassMethod3871.15094<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot virtual instance string ClassMethod3872<M0>() cil managed noinlining {
ldstr "G3_C1246::ClassMethod3872.15095<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot virtual instance string 'G2_C293<class BaseClass1,class BaseClass0>.ClassMethod1913'() cil managed noinlining {
.override method instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1913()
ldstr "G3_C1246::ClassMethod1913.MI.15096()"
ret
}
.method public hidebysig newslot virtual instance string 'G2_C293<class BaseClass1,class BaseClass0>.ClassMethod1915'<M0>() cil managed noinlining {
.override method instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1915<[1]>()
ldstr "G3_C1246::ClassMethod1915.MI.15097<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig specialname rtspecialname instance void .ctor() cil managed {
ldarg.0
call instance void class G2_C293`2<class BaseClass1,class BaseClass0>::.ctor()
ret
}
}
.class public G2_C293`2<T0, T1>
extends class G1_C6`2<class BaseClass1,!T1>
implements class IBase2`2<class BaseClass0,class BaseClass0>
{
.method public hidebysig virtual instance string Method7<M0>() cil managed noinlining {
ldstr "G2_C293::Method7.7469<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot virtual instance string 'IBase2<class BaseClass0,class BaseClass0>.Method7'<M0>() cil managed noinlining {
.override method instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<[1]>()
ldstr "G2_C293::Method7.MI.7470<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot virtual instance string ClassMethod1913() cil managed noinlining {
ldstr "G2_C293::ClassMethod1913.7471()"
ret
}
.method public hidebysig newslot virtual instance string ClassMethod1914() cil managed noinlining {
ldstr "G2_C293::ClassMethod1914.7472()"
ret
}
.method public hidebysig newslot virtual instance string ClassMethod1915<M0>() cil managed noinlining {
ldstr "G2_C293::ClassMethod1915.7473<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot virtual instance string ClassMethod1916<M0>() cil managed noinlining {
ldstr "G2_C293::ClassMethod1916.7474<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot virtual instance string 'G1_C6<class BaseClass1,T1>.ClassMethod1327'<M0>() cil managed noinlining {
.override method instance string class G1_C6`2<class BaseClass1,!T1>::ClassMethod1327<[1]>()
ldstr "G2_C293::ClassMethod1327.MI.7475<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig specialname rtspecialname instance void .ctor() cil managed {
ldarg.0
call instance void class G1_C6`2<class BaseClass1,!T1>::.ctor()
ret
}
}
.class interface public abstract IBase1`1<+T0>
{
.method public hidebysig newslot abstract virtual instance string Method4() cil managed { }
.method public hidebysig newslot abstract virtual instance string Method5() cil managed { }
.method public hidebysig newslot abstract virtual instance string Method6<M0>() cil managed { }
}
.class public G1_C6`2<T0, T1>
implements class IBase2`2<class BaseClass1,!T0>
{
.method public hidebysig newslot virtual instance string Method7<M0>() cil managed noinlining {
ldstr "G1_C6::Method7.4808<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot virtual instance string ClassMethod1326() cil managed noinlining {
ldstr "G1_C6::ClassMethod1326.4809()"
ret
}
.method public hidebysig newslot virtual instance string ClassMethod1327<M0>() cil managed noinlining {
ldstr "G1_C6::ClassMethod1327.4810<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig specialname rtspecialname instance void .ctor() cil managed {
ldarg.0
call instance void [mscorlib]System.Object::.ctor()
ret
}
}
.class interface public abstract IBase2`2<+T0, -T1>
{
.method public hidebysig newslot abstract virtual instance string Method7<M0>() cil managed { }
}
.class public auto ansi beforefieldinit Generated774 {
.method static void M.BaseClass0<(BaseClass0)W>(!!W inst, string exp) cil managed {
.maxstack 5
.locals init (string[] actualResults)
ldc.i4.s 0
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.BaseClass0<(BaseClass0)W>(!!W inst, string exp)"
ldc.i4.s 0
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.BaseClass1<(BaseClass1)W>(!!W inst, string exp) cil managed {
.maxstack 5
.locals init (string[] actualResults)
ldc.i4.s 0
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.BaseClass1<(BaseClass1)W>(!!W inst, string exp)"
ldc.i4.s 0
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G3_C1246.T<T0,(class G3_C1246`1<!!T0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 18
.locals init (string[] actualResults)
ldc.i4.s 13
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G3_C1246.T<T0,(class G3_C1246`1<!!T0>)W>(!!W 'inst', string exp)"
ldc.i4.s 13
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<!!T0>::ClassMethod1326()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<!!T0>::ClassMethod1327<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<!!T0>::ClassMethod1913()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<!!T0>::ClassMethod1914()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<!!T0>::ClassMethod1915<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<!!T0>::ClassMethod1916<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 6
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<!!T0>::ClassMethod3870()
stelem.ref
ldloc.s actualResults
ldc.i4.s 7
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<!!T0>::ClassMethod3871<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 8
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<!!T0>::ClassMethod3872<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 9
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<!!T0>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 10
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<!!T0>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 11
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<!!T0>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 12
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<!!T0>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G3_C1246.A<(class G3_C1246`1<class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 18
.locals init (string[] actualResults)
ldc.i4.s 13
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G3_C1246.A<(class G3_C1246`1<class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 13
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<class BaseClass0>::ClassMethod1326()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<class BaseClass0>::ClassMethod1327<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<class BaseClass0>::ClassMethod1913()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<class BaseClass0>::ClassMethod1914()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<class BaseClass0>::ClassMethod1915<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<class BaseClass0>::ClassMethod1916<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 6
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<class BaseClass0>::ClassMethod3870()
stelem.ref
ldloc.s actualResults
ldc.i4.s 7
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<class BaseClass0>::ClassMethod3871<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 8
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<class BaseClass0>::ClassMethod3872<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 9
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<class BaseClass0>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 10
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<class BaseClass0>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 11
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<class BaseClass0>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 12
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<class BaseClass0>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G3_C1246.B<(class G3_C1246`1<class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 18
.locals init (string[] actualResults)
ldc.i4.s 13
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G3_C1246.B<(class G3_C1246`1<class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 13
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<class BaseClass1>::ClassMethod1326()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<class BaseClass1>::ClassMethod1327<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<class BaseClass1>::ClassMethod1913()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<class BaseClass1>::ClassMethod1914()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<class BaseClass1>::ClassMethod1915<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<class BaseClass1>::ClassMethod1916<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 6
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<class BaseClass1>::ClassMethod3870()
stelem.ref
ldloc.s actualResults
ldc.i4.s 7
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<class BaseClass1>::ClassMethod3871<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 8
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<class BaseClass1>::ClassMethod3872<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 9
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<class BaseClass1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 10
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<class BaseClass1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 11
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<class BaseClass1>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 12
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G2_C293.T.T<T0,T1,(class G2_C293`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 12
.locals init (string[] actualResults)
ldc.i4.s 7
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G2_C293.T.T<T0,T1,(class G2_C293`2<!!T0,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 7
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<!!T0,!!T1>::ClassMethod1326()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<!!T0,!!T1>::ClassMethod1327<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<!!T0,!!T1>::ClassMethod1913()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<!!T0,!!T1>::ClassMethod1914()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<!!T0,!!T1>::ClassMethod1915<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<!!T0,!!T1>::ClassMethod1916<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 6
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<!!T0,!!T1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G2_C293.A.T<T1,(class G2_C293`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 12
.locals init (string[] actualResults)
ldc.i4.s 7
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G2_C293.A.T<T1,(class G2_C293`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 7
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass0,!!T1>::ClassMethod1326()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass0,!!T1>::ClassMethod1327<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass0,!!T1>::ClassMethod1913()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass0,!!T1>::ClassMethod1914()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass0,!!T1>::ClassMethod1915<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass0,!!T1>::ClassMethod1916<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 6
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass0,!!T1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G2_C293.A.A<(class G2_C293`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 12
.locals init (string[] actualResults)
ldc.i4.s 7
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G2_C293.A.A<(class G2_C293`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 7
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass0>::ClassMethod1326()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass0>::ClassMethod1327<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass0>::ClassMethod1913()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass0>::ClassMethod1914()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass0>::ClassMethod1915<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass0>::ClassMethod1916<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 6
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass0>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G2_C293.A.B<(class G2_C293`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 12
.locals init (string[] actualResults)
ldc.i4.s 7
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G2_C293.A.B<(class G2_C293`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 7
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass1>::ClassMethod1326()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass1>::ClassMethod1327<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass1>::ClassMethod1913()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass1>::ClassMethod1914()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass1>::ClassMethod1915<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass1>::ClassMethod1916<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 6
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G2_C293.B.T<T1,(class G2_C293`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 12
.locals init (string[] actualResults)
ldc.i4.s 7
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G2_C293.B.T<T1,(class G2_C293`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 7
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass1,!!T1>::ClassMethod1326()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass1,!!T1>::ClassMethod1327<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass1,!!T1>::ClassMethod1913()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass1,!!T1>::ClassMethod1914()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass1,!!T1>::ClassMethod1915<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass1,!!T1>::ClassMethod1916<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 6
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass1,!!T1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G2_C293.B.A<(class G2_C293`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 12
.locals init (string[] actualResults)
ldc.i4.s 7
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G2_C293.B.A<(class G2_C293`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 7
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1326()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1327<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1913()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1914()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1915<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1916<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 6
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G2_C293.B.B<(class G2_C293`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 12
.locals init (string[] actualResults)
ldc.i4.s 7
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G2_C293.B.B<(class G2_C293`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 7
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass1>::ClassMethod1326()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass1>::ClassMethod1327<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass1>::ClassMethod1913()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass1>::ClassMethod1914()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass1>::ClassMethod1915<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass1>::ClassMethod1916<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 6
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase1.T<T0,(class IBase1`1<!!T0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 8
.locals init (string[] actualResults)
ldc.i4.s 3
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase1.T<T0,(class IBase1`1<!!T0>)W>(!!W 'inst', string exp)"
ldc.i4.s 3
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<!!T0>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<!!T0>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<!!T0>::Method6<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase1.A<(class IBase1`1<class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 8
.locals init (string[] actualResults)
ldc.i4.s 3
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase1.A<(class IBase1`1<class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 3
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass0>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass0>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase1.B<(class IBase1`1<class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 8
.locals init (string[] actualResults)
ldc.i4.s 3
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase1.B<(class IBase1`1<class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 3
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G1_C6.T.T<T0,T1,(class G1_C6`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed {
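// Dispatches ClassMethod1326, ClassMethod1327<object> and Method7<object> on G1_C6`2<!!T0,!!T1>
// through constrained. callvirt on W and validates the three results.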
.maxstack 8
.locals init (string[] actualResults)
ldc.i4.s 3
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G1_C6.T.T<T0,T1,(class G1_C6`2<!!T0,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 3
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C6`2<!!T0,!!T1>::ClassMethod1326()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C6`2<!!T0,!!T1>::ClassMethod1327<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C6`2<!!T0,!!T1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G1_C6.A.T<T1,(class G1_C6`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 8
.locals init (string[] actualResults)
ldc.i4.s 3
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G1_C6.A.T<T1,(class G1_C6`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 3
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C6`2<class BaseClass0,!!T1>::ClassMethod1326()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C6`2<class BaseClass0,!!T1>::ClassMethod1327<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C6`2<class BaseClass0,!!T1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G1_C6.A.A<(class G1_C6`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 8
.locals init (string[] actualResults)
ldc.i4.s 3
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G1_C6.A.A<(class G1_C6`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 3
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass0>::ClassMethod1326()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass0>::ClassMethod1327<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass0>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G1_C6.A.B<(class G1_C6`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 8
.locals init (string[] actualResults)
ldc.i4.s 3
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G1_C6.A.B<(class G1_C6`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 3
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1326()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1327<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G1_C6.B.T<T1,(class G1_C6`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 8
.locals init (string[] actualResults)
ldc.i4.s 3
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G1_C6.B.T<T1,(class G1_C6`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 3
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C6`2<class BaseClass1,!!T1>::ClassMethod1326()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C6`2<class BaseClass1,!!T1>::ClassMethod1327<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C6`2<class BaseClass1,!!T1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G1_C6.B.A<(class G1_C6`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 8
.locals init (string[] actualResults)
ldc.i4.s 3
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G1_C6.B.A<(class G1_C6`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 3
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1326()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1327<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass0>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G1_C6.B.B<(class G1_C6`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 8
.locals init (string[] actualResults)
ldc.i4.s 3
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G1_C6.B.B<(class G1_C6`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 3
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass1>::ClassMethod1326()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass1>::ClassMethod1327<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.T.T<T0,T1,(class IBase2`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed {
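// Dispatches the single interface method IBase2`2<!!T0,!!T1>::Method7<object> through
// constrained. callvirt on W and validates the result.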
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.T.T<T0,T1,(class IBase2`2<!!T0,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<!!T0,!!T1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.A.T<T1,(class IBase2`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.A.T<T1,(class IBase2`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass0,!!T1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.A.A<(class IBase2`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.A.A<(class IBase2`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.A.B<(class IBase2`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.A.B<(class IBase2`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.B.T<T1,(class IBase2`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.B.T<T1,(class IBase2`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass1,!!T1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.B.A<(class IBase2`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.B.A<(class IBase2`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.B.B<(class IBase2`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.B.B<(class IBase2`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method public hidebysig static void MethodCallingTest() cil managed
{
.maxstack 10
.locals init (object V_0)
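// Instantiates each closed generic type and calls its virtual methods directly, through
// base-class casts and through interface references, checking that every call resolves
// to the expected override.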
ldstr "========================== Method Calling Test =========================="
call void [mscorlib]System.Console::WriteLine(string)
newobj instance void class G3_C1246`1<class BaseClass0>::.ctor()
stloc.0
ldloc.0
dup
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1327<object>()
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1326()
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass0>::Method7<object>()
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
ldstr "G2_C293::Method7.MI.7470<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1916<object>()
ldstr "G2_C293::ClassMethod1916.7474<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1915<object>()
ldstr "G3_C1246::ClassMethod1915.MI.15097<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1914()
ldstr "G2_C293::ClassMethod1914.7472()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1913()
ldstr "G3_C1246::ClassMethod1913.MI.15096()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::Method7<object>()
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1327<object>()
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1326()
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
ldstr "G2_C293::Method7.MI.7470<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
castclass class G3_C1246`1<class BaseClass0>
callvirt instance string class G3_C1246`1<class BaseClass0>::ClassMethod3872<object>()
ldstr "G3_C1246::ClassMethod3872.15095<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1246`1<class BaseClass0>
callvirt instance string class G3_C1246`1<class BaseClass0>::ClassMethod3871<object>()
ldstr "G3_C1246::ClassMethod3871.15094<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1246`1<class BaseClass0>
callvirt instance string class G3_C1246`1<class BaseClass0>::ClassMethod3870()
ldstr "G3_C1246::ClassMethod3870.15093()"
ldstr "class G3_C1246`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1246`1<class BaseClass0>
callvirt instance string class G3_C1246`1<class BaseClass0>::Method6<object>()
ldstr "G3_C1246::Method6.15091<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1246`1<class BaseClass0>
callvirt instance string class G3_C1246`1<class BaseClass0>::Method5()
ldstr "G3_C1246::Method5.15089()"
ldstr "class G3_C1246`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1246`1<class BaseClass0>
callvirt instance string class G3_C1246`1<class BaseClass0>::Method4()
ldstr "G3_C1246::Method4.15087()"
ldstr "class G3_C1246`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1246`1<class BaseClass0>
callvirt instance string class G3_C1246`1<class BaseClass0>::ClassMethod1916<object>()
ldstr "G2_C293::ClassMethod1916.7474<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1246`1<class BaseClass0>
callvirt instance string class G3_C1246`1<class BaseClass0>::ClassMethod1915<object>()
ldstr "G3_C1246::ClassMethod1915.MI.15097<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1246`1<class BaseClass0>
callvirt instance string class G3_C1246`1<class BaseClass0>::ClassMethod1914()
ldstr "G2_C293::ClassMethod1914.7472()"
ldstr "class G3_C1246`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1246`1<class BaseClass0>
callvirt instance string class G3_C1246`1<class BaseClass0>::ClassMethod1913()
ldstr "G3_C1246::ClassMethod1913.MI.15096()"
ldstr "class G3_C1246`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1246`1<class BaseClass0>
callvirt instance string class G3_C1246`1<class BaseClass0>::Method7<object>()
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1246`1<class BaseClass0>
callvirt instance string class G3_C1246`1<class BaseClass0>::ClassMethod1327<object>()
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1246`1<class BaseClass0>
callvirt instance string class G3_C1246`1<class BaseClass0>::ClassMethod1326()
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G3_C1246`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method4()
ldstr "G3_C1246::Method4.MI.15088()"
ldstr "class IBase1`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method5()
ldstr "G3_C1246::Method5.MI.15090()"
ldstr "class IBase1`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>()
ldstr "G3_C1246::Method6.MI.15092<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
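// Repeat the same verification sequence for G3_C1246`1<class BaseClass1>.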
newobj instance void class G3_C1246`1<class BaseClass1>::.ctor()
stloc.0
ldloc.0
dup
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1327<object>()
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1326()
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass0>::Method7<object>()
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
ldstr "G2_C293::Method7.MI.7470<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1916<object>()
ldstr "G2_C293::ClassMethod1916.7474<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1915<object>()
ldstr "G3_C1246::ClassMethod1915.MI.15097<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1914()
ldstr "G2_C293::ClassMethod1914.7472()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1913()
ldstr "G3_C1246::ClassMethod1913.MI.15096()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::Method7<object>()
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1327<object>()
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1326()
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
ldstr "G2_C293::Method7.MI.7470<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
castclass class G3_C1246`1<class BaseClass1>
callvirt instance string class G3_C1246`1<class BaseClass1>::ClassMethod3872<object>()
ldstr "G3_C1246::ClassMethod3872.15095<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1246`1<class BaseClass1>
callvirt instance string class G3_C1246`1<class BaseClass1>::ClassMethod3871<object>()
ldstr "G3_C1246::ClassMethod3871.15094<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1246`1<class BaseClass1>
callvirt instance string class G3_C1246`1<class BaseClass1>::ClassMethod3870()
ldstr "G3_C1246::ClassMethod3870.15093()"
ldstr "class G3_C1246`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1246`1<class BaseClass1>
callvirt instance string class G3_C1246`1<class BaseClass1>::Method6<object>()
ldstr "G3_C1246::Method6.15091<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1246`1<class BaseClass1>
callvirt instance string class G3_C1246`1<class BaseClass1>::Method5()
ldstr "G3_C1246::Method5.15089()"
ldstr "class G3_C1246`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1246`1<class BaseClass1>
callvirt instance string class G3_C1246`1<class BaseClass1>::Method4()
ldstr "G3_C1246::Method4.15087()"
ldstr "class G3_C1246`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1246`1<class BaseClass1>
callvirt instance string class G3_C1246`1<class BaseClass1>::ClassMethod1916<object>()
ldstr "G2_C293::ClassMethod1916.7474<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1246`1<class BaseClass1>
callvirt instance string class G3_C1246`1<class BaseClass1>::ClassMethod1915<object>()
ldstr "G3_C1246::ClassMethod1915.MI.15097<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1246`1<class BaseClass1>
callvirt instance string class G3_C1246`1<class BaseClass1>::ClassMethod1914()
ldstr "G2_C293::ClassMethod1914.7472()"
ldstr "class G3_C1246`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1246`1<class BaseClass1>
callvirt instance string class G3_C1246`1<class BaseClass1>::ClassMethod1913()
ldstr "G3_C1246::ClassMethod1913.MI.15096()"
ldstr "class G3_C1246`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1246`1<class BaseClass1>
callvirt instance string class G3_C1246`1<class BaseClass1>::Method7<object>()
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1246`1<class BaseClass1>
callvirt instance string class G3_C1246`1<class BaseClass1>::ClassMethod1327<object>()
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1246`1<class BaseClass1>
callvirt instance string class G3_C1246`1<class BaseClass1>::ClassMethod1326()
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G3_C1246`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
ldstr "G3_C1246::Method4.MI.15088()"
ldstr "class IBase1`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
ldstr "G3_C1246::Method5.MI.15090()"
ldstr "class IBase1`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
ldstr "G3_C1246::Method6.MI.15092<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method4()
ldstr "G3_C1246::Method4.MI.15088()"
ldstr "class IBase1`1<class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method5()
ldstr "G3_C1246::Method5.MI.15090()"
ldstr "class IBase1`1<class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>()
ldstr "G3_C1246::Method6.MI.15092<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
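// Verify the G2_C293`2 instantiations directly (no G3_C1246 overrides in play), starting
// with G2_C293`2<class BaseClass0,class BaseClass0>.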
newobj instance void class G2_C293`2<class BaseClass0,class BaseClass0>::.ctor()
stloc.0
ldloc.0
dup
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1327<object>()
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G2_C293`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1326()
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G2_C293`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass0>::Method7<object>()
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G2_C293`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
ldstr "G2_C293::Method7.MI.7470<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
castclass class G2_C293`2<class BaseClass0,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass0>::ClassMethod1916<object>()
ldstr "G2_C293::ClassMethod1916.7474<System.Object>()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass0> on type class G2_C293`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass0,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass0>::ClassMethod1915<object>()
ldstr "G2_C293::ClassMethod1915.7473<System.Object>()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass0> on type class G2_C293`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass0,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass0>::ClassMethod1914()
ldstr "G2_C293::ClassMethod1914.7472()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass0> on type class G2_C293`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass0,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass0>::ClassMethod1913()
ldstr "G2_C293::ClassMethod1913.7471()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass0> on type class G2_C293`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass0,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass0>::Method7<object>()
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass0> on type class G2_C293`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass0,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass0>::ClassMethod1327<object>()
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass0> on type class G2_C293`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass0,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass0>::ClassMethod1326()
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass0> on type class G2_C293`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
ldstr "G2_C293::Method7.MI.7470<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C293`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
newobj instance void class G2_C293`2<class BaseClass0,class BaseClass1>::.ctor()
stloc.0
ldloc.0
dup
castclass class G1_C6`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass1>::ClassMethod1327<object>()
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C6`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass1>::ClassMethod1326()
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C6`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
ldstr "G2_C293::Method7.MI.7470<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
castclass class G2_C293`2<class BaseClass0,class BaseClass1>
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass1>::ClassMethod1916<object>()
ldstr "G2_C293::ClassMethod1916.7474<System.Object>()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass0,class BaseClass1>
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass1>::ClassMethod1915<object>()
ldstr "G2_C293::ClassMethod1915.7473<System.Object>()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass0,class BaseClass1>
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass1>::ClassMethod1914()
ldstr "G2_C293::ClassMethod1914.7472()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass0,class BaseClass1>
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass1>::ClassMethod1913()
ldstr "G2_C293::ClassMethod1913.7471()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass0,class BaseClass1>
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass1>::Method7<object>()
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass0,class BaseClass1>
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass1>::ClassMethod1327<object>()
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass0,class BaseClass1>
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass1>::ClassMethod1326()
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
ldstr "G2_C293::Method7.MI.7470<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C293`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
newobj instance void class G2_C293`2<class BaseClass1,class BaseClass0>::.ctor()
stloc.0
ldloc.0
dup
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1327<object>()
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G2_C293`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1326()
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G2_C293`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass0>::Method7<object>()
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G2_C293`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
ldstr "G2_C293::Method7.MI.7470<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1916<object>()
ldstr "G2_C293::ClassMethod1916.7474<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G2_C293`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1915<object>()
ldstr "G2_C293::ClassMethod1915.7473<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G2_C293`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1914()
ldstr "G2_C293::ClassMethod1914.7472()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G2_C293`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1913()
ldstr "G2_C293::ClassMethod1913.7471()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G2_C293`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::Method7<object>()
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G2_C293`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1327<object>()
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G2_C293`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1326()
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G2_C293`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
ldstr "G2_C293::Method7.MI.7470<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C293`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
newobj instance void class G2_C293`2<class BaseClass1,class BaseClass1>::.ctor()
stloc.0
ldloc.0
dup
castclass class G1_C6`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass1>::ClassMethod1327<object>()
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C6`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass1>::ClassMethod1326()
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C6`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
ldstr "G2_C293::Method7.MI.7470<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass1>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass1>::ClassMethod1916<object>()
ldstr "G2_C293::ClassMethod1916.7474<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass1>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass1>::ClassMethod1915<object>()
ldstr "G2_C293::ClassMethod1915.7473<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass1>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass1>::ClassMethod1914()
ldstr "G2_C293::ClassMethod1914.7472()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass1>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass1>::ClassMethod1913()
ldstr "G2_C293::ClassMethod1913.7471()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass1>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass1>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass1>::ClassMethod1327<object>()
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass1>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass1>::ClassMethod1326()
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
ldstr "G2_C293::Method7.MI.7470<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C293`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
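// Verify the plain G1_C6`2 instantiations, where ClassMethod1327 and Method7 resolve to
// G1_C6's own implementations.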
newobj instance void class G1_C6`2<class BaseClass0,class BaseClass0>::.ctor()
stloc.0
ldloc.0
dup
castclass class G1_C6`2<class BaseClass0,class BaseClass0>
callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass0>::ClassMethod1327<object>()
ldstr "G1_C6::ClassMethod1327.4810<System.Object>()"
ldstr "class G1_C6`2<class BaseClass0,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C6`2<class BaseClass0,class BaseClass0>
callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass0>::ClassMethod1326()
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G1_C6`2<class BaseClass0,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C6`2<class BaseClass0,class BaseClass0>
callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass0>::Method7<object>()
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class G1_C6`2<class BaseClass0,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>()
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
newobj instance void class G1_C6`2<class BaseClass0,class BaseClass1>::.ctor()
stloc.0
ldloc.0
dup
castclass class G1_C6`2<class BaseClass0,class BaseClass1>
callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1327<object>()
ldstr "G1_C6::ClassMethod1327.4810<System.Object>()"
ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C6`2<class BaseClass0,class BaseClass1>
callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1326()
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C6`2<class BaseClass0,class BaseClass1>
callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass1>::Method7<object>()
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>()
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
newobj instance void class G1_C6`2<class BaseClass1,class BaseClass0>::.ctor()
stloc.0
ldloc.0
dup
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1327<object>()
ldstr "G1_C6::ClassMethod1327.4810<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G1_C6`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1326()
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G1_C6`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass0>::Method7<object>()
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G1_C6`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
newobj instance void class G1_C6`2<class BaseClass1,class BaseClass1>::.ctor()
stloc.0
ldloc.0
dup
castclass class G1_C6`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass1>::ClassMethod1327<object>()
ldstr "G1_C6::ClassMethod1327.4810<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C6`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass1>::ClassMethod1326()
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C6`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
.method public hidebysig static void ConstrainedCallsTest() cil managed
{
.maxstack 10
.locals init (object V_0)
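        // Constrained-calls scenario: each instantiation created below is passed, together with an
        // expected '#'-separated method-result string, to a Generated774::M.* generic helper that
        // performs the calls through its type parameter and compares against the expected results.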
ldstr "========================== Constrained Calls Test =========================="
call void [mscorlib]System.Console::WriteLine(string)
newobj instance void class G3_C1246`1<class BaseClass0>::.ctor()
stloc.0
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G1_C6.T.T<class BaseClass1,class BaseClass0,class G3_C1246`1<class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G1_C6.B.T<class BaseClass0,class G3_C1246`1<class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G1_C6.B.A<class G3_C1246`1<class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G3_C1246`1<class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.IBase2.B.T<class BaseClass1,class G3_C1246`1<class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.IBase2.B.B<class G3_C1246`1<class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G3_C1246`1<class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.A.T<class BaseClass1,class G3_C1246`1<class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.A.B<class G3_C1246`1<class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G3_C1246::ClassMethod1913.MI.15096()#G2_C293::ClassMethod1914.7472()#G3_C1246::ClassMethod1915.MI.15097<System.Object>()#G2_C293::ClassMethod1916.7474<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G2_C293.T.T<class BaseClass1,class BaseClass0,class G3_C1246`1<class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G3_C1246::ClassMethod1913.MI.15096()#G2_C293::ClassMethod1914.7472()#G3_C1246::ClassMethod1915.MI.15097<System.Object>()#G2_C293::ClassMethod1916.7474<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G2_C293.B.T<class BaseClass0,class G3_C1246`1<class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G3_C1246::ClassMethod1913.MI.15096()#G2_C293::ClassMethod1914.7472()#G3_C1246::ClassMethod1915.MI.15097<System.Object>()#G2_C293::ClassMethod1916.7474<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G2_C293.B.A<class G3_C1246`1<class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G3_C1246`1<class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.A.T<class BaseClass0,class G3_C1246`1<class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.A.A<class G3_C1246`1<class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G3_C1246::ClassMethod1913.MI.15096()#G2_C293::ClassMethod1914.7472()#G3_C1246::ClassMethod1915.MI.15097<System.Object>()#G2_C293::ClassMethod1916.7474<System.Object>()#G3_C1246::ClassMethod3870.15093()#G3_C1246::ClassMethod3871.15094<System.Object>()#G3_C1246::ClassMethod3872.15095<System.Object>()#G3_C1246::Method4.15087()#G3_C1246::Method5.15089()#G3_C1246::Method6.15091<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G3_C1246.T<class BaseClass0,class G3_C1246`1<class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G3_C1246::ClassMethod1913.MI.15096()#G2_C293::ClassMethod1914.7472()#G3_C1246::ClassMethod1915.MI.15097<System.Object>()#G2_C293::ClassMethod1916.7474<System.Object>()#G3_C1246::ClassMethod3870.15093()#G3_C1246::ClassMethod3871.15094<System.Object>()#G3_C1246::ClassMethod3872.15095<System.Object>()#G3_C1246::Method4.15087()#G3_C1246::Method5.15089()#G3_C1246::Method6.15091<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G3_C1246.A<class G3_C1246`1<class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G3_C1246::Method4.MI.15088()#G3_C1246::Method5.MI.15090()#G3_C1246::Method6.MI.15092<System.Object>()#"
call void Generated774::M.IBase1.T<class BaseClass0,class G3_C1246`1<class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G3_C1246::Method4.MI.15088()#G3_C1246::Method5.MI.15090()#G3_C1246::Method6.MI.15092<System.Object>()#"
call void Generated774::M.IBase1.A<class G3_C1246`1<class BaseClass0>>(!!0,string)
newobj instance void class G3_C1246`1<class BaseClass1>::.ctor()
stloc.0
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G1_C6.T.T<class BaseClass1,class BaseClass0,class G3_C1246`1<class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G1_C6.B.T<class BaseClass0,class G3_C1246`1<class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G1_C6.B.A<class G3_C1246`1<class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G3_C1246`1<class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.IBase2.B.T<class BaseClass1,class G3_C1246`1<class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.IBase2.B.B<class G3_C1246`1<class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G3_C1246`1<class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.A.T<class BaseClass1,class G3_C1246`1<class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.A.B<class G3_C1246`1<class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G3_C1246::ClassMethod1913.MI.15096()#G2_C293::ClassMethod1914.7472()#G3_C1246::ClassMethod1915.MI.15097<System.Object>()#G2_C293::ClassMethod1916.7474<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G2_C293.T.T<class BaseClass1,class BaseClass0,class G3_C1246`1<class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G3_C1246::ClassMethod1913.MI.15096()#G2_C293::ClassMethod1914.7472()#G3_C1246::ClassMethod1915.MI.15097<System.Object>()#G2_C293::ClassMethod1916.7474<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G2_C293.B.T<class BaseClass0,class G3_C1246`1<class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G3_C1246::ClassMethod1913.MI.15096()#G2_C293::ClassMethod1914.7472()#G3_C1246::ClassMethod1915.MI.15097<System.Object>()#G2_C293::ClassMethod1916.7474<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G2_C293.B.A<class G3_C1246`1<class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G3_C1246`1<class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.A.T<class BaseClass0,class G3_C1246`1<class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.A.A<class G3_C1246`1<class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G3_C1246::ClassMethod1913.MI.15096()#G2_C293::ClassMethod1914.7472()#G3_C1246::ClassMethod1915.MI.15097<System.Object>()#G2_C293::ClassMethod1916.7474<System.Object>()#G3_C1246::ClassMethod3870.15093()#G3_C1246::ClassMethod3871.15094<System.Object>()#G3_C1246::ClassMethod3872.15095<System.Object>()#G3_C1246::Method4.15087()#G3_C1246::Method5.15089()#G3_C1246::Method6.15091<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G3_C1246.T<class BaseClass1,class G3_C1246`1<class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G3_C1246::ClassMethod1913.MI.15096()#G2_C293::ClassMethod1914.7472()#G3_C1246::ClassMethod1915.MI.15097<System.Object>()#G2_C293::ClassMethod1916.7474<System.Object>()#G3_C1246::ClassMethod3870.15093()#G3_C1246::ClassMethod3871.15094<System.Object>()#G3_C1246::ClassMethod3872.15095<System.Object>()#G3_C1246::Method4.15087()#G3_C1246::Method5.15089()#G3_C1246::Method6.15091<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G3_C1246.B<class G3_C1246`1<class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G3_C1246::Method4.MI.15088()#G3_C1246::Method5.MI.15090()#G3_C1246::Method6.MI.15092<System.Object>()#"
call void Generated774::M.IBase1.T<class BaseClass1,class G3_C1246`1<class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G3_C1246::Method4.MI.15088()#G3_C1246::Method5.MI.15090()#G3_C1246::Method6.MI.15092<System.Object>()#"
call void Generated774::M.IBase1.B<class G3_C1246`1<class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G3_C1246::Method4.MI.15088()#G3_C1246::Method5.MI.15090()#G3_C1246::Method6.MI.15092<System.Object>()#"
call void Generated774::M.IBase1.T<class BaseClass0,class G3_C1246`1<class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G3_C1246::Method4.MI.15088()#G3_C1246::Method5.MI.15090()#G3_C1246::Method6.MI.15092<System.Object>()#"
call void Generated774::M.IBase1.A<class G3_C1246`1<class BaseClass1>>(!!0,string)
newobj instance void class G2_C293`2<class BaseClass0,class BaseClass0>::.ctor()
stloc.0
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G1_C6.T.T<class BaseClass1,class BaseClass0,class G2_C293`2<class BaseClass0,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G1_C6.B.T<class BaseClass0,class G2_C293`2<class BaseClass0,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G1_C6.B.A<class G2_C293`2<class BaseClass0,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G2_C293`2<class BaseClass0,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.IBase2.B.T<class BaseClass1,class G2_C293`2<class BaseClass0,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.IBase2.B.B<class G2_C293`2<class BaseClass0,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C293`2<class BaseClass0,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.A.T<class BaseClass1,class G2_C293`2<class BaseClass0,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.A.B<class G2_C293`2<class BaseClass0,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::ClassMethod1913.7471()#G2_C293::ClassMethod1914.7472()#G2_C293::ClassMethod1915.7473<System.Object>()#G2_C293::ClassMethod1916.7474<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G2_C293.T.T<class BaseClass0,class BaseClass0,class G2_C293`2<class BaseClass0,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::ClassMethod1913.7471()#G2_C293::ClassMethod1914.7472()#G2_C293::ClassMethod1915.7473<System.Object>()#G2_C293::ClassMethod1916.7474<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G2_C293.A.T<class BaseClass0,class G2_C293`2<class BaseClass0,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::ClassMethod1913.7471()#G2_C293::ClassMethod1914.7472()#G2_C293::ClassMethod1915.7473<System.Object>()#G2_C293::ClassMethod1916.7474<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G2_C293.A.A<class G2_C293`2<class BaseClass0,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G2_C293`2<class BaseClass0,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.A.T<class BaseClass0,class G2_C293`2<class BaseClass0,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.A.A<class G2_C293`2<class BaseClass0,class BaseClass0>>(!!0,string)
newobj instance void class G2_C293`2<class BaseClass0,class BaseClass1>::.ctor()
stloc.0
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G1_C6.T.T<class BaseClass1,class BaseClass1,class G2_C293`2<class BaseClass0,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G1_C6.B.T<class BaseClass1,class G2_C293`2<class BaseClass0,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G1_C6.B.B<class G2_C293`2<class BaseClass0,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G2_C293`2<class BaseClass0,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.IBase2.B.T<class BaseClass1,class G2_C293`2<class BaseClass0,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.IBase2.B.B<class G2_C293`2<class BaseClass0,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C293`2<class BaseClass0,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.A.T<class BaseClass1,class G2_C293`2<class BaseClass0,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.A.B<class G2_C293`2<class BaseClass0,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::ClassMethod1913.7471()#G2_C293::ClassMethod1914.7472()#G2_C293::ClassMethod1915.7473<System.Object>()#G2_C293::ClassMethod1916.7474<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G2_C293.T.T<class BaseClass0,class BaseClass1,class G2_C293`2<class BaseClass0,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::ClassMethod1913.7471()#G2_C293::ClassMethod1914.7472()#G2_C293::ClassMethod1915.7473<System.Object>()#G2_C293::ClassMethod1916.7474<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G2_C293.A.T<class BaseClass1,class G2_C293`2<class BaseClass0,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::ClassMethod1913.7471()#G2_C293::ClassMethod1914.7472()#G2_C293::ClassMethod1915.7473<System.Object>()#G2_C293::ClassMethod1916.7474<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G2_C293.A.B<class G2_C293`2<class BaseClass0,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G2_C293`2<class BaseClass0,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.A.T<class BaseClass0,class G2_C293`2<class BaseClass0,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.A.A<class G2_C293`2<class BaseClass0,class BaseClass1>>(!!0,string)
newobj instance void class G2_C293`2<class BaseClass1,class BaseClass0>::.ctor()
stloc.0
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G1_C6.T.T<class BaseClass1,class BaseClass0,class G2_C293`2<class BaseClass1,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G1_C6.B.T<class BaseClass0,class G2_C293`2<class BaseClass1,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G1_C6.B.A<class G2_C293`2<class BaseClass1,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G2_C293`2<class BaseClass1,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.IBase2.B.T<class BaseClass1,class G2_C293`2<class BaseClass1,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.IBase2.B.B<class G2_C293`2<class BaseClass1,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C293`2<class BaseClass1,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.A.T<class BaseClass1,class G2_C293`2<class BaseClass1,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.A.B<class G2_C293`2<class BaseClass1,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::ClassMethod1913.7471()#G2_C293::ClassMethod1914.7472()#G2_C293::ClassMethod1915.7473<System.Object>()#G2_C293::ClassMethod1916.7474<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G2_C293.T.T<class BaseClass1,class BaseClass0,class G2_C293`2<class BaseClass1,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::ClassMethod1913.7471()#G2_C293::ClassMethod1914.7472()#G2_C293::ClassMethod1915.7473<System.Object>()#G2_C293::ClassMethod1916.7474<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G2_C293.B.T<class BaseClass0,class G2_C293`2<class BaseClass1,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::ClassMethod1913.7471()#G2_C293::ClassMethod1914.7472()#G2_C293::ClassMethod1915.7473<System.Object>()#G2_C293::ClassMethod1916.7474<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G2_C293.B.A<class G2_C293`2<class BaseClass1,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G2_C293`2<class BaseClass1,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.A.T<class BaseClass0,class G2_C293`2<class BaseClass1,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.A.A<class G2_C293`2<class BaseClass1,class BaseClass0>>(!!0,string)
newobj instance void class G2_C293`2<class BaseClass1,class BaseClass1>::.ctor()
stloc.0
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G1_C6.T.T<class BaseClass1,class BaseClass1,class G2_C293`2<class BaseClass1,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G1_C6.B.T<class BaseClass1,class G2_C293`2<class BaseClass1,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G1_C6.B.B<class G2_C293`2<class BaseClass1,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G2_C293`2<class BaseClass1,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.IBase2.B.T<class BaseClass1,class G2_C293`2<class BaseClass1,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.IBase2.B.B<class G2_C293`2<class BaseClass1,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C293`2<class BaseClass1,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.A.T<class BaseClass1,class G2_C293`2<class BaseClass1,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.A.B<class G2_C293`2<class BaseClass1,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::ClassMethod1913.7471()#G2_C293::ClassMethod1914.7472()#G2_C293::ClassMethod1915.7473<System.Object>()#G2_C293::ClassMethod1916.7474<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G2_C293.T.T<class BaseClass1,class BaseClass1,class G2_C293`2<class BaseClass1,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::ClassMethod1913.7471()#G2_C293::ClassMethod1914.7472()#G2_C293::ClassMethod1915.7473<System.Object>()#G2_C293::ClassMethod1916.7474<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G2_C293.B.T<class BaseClass1,class G2_C293`2<class BaseClass1,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::ClassMethod1913.7471()#G2_C293::ClassMethod1914.7472()#G2_C293::ClassMethod1915.7473<System.Object>()#G2_C293::ClassMethod1916.7474<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G2_C293.B.B<class G2_C293`2<class BaseClass1,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G2_C293`2<class BaseClass1,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.A.T<class BaseClass0,class G2_C293`2<class BaseClass1,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.A.A<class G2_C293`2<class BaseClass1,class BaseClass1>>(!!0,string)
newobj instance void class G1_C6`2<class BaseClass0,class BaseClass0>::.ctor()
stloc.0
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G1_C6::ClassMethod1327.4810<System.Object>()#G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.G1_C6.T.T<class BaseClass0,class BaseClass0,class G1_C6`2<class BaseClass0,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G1_C6::ClassMethod1327.4810<System.Object>()#G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.G1_C6.A.T<class BaseClass0,class G1_C6`2<class BaseClass0,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G1_C6::ClassMethod1327.4810<System.Object>()#G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.G1_C6.A.A<class G1_C6`2<class BaseClass0,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass1,class BaseClass0,class G1_C6`2<class BaseClass0,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.B.T<class BaseClass0,class G1_C6`2<class BaseClass0,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.B.A<class G1_C6`2<class BaseClass0,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G1_C6`2<class BaseClass0,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.A.T<class BaseClass0,class G1_C6`2<class BaseClass0,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.A.A<class G1_C6`2<class BaseClass0,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G1_C6`2<class BaseClass0,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.A.T<class BaseClass1,class G1_C6`2<class BaseClass0,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.A.B<class G1_C6`2<class BaseClass0,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G1_C6`2<class BaseClass0,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.B.T<class BaseClass1,class G1_C6`2<class BaseClass0,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.B.B<class G1_C6`2<class BaseClass0,class BaseClass0>>(!!0,string)
newobj instance void class G1_C6`2<class BaseClass0,class BaseClass1>::.ctor()
stloc.0
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G1_C6::ClassMethod1327.4810<System.Object>()#G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.G1_C6.T.T<class BaseClass0,class BaseClass1,class G1_C6`2<class BaseClass0,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G1_C6::ClassMethod1327.4810<System.Object>()#G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.G1_C6.A.T<class BaseClass1,class G1_C6`2<class BaseClass0,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G1_C6::ClassMethod1327.4810<System.Object>()#G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.G1_C6.A.B<class G1_C6`2<class BaseClass0,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass1,class BaseClass0,class G1_C6`2<class BaseClass0,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.B.T<class BaseClass0,class G1_C6`2<class BaseClass0,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.B.A<class G1_C6`2<class BaseClass0,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G1_C6`2<class BaseClass0,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.A.T<class BaseClass0,class G1_C6`2<class BaseClass0,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.A.A<class G1_C6`2<class BaseClass0,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G1_C6`2<class BaseClass0,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.A.T<class BaseClass1,class G1_C6`2<class BaseClass0,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.A.B<class G1_C6`2<class BaseClass0,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G1_C6`2<class BaseClass0,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.B.T<class BaseClass1,class G1_C6`2<class BaseClass0,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.B.B<class G1_C6`2<class BaseClass0,class BaseClass1>>(!!0,string)
newobj instance void class G1_C6`2<class BaseClass1,class BaseClass0>::.ctor()
stloc.0
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G1_C6::ClassMethod1327.4810<System.Object>()#G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.G1_C6.T.T<class BaseClass1,class BaseClass0,class G1_C6`2<class BaseClass1,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G1_C6::ClassMethod1327.4810<System.Object>()#G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.G1_C6.B.T<class BaseClass0,class G1_C6`2<class BaseClass1,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G1_C6::ClassMethod1327.4810<System.Object>()#G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.G1_C6.B.A<class G1_C6`2<class BaseClass1,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G1_C6`2<class BaseClass1,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.B.T<class BaseClass1,class G1_C6`2<class BaseClass1,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.B.B<class G1_C6`2<class BaseClass1,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G1_C6`2<class BaseClass1,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.A.T<class BaseClass1,class G1_C6`2<class BaseClass1,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.A.B<class G1_C6`2<class BaseClass1,class BaseClass0>>(!!0,string)
newobj instance void class G1_C6`2<class BaseClass1,class BaseClass1>::.ctor()
stloc.0
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G1_C6::ClassMethod1327.4810<System.Object>()#G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.G1_C6.T.T<class BaseClass1,class BaseClass1,class G1_C6`2<class BaseClass1,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G1_C6::ClassMethod1327.4810<System.Object>()#G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.G1_C6.B.T<class BaseClass1,class G1_C6`2<class BaseClass1,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G1_C6::ClassMethod1327.4810<System.Object>()#G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.G1_C6.B.B<class G1_C6`2<class BaseClass1,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G1_C6`2<class BaseClass1,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.B.T<class BaseClass1,class G1_C6`2<class BaseClass1,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.B.B<class G1_C6`2<class BaseClass1,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G1_C6`2<class BaseClass1,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.A.T<class BaseClass1,class G1_C6`2<class BaseClass1,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.A.B<class G1_C6`2<class BaseClass1,class BaseClass1>>(!!0,string)
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
.method public hidebysig static void StructConstrainedInterfaceCallsTest() cil managed
{
.maxstack 10
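        // No struct-backed cases exist for these generated types, so this scenario only writes the
        // banner and separator lines.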
ldstr "===================== Struct Constrained Interface Calls Test ====================="
call void [mscorlib]System.Console::WriteLine(string)
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
.method public hidebysig static void CalliTest() cil managed
{
.maxstack 10
.locals init (object V_0)
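        // Calli scenario: each case fetches a virtual slot with ldvirtftn, invokes it indirectly
        // through calli, and passes the returned string plus the expected override name to
        // TestFramework::MethodCallTest for verification.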
ldstr "========================== Method Calli Test =========================="
call void [mscorlib]System.Console::WriteLine(string)
newobj instance void class G3_C1246`1<class BaseClass0>::.ctor()
stloc.0
ldloc.0
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1327<object>()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1326()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass0>::Method7<object>()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G2_C293::Method7.MI.7470<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1916<object>()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G2_C293::ClassMethod1916.7474<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1915<object>()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G3_C1246::ClassMethod1915.MI.15097<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1914()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G2_C293::ClassMethod1914.7472()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1913()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G3_C1246::ClassMethod1913.MI.15096()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass0>::Method7<object>()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1327<object>()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1326()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G2_C293::Method7.MI.7470<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1246`1<class BaseClass0>
ldloc.0
ldvirtftn instance string class G3_C1246`1<class BaseClass0>::ClassMethod3872<object>()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G3_C1246::ClassMethod3872.15095<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1246`1<class BaseClass0>
ldloc.0
ldvirtftn instance string class G3_C1246`1<class BaseClass0>::ClassMethod3871<object>()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G3_C1246::ClassMethod3871.15094<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1246`1<class BaseClass0>
ldloc.0
ldvirtftn instance string class G3_C1246`1<class BaseClass0>::ClassMethod3870()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G3_C1246::ClassMethod3870.15093()"
ldstr "class G3_C1246`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1246`1<class BaseClass0>
ldloc.0
ldvirtftn instance string class G3_C1246`1<class BaseClass0>::Method6<object>()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G3_C1246::Method6.15091<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1246`1<class BaseClass0>
ldloc.0
ldvirtftn instance string class G3_C1246`1<class BaseClass0>::Method5()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G3_C1246::Method5.15089()"
ldstr "class G3_C1246`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1246`1<class BaseClass0>
ldloc.0
ldvirtftn instance string class G3_C1246`1<class BaseClass0>::Method4()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G3_C1246::Method4.15087()"
ldstr "class G3_C1246`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1246`1<class BaseClass0>
ldloc.0
ldvirtftn instance string class G3_C1246`1<class BaseClass0>::ClassMethod1916<object>()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G2_C293::ClassMethod1916.7474<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1246`1<class BaseClass0>
ldloc.0
ldvirtftn instance string class G3_C1246`1<class BaseClass0>::ClassMethod1915<object>()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G3_C1246::ClassMethod1915.MI.15097<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1246`1<class BaseClass0>
ldloc.0
ldvirtftn instance string class G3_C1246`1<class BaseClass0>::ClassMethod1914()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G2_C293::ClassMethod1914.7472()"
ldstr "class G3_C1246`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1246`1<class BaseClass0>
ldloc.0
ldvirtftn instance string class G3_C1246`1<class BaseClass0>::ClassMethod1913()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G3_C1246::ClassMethod1913.MI.15096()"
ldstr "class G3_C1246`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1246`1<class BaseClass0>
ldloc.0
ldvirtftn instance string class G3_C1246`1<class BaseClass0>::Method7<object>()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1246`1<class BaseClass0>
ldloc.0
ldvirtftn instance string class G3_C1246`1<class BaseClass0>::ClassMethod1327<object>()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1246`1<class BaseClass0>
ldloc.0
ldvirtftn instance string class G3_C1246`1<class BaseClass0>::ClassMethod1326()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G3_C1246`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G3_C1246::Method4.MI.15088()"
ldstr "class IBase1`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G3_C1246::Method5.MI.15090()"
ldstr "class IBase1`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G3_C1246::Method6.MI.15092<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
newobj instance void class G3_C1246`1<class BaseClass1>::.ctor()
stloc.0
ldloc.0
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1327<object>()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1326()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass0>::Method7<object>()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G2_C293::Method7.MI.7470<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1916<object>()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G2_C293::ClassMethod1916.7474<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1915<object>()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G3_C1246::ClassMethod1915.MI.15097<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1914()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G2_C293::ClassMethod1914.7472()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1913()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G3_C1246::ClassMethod1913.MI.15096()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass0>::Method7<object>()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1327<object>()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1326()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G2_C293::Method7.MI.7470<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1246`1<class BaseClass1>
ldloc.0
ldvirtftn instance string class G3_C1246`1<class BaseClass1>::ClassMethod3872<object>()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G3_C1246::ClassMethod3872.15095<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1246`1<class BaseClass1>
ldloc.0
ldvirtftn instance string class G3_C1246`1<class BaseClass1>::ClassMethod3871<object>()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G3_C1246::ClassMethod3871.15094<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1246`1<class BaseClass1>
ldloc.0
ldvirtftn instance string class G3_C1246`1<class BaseClass1>::ClassMethod3870()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G3_C1246::ClassMethod3870.15093()"
ldstr "class G3_C1246`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1246`1<class BaseClass1>
ldloc.0
ldvirtftn instance string class G3_C1246`1<class BaseClass1>::Method6<object>()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G3_C1246::Method6.15091<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1246`1<class BaseClass1>
ldloc.0
ldvirtftn instance string class G3_C1246`1<class BaseClass1>::Method5()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G3_C1246::Method5.15089()"
ldstr "class G3_C1246`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1246`1<class BaseClass1>
ldloc.0
ldvirtftn instance string class G3_C1246`1<class BaseClass1>::Method4()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G3_C1246::Method4.15087()"
ldstr "class G3_C1246`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1246`1<class BaseClass1>
ldloc.0
ldvirtftn instance string class G3_C1246`1<class BaseClass1>::ClassMethod1916<object>()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G2_C293::ClassMethod1916.7474<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1246`1<class BaseClass1>
ldloc.0
ldvirtftn instance string class G3_C1246`1<class BaseClass1>::ClassMethod1915<object>()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G3_C1246::ClassMethod1915.MI.15097<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1246`1<class BaseClass1>
ldloc.0
ldvirtftn instance string class G3_C1246`1<class BaseClass1>::ClassMethod1914()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G2_C293::ClassMethod1914.7472()"
ldstr "class G3_C1246`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1246`1<class BaseClass1>
ldloc.0
ldvirtftn instance string class G3_C1246`1<class BaseClass1>::ClassMethod1913()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G3_C1246::ClassMethod1913.MI.15096()"
ldstr "class G3_C1246`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1246`1<class BaseClass1>
ldloc.0
ldvirtftn instance string class G3_C1246`1<class BaseClass1>::Method7<object>()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1246`1<class BaseClass1>
ldloc.0
ldvirtftn instance string class G3_C1246`1<class BaseClass1>::ClassMethod1327<object>()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1246`1<class BaseClass1>
ldloc.0
ldvirtftn instance string class G3_C1246`1<class BaseClass1>::ClassMethod1326()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G3_C1246`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G3_C1246::Method4.MI.15088()"
ldstr "class IBase1`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G3_C1246::Method5.MI.15090()"
ldstr "class IBase1`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G3_C1246::Method6.MI.15092<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G3_C1246::Method4.MI.15088()"
ldstr "class IBase1`1<class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G3_C1246::Method5.MI.15090()"
ldstr "class IBase1`1<class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G3_C1246::Method6.MI.15092<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
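        // Same pattern for the G2_C293`2 instantiations: ldvirtftn resolves slots against the
        // runtime type, so calls made through the G1_C6`2 and IBase2`2 views reach G2_C293's overrides.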
newobj instance void class G2_C293`2<class BaseClass0,class BaseClass0>::.ctor()
stloc.0
ldloc.0
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1327<object>()
calli default string(class G2_C293`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G2_C293`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1326()
calli default string(class G2_C293`2<class BaseClass0,class BaseClass0>)
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G2_C293`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass0>::Method7<object>()
calli default string(class G2_C293`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G2_C293`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G2_C293`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
calli default string(class G2_C293`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C293::Method7.MI.7470<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass0,class BaseClass0>::ClassMethod1916<object>()
calli default string(class G2_C293`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C293::ClassMethod1916.7474<System.Object>()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass0> on type class G2_C293`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass0,class BaseClass0>::ClassMethod1915<object>()
calli default string(class G2_C293`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C293::ClassMethod1915.7473<System.Object>()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass0> on type class G2_C293`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass0,class BaseClass0>::ClassMethod1914()
calli default string(class G2_C293`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C293::ClassMethod1914.7472()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass0> on type class G2_C293`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass0,class BaseClass0>::ClassMethod1913()
calli default string(class G2_C293`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C293::ClassMethod1913.7471()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass0> on type class G2_C293`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass0,class BaseClass0>::Method7<object>()
calli default string(class G2_C293`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass0> on type class G2_C293`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass0,class BaseClass0>::ClassMethod1327<object>()
calli default string(class G2_C293`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass0> on type class G2_C293`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass0,class BaseClass0>::ClassMethod1326()
calli default string(class G2_C293`2<class BaseClass0,class BaseClass0>)
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass0> on type class G2_C293`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
calli default string(class G2_C293`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C293::Method7.MI.7470<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C293`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
newobj instance void class G2_C293`2<class BaseClass0,class BaseClass1>::.ctor()
stloc.0
ldloc.0
castclass class G1_C6`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass1>::ClassMethod1327<object>()
calli default string(class G2_C293`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C6`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass1>::ClassMethod1326()
calli default string(class G2_C293`2<class BaseClass0,class BaseClass1>)
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C6`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G2_C293`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G2_C293`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
calli default string(class G2_C293`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C293::Method7.MI.7470<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass0,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass0,class BaseClass1>::ClassMethod1916<object>()
calli default string(class G2_C293`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C293::ClassMethod1916.7474<System.Object>()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass0,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass0,class BaseClass1>::ClassMethod1915<object>()
calli default string(class G2_C293`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C293::ClassMethod1915.7473<System.Object>()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass0,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass0,class BaseClass1>::ClassMethod1914()
calli default string(class G2_C293`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C293::ClassMethod1914.7472()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass0,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass0,class BaseClass1>::ClassMethod1913()
calli default string(class G2_C293`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C293::ClassMethod1913.7471()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass0,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass0,class BaseClass1>::Method7<object>()
calli default string(class G2_C293`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass0,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass0,class BaseClass1>::ClassMethod1327<object>()
calli default string(class G2_C293`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass0,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass0,class BaseClass1>::ClassMethod1326()
calli default string(class G2_C293`2<class BaseClass0,class BaseClass1>)
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
calli default string(class G2_C293`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C293::Method7.MI.7470<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C293`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
newobj instance void class G2_C293`2<class BaseClass1,class BaseClass0>::.ctor()
stloc.0
ldloc.0
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1327<object>()
calli default string(class G2_C293`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G2_C293`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1326()
calli default string(class G2_C293`2<class BaseClass1,class BaseClass0>)
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G2_C293`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass0>::Method7<object>()
calli default string(class G2_C293`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G2_C293`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G2_C293`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
calli default string(class G2_C293`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C293::Method7.MI.7470<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1916<object>()
calli default string(class G2_C293`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C293::ClassMethod1916.7474<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G2_C293`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1915<object>()
calli default string(class G2_C293`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C293::ClassMethod1915.7473<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G2_C293`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1914()
calli default string(class G2_C293`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C293::ClassMethod1914.7472()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G2_C293`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1913()
calli default string(class G2_C293`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C293::ClassMethod1913.7471()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G2_C293`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass0>::Method7<object>()
calli default string(class G2_C293`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G2_C293`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1327<object>()
calli default string(class G2_C293`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G2_C293`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1326()
calli default string(class G2_C293`2<class BaseClass1,class BaseClass0>)
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G2_C293`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
calli default string(class G2_C293`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C293::Method7.MI.7470<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C293`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
newobj instance void class G2_C293`2<class BaseClass1,class BaseClass1>::.ctor()
stloc.0
ldloc.0
castclass class G1_C6`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass1>::ClassMethod1327<object>()
calli default string(class G2_C293`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C6`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass1>::ClassMethod1326()
calli default string(class G2_C293`2<class BaseClass1,class BaseClass1>)
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C6`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G2_C293`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G2_C293`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
calli default string(class G2_C293`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C293::Method7.MI.7470<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass1>::ClassMethod1916<object>()
calli default string(class G2_C293`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C293::ClassMethod1916.7474<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass1>::ClassMethod1915<object>()
calli default string(class G2_C293`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C293::ClassMethod1915.7473<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass1>::ClassMethod1914()
calli default string(class G2_C293`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C293::ClassMethod1914.7472()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass1>::ClassMethod1913()
calli default string(class G2_C293`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C293::ClassMethod1913.7471()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G2_C293`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass1>::ClassMethod1327<object>()
calli default string(class G2_C293`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass1>::ClassMethod1326()
calli default string(class G2_C293`2<class BaseClass1,class BaseClass1>)
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
calli default string(class G2_C293`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C293::Method7.MI.7470<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C293`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
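        // Plain G1_C6`2 instances last: with no derived overrides in play, every slot,
        // including the IBase2`2 interface views, resolves to G1_C6's own methods.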
newobj instance void class G1_C6`2<class BaseClass0,class BaseClass0>::.ctor()
stloc.0
ldloc.0
castclass class G1_C6`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass0,class BaseClass0>::ClassMethod1327<object>()
calli default string(class G1_C6`2<class BaseClass0,class BaseClass0>)
ldstr "G1_C6::ClassMethod1327.4810<System.Object>()"
ldstr "class G1_C6`2<class BaseClass0,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C6`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass0,class BaseClass0>::ClassMethod1326()
calli default string(class G1_C6`2<class BaseClass0,class BaseClass0>)
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G1_C6`2<class BaseClass0,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C6`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass0,class BaseClass0>::Method7<object>()
calli default string(class G1_C6`2<class BaseClass0,class BaseClass0>)
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class G1_C6`2<class BaseClass0,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>()
calli default string(class G1_C6`2<class BaseClass0,class BaseClass0>)
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
calli default string(class G1_C6`2<class BaseClass0,class BaseClass0>)
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
calli default string(class G1_C6`2<class BaseClass0,class BaseClass0>)
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G1_C6`2<class BaseClass0,class BaseClass0>)
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
newobj instance void class G1_C6`2<class BaseClass0,class BaseClass1>::.ctor()
stloc.0
ldloc.0
castclass class G1_C6`2<class BaseClass0,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1327<object>()
calli default string(class G1_C6`2<class BaseClass0,class BaseClass1>)
ldstr "G1_C6::ClassMethod1327.4810<System.Object>()"
ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C6`2<class BaseClass0,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1326()
calli default string(class G1_C6`2<class BaseClass0,class BaseClass1>)
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C6`2<class BaseClass0,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass0,class BaseClass1>::Method7<object>()
calli default string(class G1_C6`2<class BaseClass0,class BaseClass1>)
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>()
calli default string(class G1_C6`2<class BaseClass0,class BaseClass1>)
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
calli default string(class G1_C6`2<class BaseClass0,class BaseClass1>)
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
calli default string(class G1_C6`2<class BaseClass0,class BaseClass1>)
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G1_C6`2<class BaseClass0,class BaseClass1>)
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
newobj instance void class G1_C6`2<class BaseClass1,class BaseClass0>::.ctor()
stloc.0
ldloc.0
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1327<object>()
calli default string(class G1_C6`2<class BaseClass1,class BaseClass0>)
ldstr "G1_C6::ClassMethod1327.4810<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G1_C6`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1326()
calli default string(class G1_C6`2<class BaseClass1,class BaseClass0>)
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G1_C6`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass0>::Method7<object>()
calli default string(class G1_C6`2<class BaseClass1,class BaseClass0>)
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G1_C6`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G1_C6`2<class BaseClass1,class BaseClass0>)
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
calli default string(class G1_C6`2<class BaseClass1,class BaseClass0>)
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
newobj instance void class G1_C6`2<class BaseClass1,class BaseClass1>::.ctor()
stloc.0
ldloc.0
castclass class G1_C6`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass1>::ClassMethod1327<object>()
calli default string(class G1_C6`2<class BaseClass1,class BaseClass1>)
ldstr "G1_C6::ClassMethod1327.4810<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C6`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass1>::ClassMethod1326()
calli default string(class G1_C6`2<class BaseClass1,class BaseClass1>)
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C6`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G1_C6`2<class BaseClass1,class BaseClass1>)
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G1_C6`2<class BaseClass1,class BaseClass1>)
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
calli default string(class G1_C6`2<class BaseClass1,class BaseClass1>)
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
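  // Entry point: invokes each test variant in turn and returns 100, the success exit code
  // expected by the runtime test harness.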
.method public hidebysig static int32 Main() cil managed
{
.custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = (
01 00 00 00
)
.entrypoint
.maxstack 10
call void Generated774::MethodCallingTest()
call void Generated774::ConstrainedCallsTest()
call void Generated774::StructConstrainedInterfaceCallsTest()
call void Generated774::CalliTest()
ldc.i4 100
ret
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
.assembly extern mscorlib { .publickeytoken = (B7 7A 5C 56 19 34 E0 89 ) .ver 4:0:0:0 }
.assembly extern TestFramework { .publickeytoken = ( B0 3F 5F 7F 11 D5 0A 3A ) }
//TYPES IN FORWARDER ASSEMBLIES:
//TEST ASSEMBLY:
.assembly Generated774 { .hash algorithm 0x00008004 }
.assembly extern xunit.core {}
.class public BaseClass0
{
.method public hidebysig specialname rtspecialname instance void .ctor() cil managed {
ldarg.0
call instance void [mscorlib]System.Object::.ctor()
ret
}
}
.class public BaseClass1
extends BaseClass0
{
.method public hidebysig specialname rtspecialname instance void .ctor() cil managed {
ldarg.0
call instance void BaseClass0::.ctor()
ret
}
}
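// G3_C1246`1<T0> derives from G2_C293`2<BaseClass1,BaseClass0> and implements IBase1`1<T0>:
// it re-routes the IBase1 methods through explicit MethodImpls (the "G3_C1246::*.MI.*" strings
// checked above) and overrides G2_C293's ClassMethod1913/ClassMethod1915 the same way.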
.class public G3_C1246`1<T0>
extends class G2_C293`2<class BaseClass1,class BaseClass0>
implements class IBase1`1<!T0>
{
.method public hidebysig virtual instance string Method4() cil managed noinlining {
ldstr "G3_C1246::Method4.15087()"
ret
}
.method public hidebysig newslot virtual instance string 'IBase1<T0>.Method4'() cil managed noinlining {
.override method instance string class IBase1`1<!T0>::Method4()
ldstr "G3_C1246::Method4.MI.15088()"
ret
}
.method public hidebysig virtual instance string Method5() cil managed noinlining {
ldstr "G3_C1246::Method5.15089()"
ret
}
.method public hidebysig newslot virtual instance string 'IBase1<T0>.Method5'() cil managed noinlining {
.override method instance string class IBase1`1<!T0>::Method5()
ldstr "G3_C1246::Method5.MI.15090()"
ret
}
.method public hidebysig newslot virtual instance string Method6<M0>() cil managed noinlining {
ldstr "G3_C1246::Method6.15091<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot virtual instance string 'IBase1<T0>.Method6'<M0>() cil managed noinlining {
.override method instance string class IBase1`1<!T0>::Method6<[1]>()
ldstr "G3_C1246::Method6.MI.15092<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot virtual instance string ClassMethod3870() cil managed noinlining {
ldstr "G3_C1246::ClassMethod3870.15093()"
ret
}
.method public hidebysig newslot virtual instance string ClassMethod3871<M0>() cil managed noinlining {
ldstr "G3_C1246::ClassMethod3871.15094<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot virtual instance string ClassMethod3872<M0>() cil managed noinlining {
ldstr "G3_C1246::ClassMethod3872.15095<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot virtual instance string 'G2_C293<class BaseClass1,class BaseClass0>.ClassMethod1913'() cil managed noinlining {
.override method instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1913()
ldstr "G3_C1246::ClassMethod1913.MI.15096()"
ret
}
.method public hidebysig newslot virtual instance string 'G2_C293<class BaseClass1,class BaseClass0>.ClassMethod1915'<M0>() cil managed noinlining {
.override method instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1915<[1]>()
ldstr "G3_C1246::ClassMethod1915.MI.15097<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig specialname rtspecialname instance void .ctor() cil managed {
ldarg.0
call instance void class G2_C293`2<class BaseClass1,class BaseClass0>::.ctor()
ret
}
}
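// G2_C293`2<T0,T1> derives from G1_C6`2<BaseClass1,T1> and implements IBase2`2<BaseClass0,BaseClass0>;
// it overrides Method7, supplies an explicit MethodImpl for G1_C6's ClassMethod1327, and adds
// ClassMethod1913 through ClassMethod1916.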
.class public G2_C293`2<T0, T1>
extends class G1_C6`2<class BaseClass1,!T1>
implements class IBase2`2<class BaseClass0,class BaseClass0>
{
.method public hidebysig virtual instance string Method7<M0>() cil managed noinlining {
ldstr "G2_C293::Method7.7469<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot virtual instance string 'IBase2<class BaseClass0,class BaseClass0>.Method7'<M0>() cil managed noinlining {
.override method instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<[1]>()
ldstr "G2_C293::Method7.MI.7470<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot virtual instance string ClassMethod1913() cil managed noinlining {
ldstr "G2_C293::ClassMethod1913.7471()"
ret
}
.method public hidebysig newslot virtual instance string ClassMethod1914() cil managed noinlining {
ldstr "G2_C293::ClassMethod1914.7472()"
ret
}
.method public hidebysig newslot virtual instance string ClassMethod1915<M0>() cil managed noinlining {
ldstr "G2_C293::ClassMethod1915.7473<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot virtual instance string ClassMethod1916<M0>() cil managed noinlining {
ldstr "G2_C293::ClassMethod1916.7474<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot virtual instance string 'G1_C6<class BaseClass1,T1>.ClassMethod1327'<M0>() cil managed noinlining {
.override method instance string class G1_C6`2<class BaseClass1,!T1>::ClassMethod1327<[1]>()
ldstr "G2_C293::ClassMethod1327.MI.7475<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig specialname rtspecialname instance void .ctor() cil managed {
ldarg.0
call instance void class G1_C6`2<class BaseClass1,!T1>::.ctor()
ret
}
}
.class interface public abstract IBase1`1<+T0>
{
.method public hidebysig newslot abstract virtual instance string Method4() cil managed { }
.method public hidebysig newslot abstract virtual instance string Method5() cil managed { }
.method public hidebysig newslot abstract virtual instance string Method6<M0>() cil managed { }
}
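// G1_C6`2<T0,T1> is the root of the class hierarchy; it implements IBase2`2<BaseClass1,T0> and
// introduces the newslot virtuals Method7, ClassMethod1326, and ClassMethod1327.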
.class public G1_C6`2<T0, T1>
implements class IBase2`2<class BaseClass1,!T0>
{
.method public hidebysig newslot virtual instance string Method7<M0>() cil managed noinlining {
ldstr "G1_C6::Method7.4808<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot virtual instance string ClassMethod1326() cil managed noinlining {
ldstr "G1_C6::ClassMethod1326.4809()"
ret
}
.method public hidebysig newslot virtual instance string ClassMethod1327<M0>() cil managed noinlining {
ldstr "G1_C6::ClassMethod1327.4810<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig specialname rtspecialname instance void .ctor() cil managed {
ldarg.0
call instance void [mscorlib]System.Object::.ctor()
ret
}
}
.class interface public abstract IBase2`2<+T0, -T1>
{
.method public hidebysig newslot abstract virtual instance string Method7<M0>() cil managed { }
}
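// Generated774 is the test harness. Each M.* helper below issues the listed
// virtual calls through a 'constrained.' callvirt on the generic parameter W,
// collects the returned identification strings into actualResults, and passes
// them to TestFramework::MethodCallTest along with the expected-results string.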
.class public auto ansi beforefieldinit Generated774 {
.method static void M.BaseClass0<(BaseClass0)W>(!!W inst, string exp) cil managed {
.maxstack 5
.locals init (string[] actualResults)
ldc.i4.s 0
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.BaseClass0<(BaseClass0)W>(!!W inst, string exp)"
ldc.i4.s 0
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.BaseClass1<(BaseClass1)W>(!!W inst, string exp) cil managed {
.maxstack 5
.locals init (string[] actualResults)
ldc.i4.s 0
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.BaseClass1<(BaseClass1)W>(!!W inst, string exp)"
ldc.i4.s 0
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G3_C1246.T<T0,(class G3_C1246`1<!!T0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 18
.locals init (string[] actualResults)
ldc.i4.s 13
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G3_C1246.T<T0,(class G3_C1246`1<!!T0>)W>(!!W 'inst', string exp)"
ldc.i4.s 13
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<!!T0>::ClassMethod1326()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<!!T0>::ClassMethod1327<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<!!T0>::ClassMethod1913()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<!!T0>::ClassMethod1914()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<!!T0>::ClassMethod1915<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<!!T0>::ClassMethod1916<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 6
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<!!T0>::ClassMethod3870()
stelem.ref
ldloc.s actualResults
ldc.i4.s 7
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<!!T0>::ClassMethod3871<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 8
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<!!T0>::ClassMethod3872<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 9
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<!!T0>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 10
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<!!T0>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 11
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<!!T0>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 12
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<!!T0>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G3_C1246.A<(class G3_C1246`1<class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 18
.locals init (string[] actualResults)
ldc.i4.s 13
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G3_C1246.A<(class G3_C1246`1<class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 13
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<class BaseClass0>::ClassMethod1326()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<class BaseClass0>::ClassMethod1327<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<class BaseClass0>::ClassMethod1913()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<class BaseClass0>::ClassMethod1914()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<class BaseClass0>::ClassMethod1915<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<class BaseClass0>::ClassMethod1916<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 6
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<class BaseClass0>::ClassMethod3870()
stelem.ref
ldloc.s actualResults
ldc.i4.s 7
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<class BaseClass0>::ClassMethod3871<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 8
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<class BaseClass0>::ClassMethod3872<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 9
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<class BaseClass0>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 10
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<class BaseClass0>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 11
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<class BaseClass0>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 12
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<class BaseClass0>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G3_C1246.B<(class G3_C1246`1<class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 18
.locals init (string[] actualResults)
ldc.i4.s 13
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G3_C1246.B<(class G3_C1246`1<class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 13
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<class BaseClass1>::ClassMethod1326()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<class BaseClass1>::ClassMethod1327<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<class BaseClass1>::ClassMethod1913()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<class BaseClass1>::ClassMethod1914()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<class BaseClass1>::ClassMethod1915<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<class BaseClass1>::ClassMethod1916<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 6
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<class BaseClass1>::ClassMethod3870()
stelem.ref
ldloc.s actualResults
ldc.i4.s 7
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<class BaseClass1>::ClassMethod3871<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 8
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<class BaseClass1>::ClassMethod3872<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 9
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<class BaseClass1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 10
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<class BaseClass1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 11
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<class BaseClass1>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 12
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1246`1<class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G2_C293.T.T<T0,T1,(class G2_C293`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 12
.locals init (string[] actualResults)
ldc.i4.s 7
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G2_C293.T.T<T0,T1,(class G2_C293`2<!!T0,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 7
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<!!T0,!!T1>::ClassMethod1326()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<!!T0,!!T1>::ClassMethod1327<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<!!T0,!!T1>::ClassMethod1913()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<!!T0,!!T1>::ClassMethod1914()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<!!T0,!!T1>::ClassMethod1915<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<!!T0,!!T1>::ClassMethod1916<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 6
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<!!T0,!!T1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G2_C293.A.T<T1,(class G2_C293`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 12
.locals init (string[] actualResults)
ldc.i4.s 7
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G2_C293.A.T<T1,(class G2_C293`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 7
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass0,!!T1>::ClassMethod1326()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass0,!!T1>::ClassMethod1327<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass0,!!T1>::ClassMethod1913()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass0,!!T1>::ClassMethod1914()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass0,!!T1>::ClassMethod1915<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass0,!!T1>::ClassMethod1916<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 6
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass0,!!T1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G2_C293.A.A<(class G2_C293`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 12
.locals init (string[] actualResults)
ldc.i4.s 7
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G2_C293.A.A<(class G2_C293`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 7
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass0>::ClassMethod1326()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass0>::ClassMethod1327<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass0>::ClassMethod1913()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass0>::ClassMethod1914()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass0>::ClassMethod1915<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass0>::ClassMethod1916<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 6
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass0>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G2_C293.A.B<(class G2_C293`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 12
.locals init (string[] actualResults)
ldc.i4.s 7
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G2_C293.A.B<(class G2_C293`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 7
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass1>::ClassMethod1326()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass1>::ClassMethod1327<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass1>::ClassMethod1913()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass1>::ClassMethod1914()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass1>::ClassMethod1915<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass1>::ClassMethod1916<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 6
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G2_C293.B.T<T1,(class G2_C293`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 12
.locals init (string[] actualResults)
ldc.i4.s 7
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G2_C293.B.T<T1,(class G2_C293`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 7
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass1,!!T1>::ClassMethod1326()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass1,!!T1>::ClassMethod1327<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass1,!!T1>::ClassMethod1913()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass1,!!T1>::ClassMethod1914()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass1,!!T1>::ClassMethod1915<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass1,!!T1>::ClassMethod1916<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 6
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass1,!!T1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G2_C293.B.A<(class G2_C293`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 12
.locals init (string[] actualResults)
ldc.i4.s 7
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G2_C293.B.A<(class G2_C293`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 7
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1326()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1327<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1913()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1914()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1915<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1916<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 6
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G2_C293.B.B<(class G2_C293`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 12
.locals init (string[] actualResults)
ldc.i4.s 7
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G2_C293.B.B<(class G2_C293`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 7
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass1>::ClassMethod1326()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass1>::ClassMethod1327<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass1>::ClassMethod1913()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass1>::ClassMethod1914()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass1>::ClassMethod1915<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass1>::ClassMethod1916<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 6
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase1.T<T0,(class IBase1`1<!!T0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 8
.locals init (string[] actualResults)
ldc.i4.s 3
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase1.T<T0,(class IBase1`1<!!T0>)W>(!!W 'inst', string exp)"
ldc.i4.s 3
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<!!T0>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<!!T0>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<!!T0>::Method6<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase1.A<(class IBase1`1<class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 8
.locals init (string[] actualResults)
ldc.i4.s 3
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase1.A<(class IBase1`1<class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 3
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass0>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass0>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase1.B<(class IBase1`1<class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 8
.locals init (string[] actualResults)
ldc.i4.s 3
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase1.B<(class IBase1`1<class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 3
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G1_C6.T.T<T0,T1,(class G1_C6`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 8
.locals init (string[] actualResults)
ldc.i4.s 3
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G1_C6.T.T<T0,T1,(class G1_C6`2<!!T0,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 3
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C6`2<!!T0,!!T1>::ClassMethod1326()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C6`2<!!T0,!!T1>::ClassMethod1327<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C6`2<!!T0,!!T1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G1_C6.A.T<T1,(class G1_C6`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 8
.locals init (string[] actualResults)
ldc.i4.s 3
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G1_C6.A.T<T1,(class G1_C6`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 3
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C6`2<class BaseClass0,!!T1>::ClassMethod1326()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C6`2<class BaseClass0,!!T1>::ClassMethod1327<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C6`2<class BaseClass0,!!T1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G1_C6.A.A<(class G1_C6`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 8
.locals init (string[] actualResults)
ldc.i4.s 3
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G1_C6.A.A<(class G1_C6`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 3
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass0>::ClassMethod1326()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass0>::ClassMethod1327<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass0>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G1_C6.A.B<(class G1_C6`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 8
.locals init (string[] actualResults)
ldc.i4.s 3
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G1_C6.A.B<(class G1_C6`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 3
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1326()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1327<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G1_C6.B.T<T1,(class G1_C6`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 8
.locals init (string[] actualResults)
ldc.i4.s 3
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G1_C6.B.T<T1,(class G1_C6`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 3
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C6`2<class BaseClass1,!!T1>::ClassMethod1326()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C6`2<class BaseClass1,!!T1>::ClassMethod1327<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C6`2<class BaseClass1,!!T1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G1_C6.B.A<(class G1_C6`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 8
.locals init (string[] actualResults)
ldc.i4.s 3
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G1_C6.B.A<(class G1_C6`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 3
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1326()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1327<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass0>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G1_C6.B.B<(class G1_C6`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 8
.locals init (string[] actualResults)
ldc.i4.s 3
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G1_C6.B.B<(class G1_C6`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 3
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass1>::ClassMethod1326()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass1>::ClassMethod1327<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.T.T<T0,T1,(class IBase2`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.T.T<T0,T1,(class IBase2`2<!!T0,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<!!T0,!!T1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.A.T<T1,(class IBase2`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.A.T<T1,(class IBase2`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass0,!!T1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.A.A<(class IBase2`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.A.A<(class IBase2`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.A.B<(class IBase2`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.A.B<(class IBase2`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.B.T<T1,(class IBase2`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.B.T<T1,(class IBase2`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass1,!!T1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.B.A<(class IBase2`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.B.A<(class IBase2`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.B.B<(class IBase2`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.B.B<(class IBase2`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
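// MethodCallingTest instantiates each generated class, calls its methods both
// directly (after castclass to each base class type) and through its interfaces,
// and verifies every returned string via TestFramework::MethodCallTest.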
.method public hidebysig static void MethodCallingTest() cil managed
{
.maxstack 10
.locals init (object V_0)
ldstr "========================== Method Calling Test =========================="
call void [mscorlib]System.Console::WriteLine(string)
newobj instance void class G3_C1246`1<class BaseClass0>::.ctor()
stloc.0
ldloc.0
dup
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1327<object>()
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1326()
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass0>::Method7<object>()
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
ldstr "G2_C293::Method7.MI.7470<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1916<object>()
ldstr "G2_C293::ClassMethod1916.7474<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1915<object>()
ldstr "G3_C1246::ClassMethod1915.MI.15097<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1914()
ldstr "G2_C293::ClassMethod1914.7472()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1913()
ldstr "G3_C1246::ClassMethod1913.MI.15096()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::Method7<object>()
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1327<object>()
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1326()
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
ldstr "G2_C293::Method7.MI.7470<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
castclass class G3_C1246`1<class BaseClass0>
callvirt instance string class G3_C1246`1<class BaseClass0>::ClassMethod3872<object>()
ldstr "G3_C1246::ClassMethod3872.15095<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1246`1<class BaseClass0>
callvirt instance string class G3_C1246`1<class BaseClass0>::ClassMethod3871<object>()
ldstr "G3_C1246::ClassMethod3871.15094<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1246`1<class BaseClass0>
callvirt instance string class G3_C1246`1<class BaseClass0>::ClassMethod3870()
ldstr "G3_C1246::ClassMethod3870.15093()"
ldstr "class G3_C1246`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1246`1<class BaseClass0>
callvirt instance string class G3_C1246`1<class BaseClass0>::Method6<object>()
ldstr "G3_C1246::Method6.15091<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1246`1<class BaseClass0>
callvirt instance string class G3_C1246`1<class BaseClass0>::Method5()
ldstr "G3_C1246::Method5.15089()"
ldstr "class G3_C1246`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1246`1<class BaseClass0>
callvirt instance string class G3_C1246`1<class BaseClass0>::Method4()
ldstr "G3_C1246::Method4.15087()"
ldstr "class G3_C1246`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1246`1<class BaseClass0>
callvirt instance string class G3_C1246`1<class BaseClass0>::ClassMethod1916<object>()
ldstr "G2_C293::ClassMethod1916.7474<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1246`1<class BaseClass0>
callvirt instance string class G3_C1246`1<class BaseClass0>::ClassMethod1915<object>()
ldstr "G3_C1246::ClassMethod1915.MI.15097<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1246`1<class BaseClass0>
callvirt instance string class G3_C1246`1<class BaseClass0>::ClassMethod1914()
ldstr "G2_C293::ClassMethod1914.7472()"
ldstr "class G3_C1246`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1246`1<class BaseClass0>
callvirt instance string class G3_C1246`1<class BaseClass0>::ClassMethod1913()
ldstr "G3_C1246::ClassMethod1913.MI.15096()"
ldstr "class G3_C1246`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1246`1<class BaseClass0>
callvirt instance string class G3_C1246`1<class BaseClass0>::Method7<object>()
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1246`1<class BaseClass0>
callvirt instance string class G3_C1246`1<class BaseClass0>::ClassMethod1327<object>()
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1246`1<class BaseClass0>
callvirt instance string class G3_C1246`1<class BaseClass0>::ClassMethod1326()
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G3_C1246`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method4()
ldstr "G3_C1246::Method4.MI.15088()"
ldstr "class IBase1`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method5()
ldstr "G3_C1246::Method5.MI.15090()"
ldstr "class IBase1`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>()
ldstr "G3_C1246::Method6.MI.15092<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
newobj instance void class G3_C1246`1<class BaseClass1>::.ctor()
stloc.0
ldloc.0
dup
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1327<object>()
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1326()
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass0>::Method7<object>()
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
ldstr "G2_C293::Method7.MI.7470<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1916<object>()
ldstr "G2_C293::ClassMethod1916.7474<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1915<object>()
ldstr "G3_C1246::ClassMethod1915.MI.15097<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1914()
ldstr "G2_C293::ClassMethod1914.7472()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1913()
ldstr "G3_C1246::ClassMethod1913.MI.15096()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::Method7<object>()
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1327<object>()
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1326()
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
ldstr "G2_C293::Method7.MI.7470<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
castclass class G3_C1246`1<class BaseClass1>
callvirt instance string class G3_C1246`1<class BaseClass1>::ClassMethod3872<object>()
ldstr "G3_C1246::ClassMethod3872.15095<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1246`1<class BaseClass1>
callvirt instance string class G3_C1246`1<class BaseClass1>::ClassMethod3871<object>()
ldstr "G3_C1246::ClassMethod3871.15094<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1246`1<class BaseClass1>
callvirt instance string class G3_C1246`1<class BaseClass1>::ClassMethod3870()
ldstr "G3_C1246::ClassMethod3870.15093()"
ldstr "class G3_C1246`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1246`1<class BaseClass1>
callvirt instance string class G3_C1246`1<class BaseClass1>::Method6<object>()
ldstr "G3_C1246::Method6.15091<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1246`1<class BaseClass1>
callvirt instance string class G3_C1246`1<class BaseClass1>::Method5()
ldstr "G3_C1246::Method5.15089()"
ldstr "class G3_C1246`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1246`1<class BaseClass1>
callvirt instance string class G3_C1246`1<class BaseClass1>::Method4()
ldstr "G3_C1246::Method4.15087()"
ldstr "class G3_C1246`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1246`1<class BaseClass1>
callvirt instance string class G3_C1246`1<class BaseClass1>::ClassMethod1916<object>()
ldstr "G2_C293::ClassMethod1916.7474<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1246`1<class BaseClass1>
callvirt instance string class G3_C1246`1<class BaseClass1>::ClassMethod1915<object>()
ldstr "G3_C1246::ClassMethod1915.MI.15097<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1246`1<class BaseClass1>
callvirt instance string class G3_C1246`1<class BaseClass1>::ClassMethod1914()
ldstr "G2_C293::ClassMethod1914.7472()"
ldstr "class G3_C1246`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1246`1<class BaseClass1>
callvirt instance string class G3_C1246`1<class BaseClass1>::ClassMethod1913()
ldstr "G3_C1246::ClassMethod1913.MI.15096()"
ldstr "class G3_C1246`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1246`1<class BaseClass1>
callvirt instance string class G3_C1246`1<class BaseClass1>::Method7<object>()
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1246`1<class BaseClass1>
callvirt instance string class G3_C1246`1<class BaseClass1>::ClassMethod1327<object>()
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1246`1<class BaseClass1>
callvirt instance string class G3_C1246`1<class BaseClass1>::ClassMethod1326()
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G3_C1246`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
ldstr "G3_C1246::Method4.MI.15088()"
ldstr "class IBase1`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
ldstr "G3_C1246::Method5.MI.15090()"
ldstr "class IBase1`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
ldstr "G3_C1246::Method6.MI.15092<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method4()
ldstr "G3_C1246::Method4.MI.15088()"
ldstr "class IBase1`1<class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method5()
ldstr "G3_C1246::Method5.MI.15090()"
ldstr "class IBase1`1<class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>()
ldstr "G3_C1246::Method6.MI.15092<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
newobj instance void class G2_C293`2<class BaseClass0,class BaseClass0>::.ctor()
stloc.0
ldloc.0
dup
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1327<object>()
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G2_C293`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1326()
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G2_C293`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass0>::Method7<object>()
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G2_C293`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
ldstr "G2_C293::Method7.MI.7470<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
castclass class G2_C293`2<class BaseClass0,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass0>::ClassMethod1916<object>()
ldstr "G2_C293::ClassMethod1916.7474<System.Object>()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass0> on type class G2_C293`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass0,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass0>::ClassMethod1915<object>()
ldstr "G2_C293::ClassMethod1915.7473<System.Object>()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass0> on type class G2_C293`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass0,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass0>::ClassMethod1914()
ldstr "G2_C293::ClassMethod1914.7472()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass0> on type class G2_C293`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass0,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass0>::ClassMethod1913()
ldstr "G2_C293::ClassMethod1913.7471()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass0> on type class G2_C293`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass0,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass0>::Method7<object>()
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass0> on type class G2_C293`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass0,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass0>::ClassMethod1327<object>()
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass0> on type class G2_C293`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass0,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass0>::ClassMethod1326()
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass0> on type class G2_C293`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
ldstr "G2_C293::Method7.MI.7470<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C293`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
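// Same checks for the G2_C293`2<class BaseClass0,class BaseClass1> instantiation.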
newobj instance void class G2_C293`2<class BaseClass0,class BaseClass1>::.ctor()
stloc.0
ldloc.0
dup
castclass class G1_C6`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass1>::ClassMethod1327<object>()
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C6`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass1>::ClassMethod1326()
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C6`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
ldstr "G2_C293::Method7.MI.7470<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
castclass class G2_C293`2<class BaseClass0,class BaseClass1>
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass1>::ClassMethod1916<object>()
ldstr "G2_C293::ClassMethod1916.7474<System.Object>()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass0,class BaseClass1>
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass1>::ClassMethod1915<object>()
ldstr "G2_C293::ClassMethod1915.7473<System.Object>()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass0,class BaseClass1>
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass1>::ClassMethod1914()
ldstr "G2_C293::ClassMethod1914.7472()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass0,class BaseClass1>
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass1>::ClassMethod1913()
ldstr "G2_C293::ClassMethod1913.7471()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass0,class BaseClass1>
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass1>::Method7<object>()
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass0,class BaseClass1>
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass1>::ClassMethod1327<object>()
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass0,class BaseClass1>
callvirt instance string class G2_C293`2<class BaseClass0,class BaseClass1>::ClassMethod1326()
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
ldstr "G2_C293::Method7.MI.7470<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C293`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
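// Same checks for the G2_C293`2<class BaseClass1,class BaseClass0> instantiation.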
newobj instance void class G2_C293`2<class BaseClass1,class BaseClass0>::.ctor()
stloc.0
ldloc.0
dup
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1327<object>()
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G2_C293`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1326()
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G2_C293`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass0>::Method7<object>()
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G2_C293`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
ldstr "G2_C293::Method7.MI.7470<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1916<object>()
ldstr "G2_C293::ClassMethod1916.7474<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G2_C293`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1915<object>()
ldstr "G2_C293::ClassMethod1915.7473<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G2_C293`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1914()
ldstr "G2_C293::ClassMethod1914.7472()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G2_C293`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1913()
ldstr "G2_C293::ClassMethod1913.7471()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G2_C293`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::Method7<object>()
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G2_C293`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1327<object>()
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G2_C293`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1326()
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G2_C293`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
ldstr "G2_C293::Method7.MI.7470<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C293`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
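// Same checks for the G2_C293`2<class BaseClass1,class BaseClass1> instantiation.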
newobj instance void class G2_C293`2<class BaseClass1,class BaseClass1>::.ctor()
stloc.0
ldloc.0
dup
castclass class G1_C6`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass1>::ClassMethod1327<object>()
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C6`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass1>::ClassMethod1326()
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C6`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
ldstr "G2_C293::Method7.MI.7470<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass1>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass1>::ClassMethod1916<object>()
ldstr "G2_C293::ClassMethod1916.7474<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass1>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass1>::ClassMethod1915<object>()
ldstr "G2_C293::ClassMethod1915.7473<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass1>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass1>::ClassMethod1914()
ldstr "G2_C293::ClassMethod1914.7472()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass1>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass1>::ClassMethod1913()
ldstr "G2_C293::ClassMethod1913.7471()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass1>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass1>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass1>::ClassMethod1327<object>()
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C293`2<class BaseClass1,class BaseClass1>
callvirt instance string class G2_C293`2<class BaseClass1,class BaseClass1>::ClassMethod1326()
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
ldstr "G2_C293::Method7.MI.7470<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C293`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
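// Checks on plain G1_C6`2 instances, where every slot resolves to the G1_C6 implementations.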
newobj instance void class G1_C6`2<class BaseClass0,class BaseClass0>::.ctor()
stloc.0
ldloc.0
dup
castclass class G1_C6`2<class BaseClass0,class BaseClass0>
callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass0>::ClassMethod1327<object>()
ldstr "G1_C6::ClassMethod1327.4810<System.Object>()"
ldstr "class G1_C6`2<class BaseClass0,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C6`2<class BaseClass0,class BaseClass0>
callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass0>::ClassMethod1326()
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G1_C6`2<class BaseClass0,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C6`2<class BaseClass0,class BaseClass0>
callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass0>::Method7<object>()
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class G1_C6`2<class BaseClass0,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>()
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
newobj instance void class G1_C6`2<class BaseClass0,class BaseClass1>::.ctor()
stloc.0
ldloc.0
dup
castclass class G1_C6`2<class BaseClass0,class BaseClass1>
callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1327<object>()
ldstr "G1_C6::ClassMethod1327.4810<System.Object>()"
ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C6`2<class BaseClass0,class BaseClass1>
callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1326()
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C6`2<class BaseClass0,class BaseClass1>
callvirt instance string class G1_C6`2<class BaseClass0,class BaseClass1>::Method7<object>()
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>()
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
newobj instance void class G1_C6`2<class BaseClass1,class BaseClass0>::.ctor()
stloc.0
ldloc.0
dup
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1327<object>()
ldstr "G1_C6::ClassMethod1327.4810<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G1_C6`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1326()
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G1_C6`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass0>::Method7<object>()
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G1_C6`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
newobj instance void class G1_C6`2<class BaseClass1,class BaseClass1>::.ctor()
stloc.0
ldloc.0
dup
castclass class G1_C6`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass1>::ClassMethod1327<object>()
ldstr "G1_C6::ClassMethod1327.4810<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C6`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass1>::ClassMethod1326()
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C6`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C6`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
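// ConstrainedCallsTest drives the same instantiations through the Generated774::M.* generic helpers
// used for constrained calls; each '#'-separated string lists the expected results in call order.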
.method public hidebysig static void ConstrainedCallsTest() cil managed
{
.maxstack 10
.locals init (object V_0)
ldstr "========================== Constrained Calls Test =========================="
call void [mscorlib]System.Console::WriteLine(string)
newobj instance void class G3_C1246`1<class BaseClass0>::.ctor()
stloc.0
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G1_C6.T.T<class BaseClass1,class BaseClass0,class G3_C1246`1<class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G1_C6.B.T<class BaseClass0,class G3_C1246`1<class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G1_C6.B.A<class G3_C1246`1<class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G3_C1246`1<class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.IBase2.B.T<class BaseClass1,class G3_C1246`1<class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.IBase2.B.B<class G3_C1246`1<class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G3_C1246`1<class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.A.T<class BaseClass1,class G3_C1246`1<class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.A.B<class G3_C1246`1<class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G3_C1246::ClassMethod1913.MI.15096()#G2_C293::ClassMethod1914.7472()#G3_C1246::ClassMethod1915.MI.15097<System.Object>()#G2_C293::ClassMethod1916.7474<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G2_C293.T.T<class BaseClass1,class BaseClass0,class G3_C1246`1<class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G3_C1246::ClassMethod1913.MI.15096()#G2_C293::ClassMethod1914.7472()#G3_C1246::ClassMethod1915.MI.15097<System.Object>()#G2_C293::ClassMethod1916.7474<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G2_C293.B.T<class BaseClass0,class G3_C1246`1<class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G3_C1246::ClassMethod1913.MI.15096()#G2_C293::ClassMethod1914.7472()#G3_C1246::ClassMethod1915.MI.15097<System.Object>()#G2_C293::ClassMethod1916.7474<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G2_C293.B.A<class G3_C1246`1<class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G3_C1246`1<class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.A.T<class BaseClass0,class G3_C1246`1<class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.A.A<class G3_C1246`1<class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G3_C1246::ClassMethod1913.MI.15096()#G2_C293::ClassMethod1914.7472()#G3_C1246::ClassMethod1915.MI.15097<System.Object>()#G2_C293::ClassMethod1916.7474<System.Object>()#G3_C1246::ClassMethod3870.15093()#G3_C1246::ClassMethod3871.15094<System.Object>()#G3_C1246::ClassMethod3872.15095<System.Object>()#G3_C1246::Method4.15087()#G3_C1246::Method5.15089()#G3_C1246::Method6.15091<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G3_C1246.T<class BaseClass0,class G3_C1246`1<class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G3_C1246::ClassMethod1913.MI.15096()#G2_C293::ClassMethod1914.7472()#G3_C1246::ClassMethod1915.MI.15097<System.Object>()#G2_C293::ClassMethod1916.7474<System.Object>()#G3_C1246::ClassMethod3870.15093()#G3_C1246::ClassMethod3871.15094<System.Object>()#G3_C1246::ClassMethod3872.15095<System.Object>()#G3_C1246::Method4.15087()#G3_C1246::Method5.15089()#G3_C1246::Method6.15091<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G3_C1246.A<class G3_C1246`1<class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G3_C1246::Method4.MI.15088()#G3_C1246::Method5.MI.15090()#G3_C1246::Method6.MI.15092<System.Object>()#"
call void Generated774::M.IBase1.T<class BaseClass0,class G3_C1246`1<class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G3_C1246::Method4.MI.15088()#G3_C1246::Method5.MI.15090()#G3_C1246::Method6.MI.15092<System.Object>()#"
call void Generated774::M.IBase1.A<class G3_C1246`1<class BaseClass0>>(!!0,string)
newobj instance void class G3_C1246`1<class BaseClass1>::.ctor()
stloc.0
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G1_C6.T.T<class BaseClass1,class BaseClass0,class G3_C1246`1<class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G1_C6.B.T<class BaseClass0,class G3_C1246`1<class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G1_C6.B.A<class G3_C1246`1<class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G3_C1246`1<class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.IBase2.B.T<class BaseClass1,class G3_C1246`1<class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.IBase2.B.B<class G3_C1246`1<class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G3_C1246`1<class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.A.T<class BaseClass1,class G3_C1246`1<class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.A.B<class G3_C1246`1<class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G3_C1246::ClassMethod1913.MI.15096()#G2_C293::ClassMethod1914.7472()#G3_C1246::ClassMethod1915.MI.15097<System.Object>()#G2_C293::ClassMethod1916.7474<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G2_C293.T.T<class BaseClass1,class BaseClass0,class G3_C1246`1<class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G3_C1246::ClassMethod1913.MI.15096()#G2_C293::ClassMethod1914.7472()#G3_C1246::ClassMethod1915.MI.15097<System.Object>()#G2_C293::ClassMethod1916.7474<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G2_C293.B.T<class BaseClass0,class G3_C1246`1<class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G3_C1246::ClassMethod1913.MI.15096()#G2_C293::ClassMethod1914.7472()#G3_C1246::ClassMethod1915.MI.15097<System.Object>()#G2_C293::ClassMethod1916.7474<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G2_C293.B.A<class G3_C1246`1<class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G3_C1246`1<class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.A.T<class BaseClass0,class G3_C1246`1<class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.A.A<class G3_C1246`1<class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G3_C1246::ClassMethod1913.MI.15096()#G2_C293::ClassMethod1914.7472()#G3_C1246::ClassMethod1915.MI.15097<System.Object>()#G2_C293::ClassMethod1916.7474<System.Object>()#G3_C1246::ClassMethod3870.15093()#G3_C1246::ClassMethod3871.15094<System.Object>()#G3_C1246::ClassMethod3872.15095<System.Object>()#G3_C1246::Method4.15087()#G3_C1246::Method5.15089()#G3_C1246::Method6.15091<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G3_C1246.T<class BaseClass1,class G3_C1246`1<class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G3_C1246::ClassMethod1913.MI.15096()#G2_C293::ClassMethod1914.7472()#G3_C1246::ClassMethod1915.MI.15097<System.Object>()#G2_C293::ClassMethod1916.7474<System.Object>()#G3_C1246::ClassMethod3870.15093()#G3_C1246::ClassMethod3871.15094<System.Object>()#G3_C1246::ClassMethod3872.15095<System.Object>()#G3_C1246::Method4.15087()#G3_C1246::Method5.15089()#G3_C1246::Method6.15091<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G3_C1246.B<class G3_C1246`1<class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G3_C1246::Method4.MI.15088()#G3_C1246::Method5.MI.15090()#G3_C1246::Method6.MI.15092<System.Object>()#"
call void Generated774::M.IBase1.T<class BaseClass1,class G3_C1246`1<class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G3_C1246::Method4.MI.15088()#G3_C1246::Method5.MI.15090()#G3_C1246::Method6.MI.15092<System.Object>()#"
call void Generated774::M.IBase1.B<class G3_C1246`1<class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G3_C1246::Method4.MI.15088()#G3_C1246::Method5.MI.15090()#G3_C1246::Method6.MI.15092<System.Object>()#"
call void Generated774::M.IBase1.T<class BaseClass0,class G3_C1246`1<class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G3_C1246::Method4.MI.15088()#G3_C1246::Method5.MI.15090()#G3_C1246::Method6.MI.15092<System.Object>()#"
call void Generated774::M.IBase1.A<class G3_C1246`1<class BaseClass1>>(!!0,string)
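// Constrained-call checks for the G2_C293`2 instantiations.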
newobj instance void class G2_C293`2<class BaseClass0,class BaseClass0>::.ctor()
stloc.0
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G1_C6.T.T<class BaseClass1,class BaseClass0,class G2_C293`2<class BaseClass0,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G1_C6.B.T<class BaseClass0,class G2_C293`2<class BaseClass0,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G1_C6.B.A<class G2_C293`2<class BaseClass0,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G2_C293`2<class BaseClass0,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.IBase2.B.T<class BaseClass1,class G2_C293`2<class BaseClass0,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.IBase2.B.B<class G2_C293`2<class BaseClass0,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C293`2<class BaseClass0,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.A.T<class BaseClass1,class G2_C293`2<class BaseClass0,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.A.B<class G2_C293`2<class BaseClass0,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::ClassMethod1913.7471()#G2_C293::ClassMethod1914.7472()#G2_C293::ClassMethod1915.7473<System.Object>()#G2_C293::ClassMethod1916.7474<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G2_C293.T.T<class BaseClass0,class BaseClass0,class G2_C293`2<class BaseClass0,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::ClassMethod1913.7471()#G2_C293::ClassMethod1914.7472()#G2_C293::ClassMethod1915.7473<System.Object>()#G2_C293::ClassMethod1916.7474<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G2_C293.A.T<class BaseClass0,class G2_C293`2<class BaseClass0,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::ClassMethod1913.7471()#G2_C293::ClassMethod1914.7472()#G2_C293::ClassMethod1915.7473<System.Object>()#G2_C293::ClassMethod1916.7474<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G2_C293.A.A<class G2_C293`2<class BaseClass0,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G2_C293`2<class BaseClass0,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.A.T<class BaseClass0,class G2_C293`2<class BaseClass0,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.A.A<class G2_C293`2<class BaseClass0,class BaseClass0>>(!!0,string)
newobj instance void class G2_C293`2<class BaseClass0,class BaseClass1>::.ctor()
stloc.0
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G1_C6.T.T<class BaseClass1,class BaseClass1,class G2_C293`2<class BaseClass0,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G1_C6.B.T<class BaseClass1,class G2_C293`2<class BaseClass0,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G1_C6.B.B<class G2_C293`2<class BaseClass0,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G2_C293`2<class BaseClass0,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.IBase2.B.T<class BaseClass1,class G2_C293`2<class BaseClass0,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.IBase2.B.B<class G2_C293`2<class BaseClass0,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C293`2<class BaseClass0,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.A.T<class BaseClass1,class G2_C293`2<class BaseClass0,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.A.B<class G2_C293`2<class BaseClass0,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::ClassMethod1913.7471()#G2_C293::ClassMethod1914.7472()#G2_C293::ClassMethod1915.7473<System.Object>()#G2_C293::ClassMethod1916.7474<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G2_C293.T.T<class BaseClass0,class BaseClass1,class G2_C293`2<class BaseClass0,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::ClassMethod1913.7471()#G2_C293::ClassMethod1914.7472()#G2_C293::ClassMethod1915.7473<System.Object>()#G2_C293::ClassMethod1916.7474<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G2_C293.A.T<class BaseClass1,class G2_C293`2<class BaseClass0,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::ClassMethod1913.7471()#G2_C293::ClassMethod1914.7472()#G2_C293::ClassMethod1915.7473<System.Object>()#G2_C293::ClassMethod1916.7474<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G2_C293.A.B<class G2_C293`2<class BaseClass0,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G2_C293`2<class BaseClass0,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.A.T<class BaseClass0,class G2_C293`2<class BaseClass0,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.A.A<class G2_C293`2<class BaseClass0,class BaseClass1>>(!!0,string)
newobj instance void class G2_C293`2<class BaseClass1,class BaseClass0>::.ctor()
stloc.0
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G1_C6.T.T<class BaseClass1,class BaseClass0,class G2_C293`2<class BaseClass1,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G1_C6.B.T<class BaseClass0,class G2_C293`2<class BaseClass1,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G1_C6.B.A<class G2_C293`2<class BaseClass1,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G2_C293`2<class BaseClass1,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.IBase2.B.T<class BaseClass1,class G2_C293`2<class BaseClass1,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.IBase2.B.B<class G2_C293`2<class BaseClass1,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C293`2<class BaseClass1,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.A.T<class BaseClass1,class G2_C293`2<class BaseClass1,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.A.B<class G2_C293`2<class BaseClass1,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::ClassMethod1913.7471()#G2_C293::ClassMethod1914.7472()#G2_C293::ClassMethod1915.7473<System.Object>()#G2_C293::ClassMethod1916.7474<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G2_C293.T.T<class BaseClass1,class BaseClass0,class G2_C293`2<class BaseClass1,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::ClassMethod1913.7471()#G2_C293::ClassMethod1914.7472()#G2_C293::ClassMethod1915.7473<System.Object>()#G2_C293::ClassMethod1916.7474<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G2_C293.B.T<class BaseClass0,class G2_C293`2<class BaseClass1,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::ClassMethod1913.7471()#G2_C293::ClassMethod1914.7472()#G2_C293::ClassMethod1915.7473<System.Object>()#G2_C293::ClassMethod1916.7474<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G2_C293.B.A<class G2_C293`2<class BaseClass1,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G2_C293`2<class BaseClass1,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.A.T<class BaseClass0,class G2_C293`2<class BaseClass1,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.A.A<class G2_C293`2<class BaseClass1,class BaseClass0>>(!!0,string)
newobj instance void class G2_C293`2<class BaseClass1,class BaseClass1>::.ctor()
stloc.0
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G1_C6.T.T<class BaseClass1,class BaseClass1,class G2_C293`2<class BaseClass1,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G1_C6.B.T<class BaseClass1,class G2_C293`2<class BaseClass1,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G1_C6.B.B<class G2_C293`2<class BaseClass1,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G2_C293`2<class BaseClass1,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.IBase2.B.T<class BaseClass1,class G2_C293`2<class BaseClass1,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.IBase2.B.B<class G2_C293`2<class BaseClass1,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C293`2<class BaseClass1,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.A.T<class BaseClass1,class G2_C293`2<class BaseClass1,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.A.B<class G2_C293`2<class BaseClass1,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::ClassMethod1913.7471()#G2_C293::ClassMethod1914.7472()#G2_C293::ClassMethod1915.7473<System.Object>()#G2_C293::ClassMethod1916.7474<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G2_C293.T.T<class BaseClass1,class BaseClass1,class G2_C293`2<class BaseClass1,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::ClassMethod1913.7471()#G2_C293::ClassMethod1914.7472()#G2_C293::ClassMethod1915.7473<System.Object>()#G2_C293::ClassMethod1916.7474<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G2_C293.B.T<class BaseClass1,class G2_C293`2<class BaseClass1,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G2_C293::ClassMethod1327.MI.7475<System.Object>()#G2_C293::ClassMethod1913.7471()#G2_C293::ClassMethod1914.7472()#G2_C293::ClassMethod1915.7473<System.Object>()#G2_C293::ClassMethod1916.7474<System.Object>()#G2_C293::Method7.7469<System.Object>()#"
call void Generated774::M.G2_C293.B.B<class G2_C293`2<class BaseClass1,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G2_C293`2<class BaseClass1,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.A.T<class BaseClass0,class G2_C293`2<class BaseClass1,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C293::Method7.MI.7470<System.Object>()#"
call void Generated774::M.IBase2.A.A<class G2_C293`2<class BaseClass1,class BaseClass1>>(!!0,string)
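// Constrained-call checks for the plain G1_C6`2 instantiations.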
newobj instance void class G1_C6`2<class BaseClass0,class BaseClass0>::.ctor()
stloc.0
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G1_C6::ClassMethod1327.4810<System.Object>()#G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.G1_C6.T.T<class BaseClass0,class BaseClass0,class G1_C6`2<class BaseClass0,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G1_C6::ClassMethod1327.4810<System.Object>()#G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.G1_C6.A.T<class BaseClass0,class G1_C6`2<class BaseClass0,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G1_C6::ClassMethod1327.4810<System.Object>()#G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.G1_C6.A.A<class G1_C6`2<class BaseClass0,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass1,class BaseClass0,class G1_C6`2<class BaseClass0,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.B.T<class BaseClass0,class G1_C6`2<class BaseClass0,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.B.A<class G1_C6`2<class BaseClass0,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G1_C6`2<class BaseClass0,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.A.T<class BaseClass0,class G1_C6`2<class BaseClass0,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.A.A<class G1_C6`2<class BaseClass0,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G1_C6`2<class BaseClass0,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.A.T<class BaseClass1,class G1_C6`2<class BaseClass0,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.A.B<class G1_C6`2<class BaseClass0,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G1_C6`2<class BaseClass0,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.B.T<class BaseClass1,class G1_C6`2<class BaseClass0,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.B.B<class G1_C6`2<class BaseClass0,class BaseClass0>>(!!0,string)
newobj instance void class G1_C6`2<class BaseClass0,class BaseClass1>::.ctor()
stloc.0
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G1_C6::ClassMethod1327.4810<System.Object>()#G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.G1_C6.T.T<class BaseClass0,class BaseClass1,class G1_C6`2<class BaseClass0,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G1_C6::ClassMethod1327.4810<System.Object>()#G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.G1_C6.A.T<class BaseClass1,class G1_C6`2<class BaseClass0,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G1_C6::ClassMethod1327.4810<System.Object>()#G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.G1_C6.A.B<class G1_C6`2<class BaseClass0,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass1,class BaseClass0,class G1_C6`2<class BaseClass0,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.B.T<class BaseClass0,class G1_C6`2<class BaseClass0,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.B.A<class G1_C6`2<class BaseClass0,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G1_C6`2<class BaseClass0,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.A.T<class BaseClass0,class G1_C6`2<class BaseClass0,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.A.A<class G1_C6`2<class BaseClass0,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G1_C6`2<class BaseClass0,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.A.T<class BaseClass1,class G1_C6`2<class BaseClass0,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.A.B<class G1_C6`2<class BaseClass0,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G1_C6`2<class BaseClass0,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.B.T<class BaseClass1,class G1_C6`2<class BaseClass0,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.B.B<class G1_C6`2<class BaseClass0,class BaseClass1>>(!!0,string)
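// Same checks against G1_C6`2<class BaseClass1,class BaseClass0>.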
newobj instance void class G1_C6`2<class BaseClass1,class BaseClass0>::.ctor()
stloc.0
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G1_C6::ClassMethod1327.4810<System.Object>()#G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.G1_C6.T.T<class BaseClass1,class BaseClass0,class G1_C6`2<class BaseClass1,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G1_C6::ClassMethod1327.4810<System.Object>()#G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.G1_C6.B.T<class BaseClass0,class G1_C6`2<class BaseClass1,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G1_C6::ClassMethod1327.4810<System.Object>()#G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.G1_C6.B.A<class G1_C6`2<class BaseClass1,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G1_C6`2<class BaseClass1,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.B.T<class BaseClass1,class G1_C6`2<class BaseClass1,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.B.B<class G1_C6`2<class BaseClass1,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G1_C6`2<class BaseClass1,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.A.T<class BaseClass1,class G1_C6`2<class BaseClass1,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.A.B<class G1_C6`2<class BaseClass1,class BaseClass0>>(!!0,string)
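// Same checks against G1_C6`2<class BaseClass1,class BaseClass1>.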
newobj instance void class G1_C6`2<class BaseClass1,class BaseClass1>::.ctor()
stloc.0
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G1_C6::ClassMethod1327.4810<System.Object>()#G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.G1_C6.T.T<class BaseClass1,class BaseClass1,class G1_C6`2<class BaseClass1,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G1_C6::ClassMethod1327.4810<System.Object>()#G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.G1_C6.B.T<class BaseClass1,class G1_C6`2<class BaseClass1,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G1_C6::ClassMethod1326.4809()#G1_C6::ClassMethod1327.4810<System.Object>()#G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.G1_C6.B.B<class G1_C6`2<class BaseClass1,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G1_C6`2<class BaseClass1,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.B.T<class BaseClass1,class G1_C6`2<class BaseClass1,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.B.B<class G1_C6`2<class BaseClass1,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G1_C6`2<class BaseClass1,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.A.T<class BaseClass1,class G1_C6`2<class BaseClass1,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G1_C6::Method7.4808<System.Object>()#"
call void Generated774::M.IBase2.A.B<class G1_C6`2<class BaseClass1,class BaseClass1>>(!!0,string)
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
.method public hidebysig static void StructConstrainedInterfaceCallsTest() cil managed
{
.maxstack 10
ldstr "===================== Struct Constrained Interface Calls Test ====================="
call void [mscorlib]System.Console::WriteLine(string)
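// This generated case emits no struct-constrained interface calls; only the banner strings are printed.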
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
.method public hidebysig static void CalliTest() cil managed
{
.maxstack 10
.locals init (object V_0)
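// Pattern for each block below: instantiate a type, fetch a virtual method pointer with ldvirtftn, invoke it through calli, and compare the returned identifier string with the expected override via TestFramework::MethodCallTest.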
ldstr "========================== Method Calli Test =========================="
call void [mscorlib]System.Console::WriteLine(string)
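// calli dispatch checks on a G3_C1246`1<class BaseClass0> instance.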
newobj instance void class G3_C1246`1<class BaseClass0>::.ctor()
stloc.0
ldloc.0
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1327<object>()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1326()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass0>::Method7<object>()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G2_C293::Method7.MI.7470<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1916<object>()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G2_C293::ClassMethod1916.7474<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1915<object>()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G3_C1246::ClassMethod1915.MI.15097<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1914()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G2_C293::ClassMethod1914.7472()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1913()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G3_C1246::ClassMethod1913.MI.15096()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass0>::Method7<object>()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1327<object>()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1326()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G2_C293::Method7.MI.7470<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1246`1<class BaseClass0>
ldloc.0
ldvirtftn instance string class G3_C1246`1<class BaseClass0>::ClassMethod3872<object>()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G3_C1246::ClassMethod3872.15095<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1246`1<class BaseClass0>
ldloc.0
ldvirtftn instance string class G3_C1246`1<class BaseClass0>::ClassMethod3871<object>()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G3_C1246::ClassMethod3871.15094<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1246`1<class BaseClass0>
ldloc.0
ldvirtftn instance string class G3_C1246`1<class BaseClass0>::ClassMethod3870()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G3_C1246::ClassMethod3870.15093()"
ldstr "class G3_C1246`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1246`1<class BaseClass0>
ldloc.0
ldvirtftn instance string class G3_C1246`1<class BaseClass0>::Method6<object>()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G3_C1246::Method6.15091<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1246`1<class BaseClass0>
ldloc.0
ldvirtftn instance string class G3_C1246`1<class BaseClass0>::Method5()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G3_C1246::Method5.15089()"
ldstr "class G3_C1246`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1246`1<class BaseClass0>
ldloc.0
ldvirtftn instance string class G3_C1246`1<class BaseClass0>::Method4()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G3_C1246::Method4.15087()"
ldstr "class G3_C1246`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1246`1<class BaseClass0>
ldloc.0
ldvirtftn instance string class G3_C1246`1<class BaseClass0>::ClassMethod1916<object>()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G2_C293::ClassMethod1916.7474<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1246`1<class BaseClass0>
ldloc.0
ldvirtftn instance string class G3_C1246`1<class BaseClass0>::ClassMethod1915<object>()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G3_C1246::ClassMethod1915.MI.15097<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1246`1<class BaseClass0>
ldloc.0
ldvirtftn instance string class G3_C1246`1<class BaseClass0>::ClassMethod1914()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G2_C293::ClassMethod1914.7472()"
ldstr "class G3_C1246`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1246`1<class BaseClass0>
ldloc.0
ldvirtftn instance string class G3_C1246`1<class BaseClass0>::ClassMethod1913()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G3_C1246::ClassMethod1913.MI.15096()"
ldstr "class G3_C1246`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1246`1<class BaseClass0>
ldloc.0
ldvirtftn instance string class G3_C1246`1<class BaseClass0>::Method7<object>()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1246`1<class BaseClass0>
ldloc.0
ldvirtftn instance string class G3_C1246`1<class BaseClass0>::ClassMethod1327<object>()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1246`1<class BaseClass0>
ldloc.0
ldvirtftn instance string class G3_C1246`1<class BaseClass0>::ClassMethod1326()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G3_C1246`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G3_C1246::Method4.MI.15088()"
ldstr "class IBase1`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G3_C1246::Method5.MI.15090()"
ldstr "class IBase1`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>()
calli default string(class G3_C1246`1<class BaseClass0>)
ldstr "G3_C1246::Method6.MI.15092<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type class G3_C1246`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
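// calli dispatch checks on a G3_C1246`1<class BaseClass1> instance.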
newobj instance void class G3_C1246`1<class BaseClass1>::.ctor()
stloc.0
ldloc.0
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1327<object>()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1326()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass0>::Method7<object>()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G2_C293::Method7.MI.7470<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1916<object>()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G2_C293::ClassMethod1916.7474<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1915<object>()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G3_C1246::ClassMethod1915.MI.15097<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1914()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G2_C293::ClassMethod1914.7472()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1913()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G3_C1246::ClassMethod1913.MI.15096()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass0>::Method7<object>()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1327<object>()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1326()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G2_C293::Method7.MI.7470<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1246`1<class BaseClass1>
ldloc.0
ldvirtftn instance string class G3_C1246`1<class BaseClass1>::ClassMethod3872<object>()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G3_C1246::ClassMethod3872.15095<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1246`1<class BaseClass1>
ldloc.0
ldvirtftn instance string class G3_C1246`1<class BaseClass1>::ClassMethod3871<object>()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G3_C1246::ClassMethod3871.15094<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1246`1<class BaseClass1>
ldloc.0
ldvirtftn instance string class G3_C1246`1<class BaseClass1>::ClassMethod3870()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G3_C1246::ClassMethod3870.15093()"
ldstr "class G3_C1246`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1246`1<class BaseClass1>
ldloc.0
ldvirtftn instance string class G3_C1246`1<class BaseClass1>::Method6<object>()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G3_C1246::Method6.15091<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1246`1<class BaseClass1>
ldloc.0
ldvirtftn instance string class G3_C1246`1<class BaseClass1>::Method5()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G3_C1246::Method5.15089()"
ldstr "class G3_C1246`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1246`1<class BaseClass1>
ldloc.0
ldvirtftn instance string class G3_C1246`1<class BaseClass1>::Method4()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G3_C1246::Method4.15087()"
ldstr "class G3_C1246`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1246`1<class BaseClass1>
ldloc.0
ldvirtftn instance string class G3_C1246`1<class BaseClass1>::ClassMethod1916<object>()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G2_C293::ClassMethod1916.7474<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1246`1<class BaseClass1>
ldloc.0
ldvirtftn instance string class G3_C1246`1<class BaseClass1>::ClassMethod1915<object>()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G3_C1246::ClassMethod1915.MI.15097<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1246`1<class BaseClass1>
ldloc.0
ldvirtftn instance string class G3_C1246`1<class BaseClass1>::ClassMethod1914()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G2_C293::ClassMethod1914.7472()"
ldstr "class G3_C1246`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1246`1<class BaseClass1>
ldloc.0
ldvirtftn instance string class G3_C1246`1<class BaseClass1>::ClassMethod1913()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G3_C1246::ClassMethod1913.MI.15096()"
ldstr "class G3_C1246`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1246`1<class BaseClass1>
ldloc.0
ldvirtftn instance string class G3_C1246`1<class BaseClass1>::Method7<object>()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1246`1<class BaseClass1>
ldloc.0
ldvirtftn instance string class G3_C1246`1<class BaseClass1>::ClassMethod1327<object>()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G3_C1246`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1246`1<class BaseClass1>
ldloc.0
ldvirtftn instance string class G3_C1246`1<class BaseClass1>::ClassMethod1326()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G3_C1246`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G3_C1246::Method4.MI.15088()"
ldstr "class IBase1`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G3_C1246::Method5.MI.15090()"
ldstr "class IBase1`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G3_C1246::Method6.MI.15092<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G3_C1246::Method4.MI.15088()"
ldstr "class IBase1`1<class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G3_C1246::Method5.MI.15090()"
ldstr "class IBase1`1<class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>()
calli default string(class G3_C1246`1<class BaseClass1>)
ldstr "G3_C1246::Method6.MI.15092<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type class G3_C1246`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
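// calli dispatch checks on a G2_C293`2<class BaseClass0,class BaseClass0> instance.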
newobj instance void class G2_C293`2<class BaseClass0,class BaseClass0>::.ctor()
stloc.0
ldloc.0
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1327<object>()
calli default string(class G2_C293`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G2_C293`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1326()
calli default string(class G2_C293`2<class BaseClass0,class BaseClass0>)
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G2_C293`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass0>::Method7<object>()
calli default string(class G2_C293`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G2_C293`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G2_C293`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
calli default string(class G2_C293`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C293::Method7.MI.7470<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass0,class BaseClass0>::ClassMethod1916<object>()
calli default string(class G2_C293`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C293::ClassMethod1916.7474<System.Object>()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass0> on type class G2_C293`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass0,class BaseClass0>::ClassMethod1915<object>()
calli default string(class G2_C293`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C293::ClassMethod1915.7473<System.Object>()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass0> on type class G2_C293`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass0,class BaseClass0>::ClassMethod1914()
calli default string(class G2_C293`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C293::ClassMethod1914.7472()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass0> on type class G2_C293`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass0,class BaseClass0>::ClassMethod1913()
calli default string(class G2_C293`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C293::ClassMethod1913.7471()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass0> on type class G2_C293`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass0,class BaseClass0>::Method7<object>()
calli default string(class G2_C293`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass0> on type class G2_C293`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass0,class BaseClass0>::ClassMethod1327<object>()
calli default string(class G2_C293`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass0> on type class G2_C293`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass0,class BaseClass0>::ClassMethod1326()
calli default string(class G2_C293`2<class BaseClass0,class BaseClass0>)
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass0> on type class G2_C293`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
calli default string(class G2_C293`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C293::Method7.MI.7470<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C293`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
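// calli dispatch checks on a G2_C293`2<class BaseClass0,class BaseClass1> instance.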
newobj instance void class G2_C293`2<class BaseClass0,class BaseClass1>::.ctor()
stloc.0
ldloc.0
castclass class G1_C6`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass1>::ClassMethod1327<object>()
calli default string(class G2_C293`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C6`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass1>::ClassMethod1326()
calli default string(class G2_C293`2<class BaseClass0,class BaseClass1>)
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C6`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G2_C293`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G2_C293`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
calli default string(class G2_C293`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C293::Method7.MI.7470<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass0,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass0,class BaseClass1>::ClassMethod1916<object>()
calli default string(class G2_C293`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C293::ClassMethod1916.7474<System.Object>()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass0,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass0,class BaseClass1>::ClassMethod1915<object>()
calli default string(class G2_C293`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C293::ClassMethod1915.7473<System.Object>()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass0,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass0,class BaseClass1>::ClassMethod1914()
calli default string(class G2_C293`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C293::ClassMethod1914.7472()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass0,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass0,class BaseClass1>::ClassMethod1913()
calli default string(class G2_C293`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C293::ClassMethod1913.7471()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass0,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass0,class BaseClass1>::Method7<object>()
calli default string(class G2_C293`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass0,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass0,class BaseClass1>::ClassMethod1327<object>()
calli default string(class G2_C293`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass0,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass0,class BaseClass1>::ClassMethod1326()
calli default string(class G2_C293`2<class BaseClass0,class BaseClass1>)
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G2_C293`2<class BaseClass0,class BaseClass1> on type class G2_C293`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
calli default string(class G2_C293`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C293::Method7.MI.7470<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C293`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
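// calli dispatch checks on a G2_C293`2<class BaseClass1,class BaseClass0> instance.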
newobj instance void class G2_C293`2<class BaseClass1,class BaseClass0>::.ctor()
stloc.0
ldloc.0
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1327<object>()
calli default string(class G2_C293`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G2_C293`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1326()
calli default string(class G2_C293`2<class BaseClass1,class BaseClass0>)
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G2_C293`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass0>::Method7<object>()
calli default string(class G2_C293`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G2_C293`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G2_C293`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
calli default string(class G2_C293`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C293::Method7.MI.7470<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1916<object>()
calli default string(class G2_C293`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C293::ClassMethod1916.7474<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G2_C293`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1915<object>()
calli default string(class G2_C293`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C293::ClassMethod1915.7473<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G2_C293`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1914()
calli default string(class G2_C293`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C293::ClassMethod1914.7472()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G2_C293`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1913()
calli default string(class G2_C293`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C293::ClassMethod1913.7471()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G2_C293`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass0>::Method7<object>()
calli default string(class G2_C293`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G2_C293`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1327<object>()
calli default string(class G2_C293`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G2_C293`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass0>::ClassMethod1326()
calli default string(class G2_C293`2<class BaseClass1,class BaseClass0>)
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass0> on type class G2_C293`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
calli default string(class G2_C293`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C293::Method7.MI.7470<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C293`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
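// calli dispatch checks on a G2_C293`2<class BaseClass1,class BaseClass1> instance.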
newobj instance void class G2_C293`2<class BaseClass1,class BaseClass1>::.ctor()
stloc.0
ldloc.0
castclass class G1_C6`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass1>::ClassMethod1327<object>()
calli default string(class G2_C293`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C6`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass1>::ClassMethod1326()
calli default string(class G2_C293`2<class BaseClass1,class BaseClass1>)
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C6`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G2_C293`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G2_C293`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
calli default string(class G2_C293`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C293::Method7.MI.7470<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass1>::ClassMethod1916<object>()
calli default string(class G2_C293`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C293::ClassMethod1916.7474<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass1>::ClassMethod1915<object>()
calli default string(class G2_C293`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C293::ClassMethod1915.7473<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass1>::ClassMethod1914()
calli default string(class G2_C293`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C293::ClassMethod1914.7472()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass1>::ClassMethod1913()
calli default string(class G2_C293`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C293::ClassMethod1913.7471()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G2_C293`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C293::Method7.7469<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass1>::ClassMethod1327<object>()
calli default string(class G2_C293`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C293::ClassMethod1327.MI.7475<System.Object>()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C293`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C293`2<class BaseClass1,class BaseClass1>::ClassMethod1326()
calli default string(class G2_C293`2<class BaseClass1,class BaseClass1>)
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G2_C293`2<class BaseClass1,class BaseClass1> on type class G2_C293`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
calli default string(class G2_C293`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C293::Method7.MI.7470<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C293`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
newobj instance void class G1_C6`2<class BaseClass0,class BaseClass0>::.ctor()
stloc.0
ldloc.0
castclass class G1_C6`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass0,class BaseClass0>::ClassMethod1327<object>()
calli default string(class G1_C6`2<class BaseClass0,class BaseClass0>)
ldstr "G1_C6::ClassMethod1327.4810<System.Object>()"
ldstr "class G1_C6`2<class BaseClass0,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C6`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass0,class BaseClass0>::ClassMethod1326()
calli default string(class G1_C6`2<class BaseClass0,class BaseClass0>)
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G1_C6`2<class BaseClass0,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C6`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass0,class BaseClass0>::Method7<object>()
calli default string(class G1_C6`2<class BaseClass0,class BaseClass0>)
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class G1_C6`2<class BaseClass0,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>()
calli default string(class G1_C6`2<class BaseClass0,class BaseClass0>)
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
calli default string(class G1_C6`2<class BaseClass0,class BaseClass0>)
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
calli default string(class G1_C6`2<class BaseClass0,class BaseClass0>)
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G1_C6`2<class BaseClass0,class BaseClass0>)
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
newobj instance void class G1_C6`2<class BaseClass0,class BaseClass1>::.ctor()
stloc.0
ldloc.0
castclass class G1_C6`2<class BaseClass0,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1327<object>()
calli default string(class G1_C6`2<class BaseClass0,class BaseClass1>)
ldstr "G1_C6::ClassMethod1327.4810<System.Object>()"
ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C6`2<class BaseClass0,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass0,class BaseClass1>::ClassMethod1326()
calli default string(class G1_C6`2<class BaseClass0,class BaseClass1>)
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C6`2<class BaseClass0,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass0,class BaseClass1>::Method7<object>()
calli default string(class G1_C6`2<class BaseClass0,class BaseClass1>)
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class G1_C6`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>()
calli default string(class G1_C6`2<class BaseClass0,class BaseClass1>)
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
calli default string(class G1_C6`2<class BaseClass0,class BaseClass1>)
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G1_C6`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
calli default string(class G1_C6`2<class BaseClass0,class BaseClass1>)
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G1_C6`2<class BaseClass0,class BaseClass1>)
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
newobj instance void class G1_C6`2<class BaseClass1,class BaseClass0>::.ctor()
stloc.0
ldloc.0
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1327<object>()
calli default string(class G1_C6`2<class BaseClass1,class BaseClass0>)
ldstr "G1_C6::ClassMethod1327.4810<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G1_C6`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass0>::ClassMethod1326()
calli default string(class G1_C6`2<class BaseClass1,class BaseClass0>)
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G1_C6`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C6`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass0>::Method7<object>()
calli default string(class G1_C6`2<class BaseClass1,class BaseClass0>)
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass0> on type class G1_C6`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G1_C6`2<class BaseClass1,class BaseClass0>)
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
calli default string(class G1_C6`2<class BaseClass1,class BaseClass0>)
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
newobj instance void class G1_C6`2<class BaseClass1,class BaseClass1>::.ctor()
stloc.0
ldloc.0
castclass class G1_C6`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass1>::ClassMethod1327<object>()
calli default string(class G1_C6`2<class BaseClass1,class BaseClass1>)
ldstr "G1_C6::ClassMethod1327.4810<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C6`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass1>::ClassMethod1326()
calli default string(class G1_C6`2<class BaseClass1,class BaseClass1>)
ldstr "G1_C6::ClassMethod1326.4809()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C6`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C6`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G1_C6`2<class BaseClass1,class BaseClass1>)
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class G1_C6`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G1_C6`2<class BaseClass1,class BaseClass1>)
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
calli default string(class G1_C6`2<class BaseClass1,class BaseClass1>)
ldstr "G1_C6::Method7.4808<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G1_C6`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
.method public hidebysig static int32 Main() cil managed
{
.custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = (
01 00 00 00
)
.entrypoint
.maxstack 10
call void Generated774::MethodCallingTest()
call void Generated774::ConstrainedCallsTest()
call void Generated774::StructConstrainedInterfaceCallsTest()
call void Generated774::CalliTest()
ldc.i4 100
ret
}
}
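
The generated IL above drives its checks through constrained interface calls and through ldvirtftn/calli indirect calls, and its Main entrypoint carries a Xunit FactAttribute and returns 100 (the exit-code convention the runtime test harness treats as success). Purely as orientation for readers less comfortable with raw IL, the following short C# sketch mirrors the constrained-call idea; the interface, struct, and method names here are hypothetical and do not appear in the generated test.

// Hypothetical C# analogue (not part of the generated suite) of the
// "constrained. !!T / callvirt" pattern exercised by the IL tests: the generic
// parameter is constrained to an interface, so calling the interface method on
// a struct receiver does not box it.
interface IHasMethod0
{
    string Method0();
}

struct SampleStruct : IHasMethod0
{
    public string Method0() => "SampleStruct::Method0";
}

static class ConstrainedCallSketch
{
    // Roslyn compiles this call to: constrained. !!T  followed by
    // callvirt instance string IHasMethod0::Method0()
    static string Call<T>(T inst) where T : IHasMethod0 => inst.Method0();

    static int Main()
    {
        // Runtime tests conventionally signal success with exit code 100.
        return Call(new SampleStruct()) == "SampleStruct::Method0" ? 100 : 1;
    }
}
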
| -1 |
dotnet/runtime | 65,924 | Fix libraries GCStress pipeline | Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| BruceForstall | "2022-02-27T04:30:37Z" | "2022-03-01T18:20:06Z" | dfe08b3bea9a1897562635307ab4bb70bc31fada | 79026a542a8364b6783c5d212bb1841ac2c873a9 | Fix libraries GCStress pipeline. Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| ./src/libraries/System.Private.Xml/tests/XmlSchema/TestFiles/TestData/bug338038_v2.xsd | <xs:schema xmlns:xml="http://www.w3.org/XML/1998/namespace" targetNamespace="http://www.w3.org/XML/1998/namespace" xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:import schemaLocation="bug338038_v2a1.xsd" />
<xs:attribute name="lang" type="xs:language" />
<xs:attribute name="base" type="xs:anyURI" />
<xs:attribute default="preserve" name="space">
<xs:simpleType>
<xs:restriction base="xs:NCName">
<xs:enumeration value="default" />
<xs:enumeration value="preserve" />
</xs:restriction>
</xs:simpleType>
</xs:attribute>
<xs:attribute name="blah"/>
<xs:attributeGroup name="specialAttrs">
<xs:attribute ref="xml:lang" />
<xs:attribute ref="xml:space" />
<xs:attribute ref="xml:base" />
<xs:attribute ref="xml:blah" />
</xs:attributeGroup>
</xs:schema> | <xs:schema xmlns:xml="http://www.w3.org/XML/1998/namespace" targetNamespace="http://www.w3.org/XML/1998/namespace" xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:import schemaLocation="bug338038_v2a1.xsd" />
<xs:attribute name="lang" type="xs:language" />
<xs:attribute name="base" type="xs:anyURI" />
<xs:attribute default="preserve" name="space">
<xs:simpleType>
<xs:restriction base="xs:NCName">
<xs:enumeration value="default" />
<xs:enumeration value="preserve" />
</xs:restriction>
</xs:simpleType>
</xs:attribute>
<xs:attribute name="blah"/>
<xs:attributeGroup name="specialAttrs">
<xs:attribute ref="xml:lang" />
<xs:attribute ref="xml:space" />
<xs:attribute ref="xml:base" />
<xs:attribute ref="xml:blah" />
</xs:attributeGroup>
</xs:schema> | -1 |
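
The schema above targets the xml namespace and imports bug338038_v2a1.xsd; it is test data consumed by the System.Private.Xml XmlSchema suite. As an illustration only of how such a file is typically loaded — a minimal sketch assuming a local copy of the file, not code taken from the tests themselves — an XmlSchemaSet can compile it as follows.

// Minimal, assumed-usage sketch: load and compile the schema with XmlSchemaSet.
// Add() resolves the xs:import (bug338038_v2a1.xsd is assumed to sit next to the
// file), and Compile() reports problems through the validation callback.
using System;
using System.Xml.Schema;

class SchemaLoadSketch
{
    static void Main()
    {
        var schemas = new XmlSchemaSet();
        schemas.ValidationEventHandler += (sender, e) =>
            Console.WriteLine($"{e.Severity}: {e.Message}");

        // Passing null uses the schema's own targetNamespace.
        schemas.Add(null, "bug338038_v2.xsd");
        schemas.Compile();

        Console.WriteLine($"Compiled {schemas.Count} schema(s).");
    }
}
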
dotnet/runtime | 65,924 | Fix libraries GCStress pipeline | Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| BruceForstall | "2022-02-27T04:30:37Z" | "2022-03-01T18:20:06Z" | dfe08b3bea9a1897562635307ab4bb70bc31fada | 79026a542a8364b6783c5d212bb1841ac2c873a9 | Fix libraries GCStress pipeline. Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| ./src/tests/Loader/classloader/TypeGeneratorTests/TypeGeneratorTest27/Generated27.il | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
.assembly extern mscorlib { .publickeytoken = (B7 7A 5C 56 19 34 E0 89 ) .ver 4:0:0:0 }
.assembly extern TestFramework { .publickeytoken = ( B0 3F 5F 7F 11 D5 0A 3A ) }
//TYPES IN FORWARDER ASSEMBLIES:
//TEST ASSEMBLY:
.assembly Generated27 { .hash algorithm 0x00008004 }
.assembly extern xunit.core {}
.class public BaseClass0
{
.method public hidebysig specialname rtspecialname instance void .ctor() cil managed {
ldarg.0
call instance void [mscorlib]System.Object::.ctor()
ret
}
}
.class public BaseClass1
extends BaseClass0
{
.method public hidebysig specialname rtspecialname instance void .ctor() cil managed {
ldarg.0
call instance void BaseClass0::.ctor()
ret
}
}
.class public sequential sealed MyStruct77`1<T0>
extends [mscorlib]System.ValueType
implements class IBase1`1<class BaseClass1>, IBase0
{
.pack 0
.size 1
.method public hidebysig newslot virtual instance string Method4() cil managed noinlining {
ldstr "MyStruct77::Method4.616()"
ret
}
.method public hidebysig newslot virtual instance string 'IBase1<class BaseClass1>.Method4'() cil managed noinlining {
.override method instance string class IBase1`1<class BaseClass1>::Method4()
ldstr "MyStruct77::Method4.MI.617()"
ret
}
.method public hidebysig newslot virtual instance string Method5() cil managed noinlining {
ldstr "MyStruct77::Method5.618()"
ret
}
.method public hidebysig newslot virtual instance string 'IBase1<class BaseClass1>.Method5'() cil managed noinlining {
.override method instance string class IBase1`1<class BaseClass1>::Method5()
ldstr "MyStruct77::Method5.MI.619()"
ret
}
.method public hidebysig virtual instance string Method6<M0>() cil managed noinlining {
ldstr "MyStruct77::Method6.620<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot virtual instance string 'IBase1<class BaseClass1>.Method6'<M0>() cil managed noinlining {
.override method instance string class IBase1`1<class BaseClass1>::Method6<[1]>()
ldstr "MyStruct77::Method6.MI.621<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig virtual instance string Method0() cil managed noinlining {
ldstr "MyStruct77::Method0.622()"
ret
}
.method public hidebysig newslot virtual instance string 'IBase0.Method0'() cil managed noinlining {
.override method instance string IBase0::Method0()
ldstr "MyStruct77::Method0.MI.623()"
ret
}
.method public hidebysig newslot virtual instance string Method1() cil managed noinlining {
ldstr "MyStruct77::Method1.624()"
ret
}
.method public hidebysig newslot virtual instance string 'IBase0.Method1'() cil managed noinlining {
.override method instance string IBase0::Method1()
ldstr "MyStruct77::Method1.MI.625()"
ret
}
.method public hidebysig newslot virtual instance string Method2<M0>() cil managed noinlining {
ldstr "MyStruct77::Method2.626<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig virtual instance string Method3<M0>() cil managed noinlining {
ldstr "MyStruct77::Method3.627<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot virtual instance string 'IBase0.Method3'<M0>() cil managed noinlining {
.override method instance string IBase0::Method3<[1]>()
ldstr "MyStruct77::Method3.MI.628<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot instance string ClassMethod157() cil managed noinlining {
ldstr "MyStruct77::ClassMethod157.629()"
ret
}
.method public hidebysig virtual instance bool Equals(object obj) cil managed { ldc.i4.0 ret }
.method public hidebysig virtual instance int32 GetHashCode() cil managed { ldc.i4.0 ret }
.method public hidebysig virtual instance string ToString() cil managed { ldstr "" ret }
}
.class interface public abstract IBase1`1<+T0>
{
.method public hidebysig newslot abstract virtual instance string Method4() cil managed { }
.method public hidebysig newslot abstract virtual instance string Method5() cil managed { }
.method public hidebysig newslot abstract virtual instance string Method6<M0>() cil managed { }
}
.class interface public abstract IBase0
{
.method public hidebysig newslot abstract virtual instance string Method0() cil managed { }
.method public hidebysig newslot abstract virtual instance string Method1() cil managed { }
.method public hidebysig newslot abstract virtual instance string Method2<M0>() cil managed { }
.method public hidebysig newslot abstract virtual instance string Method3<M0>() cil managed { }
}
.class public auto ansi beforefieldinit Generated27 {
.method static void M.BaseClass0<(BaseClass0)W>(!!W inst, string exp) cil managed {
.maxstack 5
.locals init (string[] actualResults)
ldc.i4.s 0
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.BaseClass0<(BaseClass0)W>(!!W inst, string exp)"
ldc.i4.s 0
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.BaseClass1<(BaseClass1)W>(!!W inst, string exp) cil managed {
.maxstack 5
.locals init (string[] actualResults)
ldc.i4.s 0
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.BaseClass1<(BaseClass1)W>(!!W inst, string exp)"
ldc.i4.s 0
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase1.T<T0,(class IBase1`1<!!T0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 8
.locals init (string[] actualResults)
ldc.i4.s 3
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase1.T<T0,(class IBase1`1<!!T0>)W>(!!W 'inst', string exp)"
ldc.i4.s 3
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<!!T0>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<!!T0>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<!!T0>::Method6<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase1.A<(class IBase1`1<class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 8
.locals init (string[] actualResults)
ldc.i4.s 3
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase1.A<(class IBase1`1<class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 3
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass0>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass0>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase1.B<(class IBase1`1<class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 8
.locals init (string[] actualResults)
ldc.i4.s 3
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase1.B<(class IBase1`1<class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 3
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase0<(IBase0)W>(!!W inst, string exp) cil managed {
.maxstack 9
.locals init (string[] actualResults)
ldc.i4.s 4
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase0<(IBase0)W>(!!W inst, string exp)"
ldc.i4.s 4
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string IBase0::Method0()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string IBase0::Method1()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string IBase0::Method2<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string IBase0::Method3<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.MyStruct77.T<T0,(valuetype MyStruct77`1<!!T0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 12
.locals init (string[] actualResults)
ldc.i4.s 7
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.MyStruct77.T<T0,(valuetype MyStruct77`1<!!T0>)W>(!!W 'inst', string exp)"
ldc.i4.s 7
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. valuetype MyStruct77`1<!!T0>
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. valuetype MyStruct77`1<!!T0>
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. valuetype MyStruct77`1<!!T0>
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. valuetype MyStruct77`1<!!T0>
callvirt instance string IBase0::Method0()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. valuetype MyStruct77`1<!!T0>
callvirt instance string IBase0::Method1()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. valuetype MyStruct77`1<!!T0>
callvirt instance string IBase0::Method2<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 6
ldarga.s 0
constrained. valuetype MyStruct77`1<!!T0>
callvirt instance string IBase0::Method3<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.MyStruct77.A<(valuetype MyStruct77`1<class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 12
.locals init (string[] actualResults)
ldc.i4.s 7
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.MyStruct77.A<(valuetype MyStruct77`1<class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 7
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. valuetype MyStruct77`1<class BaseClass0>
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. valuetype MyStruct77`1<class BaseClass0>
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. valuetype MyStruct77`1<class BaseClass0>
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. valuetype MyStruct77`1<class BaseClass0>
callvirt instance string IBase0::Method0()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. valuetype MyStruct77`1<class BaseClass0>
callvirt instance string IBase0::Method1()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. valuetype MyStruct77`1<class BaseClass0>
callvirt instance string IBase0::Method2<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 6
ldarga.s 0
constrained. valuetype MyStruct77`1<class BaseClass0>
callvirt instance string IBase0::Method3<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.MyStruct77.B<(valuetype MyStruct77`1<class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 12
.locals init (string[] actualResults)
ldc.i4.s 7
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.MyStruct77.B<(valuetype MyStruct77`1<class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 7
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. valuetype MyStruct77`1<class BaseClass1>
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. valuetype MyStruct77`1<class BaseClass1>
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. valuetype MyStruct77`1<class BaseClass1>
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. valuetype MyStruct77`1<class BaseClass1>
callvirt instance string IBase0::Method0()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. valuetype MyStruct77`1<class BaseClass1>
callvirt instance string IBase0::Method1()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. valuetype MyStruct77`1<class BaseClass1>
callvirt instance string IBase0::Method2<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 6
ldarga.s 0
constrained. valuetype MyStruct77`1<class BaseClass1>
callvirt instance string IBase0::Method3<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method public hidebysig static void MethodCallingTest() cil managed
{
.maxstack 10
.locals init (object V_0)
ldstr "========================== Method Calling Test =========================="
call void [mscorlib]System.Console::WriteLine(string)
.locals init (valuetype MyStruct77`1<class BaseClass0> V_1)
ldloca V_1
initobj valuetype MyStruct77`1<class BaseClass0>
ldloca V_1
dup
call instance string valuetype MyStruct77`1<class BaseClass0>::Method4()
ldstr "MyStruct77::Method4.616()"
ldstr "valuetype MyStruct77`1<class BaseClass0> on type MyStruct77"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct77`1<class BaseClass0>::Method5()
ldstr "MyStruct77::Method5.618()"
ldstr "valuetype MyStruct77`1<class BaseClass0> on type MyStruct77"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct77`1<class BaseClass0>::Method6<object>()
ldstr "MyStruct77::Method6.620<System.Object>()"
ldstr "valuetype MyStruct77`1<class BaseClass0> on type MyStruct77"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct77`1<class BaseClass0>::Method0()
ldstr "MyStruct77::Method0.622()"
ldstr "valuetype MyStruct77`1<class BaseClass0> on type MyStruct77"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct77`1<class BaseClass0>::Method1()
ldstr "MyStruct77::Method1.624()"
ldstr "valuetype MyStruct77`1<class BaseClass0> on type MyStruct77"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct77`1<class BaseClass0>::Method2<object>()
ldstr "MyStruct77::Method2.626<System.Object>()"
ldstr "valuetype MyStruct77`1<class BaseClass0> on type MyStruct77"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct77`1<class BaseClass0>::Method3<object>()
ldstr "MyStruct77::Method3.627<System.Object>()"
ldstr "valuetype MyStruct77`1<class BaseClass0> on type MyStruct77"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct77`1<class BaseClass0>::ClassMethod157()
ldstr "MyStruct77::ClassMethod157.629()"
ldstr "valuetype MyStruct77`1<class BaseClass0> on type MyStruct77"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup ldnull call instance bool valuetype MyStruct77`1<class BaseClass0>::Equals(object) pop
dup call instance int32 valuetype MyStruct77`1<class BaseClass0>::GetHashCode() pop
dup call instance string valuetype MyStruct77`1<class BaseClass0>::ToString() pop
pop
ldloc V_1
box valuetype MyStruct77`1<class BaseClass0>
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
ldstr "MyStruct77::Method4.MI.617()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
ldstr "MyStruct77::Method5.MI.619()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
ldstr "MyStruct77::Method6.MI.621<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc V_1
box valuetype MyStruct77`1<class BaseClass0>
dup
callvirt instance string IBase0::Method0()
ldstr "MyStruct77::Method0.MI.623()"
ldstr "IBase0 on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string IBase0::Method1()
ldstr "MyStruct77::Method1.MI.625()"
ldstr "IBase0 on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string IBase0::Method2<object>()
ldstr "MyStruct77::Method2.626<System.Object>()"
ldstr "IBase0 on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string IBase0::Method3<object>()
ldstr "MyStruct77::Method3.MI.628<System.Object>()"
ldstr "IBase0 on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc V_1
box valuetype MyStruct77`1<class BaseClass0>
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method4()
ldstr "MyStruct77::Method4.MI.617()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method5()
ldstr "MyStruct77::Method5.MI.619()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>()
ldstr "MyStruct77::Method6.MI.621<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
.locals init (valuetype MyStruct77`1<class BaseClass1> V_2)
ldloca V_2
initobj valuetype MyStruct77`1<class BaseClass1>
ldloca V_2
dup
call instance string valuetype MyStruct77`1<class BaseClass1>::Method4()
ldstr "MyStruct77::Method4.616()"
ldstr "valuetype MyStruct77`1<class BaseClass1> on type MyStruct77"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct77`1<class BaseClass1>::Method5()
ldstr "MyStruct77::Method5.618()"
ldstr "valuetype MyStruct77`1<class BaseClass1> on type MyStruct77"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct77`1<class BaseClass1>::Method6<object>()
ldstr "MyStruct77::Method6.620<System.Object>()"
ldstr "valuetype MyStruct77`1<class BaseClass1> on type MyStruct77"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct77`1<class BaseClass1>::Method0()
ldstr "MyStruct77::Method0.622()"
ldstr "valuetype MyStruct77`1<class BaseClass1> on type MyStruct77"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct77`1<class BaseClass1>::Method1()
ldstr "MyStruct77::Method1.624()"
ldstr "valuetype MyStruct77`1<class BaseClass1> on type MyStruct77"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct77`1<class BaseClass1>::Method2<object>()
ldstr "MyStruct77::Method2.626<System.Object>()"
ldstr "valuetype MyStruct77`1<class BaseClass1> on type MyStruct77"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct77`1<class BaseClass1>::Method3<object>()
ldstr "MyStruct77::Method3.627<System.Object>()"
ldstr "valuetype MyStruct77`1<class BaseClass1> on type MyStruct77"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct77`1<class BaseClass1>::ClassMethod157()
ldstr "MyStruct77::ClassMethod157.629()"
ldstr "valuetype MyStruct77`1<class BaseClass1> on type MyStruct77"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup ldnull call instance bool valuetype MyStruct77`1<class BaseClass1>::Equals(object) pop
dup call instance int32 valuetype MyStruct77`1<class BaseClass1>::GetHashCode() pop
dup call instance string valuetype MyStruct77`1<class BaseClass1>::ToString() pop
pop
ldloc V_2
box valuetype MyStruct77`1<class BaseClass1>
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
ldstr "MyStruct77::Method4.MI.617()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
ldstr "MyStruct77::Method5.MI.619()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
ldstr "MyStruct77::Method6.MI.621<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc V_2
box valuetype MyStruct77`1<class BaseClass1>
dup
callvirt instance string IBase0::Method0()
ldstr "MyStruct77::Method0.MI.623()"
ldstr "IBase0 on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string IBase0::Method1()
ldstr "MyStruct77::Method1.MI.625()"
ldstr "IBase0 on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string IBase0::Method2<object>()
ldstr "MyStruct77::Method2.626<System.Object>()"
ldstr "IBase0 on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string IBase0::Method3<object>()
ldstr "MyStruct77::Method3.MI.628<System.Object>()"
ldstr "IBase0 on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc V_2
box valuetype MyStruct77`1<class BaseClass1>
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method4()
ldstr "MyStruct77::Method4.MI.617()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method5()
ldstr "MyStruct77::Method5.MI.619()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>()
ldstr "MyStruct77::Method6.MI.621<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
.method public hidebysig static void ConstrainedCallsTest() cil managed
{
.maxstack 10
.locals init (object V_0)
ldstr "========================== Constrained Calls Test =========================="
call void [mscorlib]System.Console::WriteLine(string)
.locals init (valuetype MyStruct77`1<class BaseClass0> V_3)
ldloca V_3
initobj valuetype MyStruct77`1<class BaseClass0>
.try { ldloc V_3
ldstr "MyStruct77::Method4.MI.617()#MyStruct77::Method5.MI.619()#MyStruct77::Method6.MI.621<System.Object>()#"
call void Generated27::M.IBase1.T<class BaseClass1,valuetype MyStruct77`1<class BaseClass0>>(!!1,string) leave.s LV0
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV0} LV0:
.try { ldloc V_3
ldstr "MyStruct77::Method4.MI.617()#MyStruct77::Method5.MI.619()#MyStruct77::Method6.MI.621<System.Object>()#"
call void Generated27::M.IBase1.B<valuetype MyStruct77`1<class BaseClass0>>(!!0,string) leave.s LV1
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV1} LV1:
.try { ldloc V_3
ldstr "MyStruct77::Method0.MI.623()#MyStruct77::Method1.MI.625()#MyStruct77::Method2.626<System.Object>()#MyStruct77::Method3.MI.628<System.Object>()#"
call void Generated27::M.IBase0<valuetype MyStruct77`1<class BaseClass0>>(!!0,string) leave.s LV2
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV2} LV2:
.try { ldloc V_3
ldstr "MyStruct77::Method4.MI.617()#MyStruct77::Method5.MI.619()#MyStruct77::Method6.MI.621<System.Object>()#"
call void Generated27::M.IBase1.T<class BaseClass0,valuetype MyStruct77`1<class BaseClass0>>(!!1,string) leave.s LV3
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV3} LV3:
.try { ldloc V_3
ldstr "MyStruct77::Method4.MI.617()#MyStruct77::Method5.MI.619()#MyStruct77::Method6.MI.621<System.Object>()#"
call void Generated27::M.IBase1.A<valuetype MyStruct77`1<class BaseClass0>>(!!0,string) leave.s LV4
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV4} LV4:
.locals init (valuetype MyStruct77`1<class BaseClass1> V_4)
ldloca V_4
initobj valuetype MyStruct77`1<class BaseClass1>
.try { ldloc V_4
ldstr "MyStruct77::Method4.MI.617()#MyStruct77::Method5.MI.619()#MyStruct77::Method6.MI.621<System.Object>()#"
call void Generated27::M.IBase1.T<class BaseClass1,valuetype MyStruct77`1<class BaseClass1>>(!!1,string) leave.s LV5
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV5} LV5:
.try { ldloc V_4
ldstr "MyStruct77::Method4.MI.617()#MyStruct77::Method5.MI.619()#MyStruct77::Method6.MI.621<System.Object>()#"
call void Generated27::M.IBase1.B<valuetype MyStruct77`1<class BaseClass1>>(!!0,string) leave.s LV6
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV6} LV6:
.try { ldloc V_4
ldstr "MyStruct77::Method0.MI.623()#MyStruct77::Method1.MI.625()#MyStruct77::Method2.626<System.Object>()#MyStruct77::Method3.MI.628<System.Object>()#"
call void Generated27::M.IBase0<valuetype MyStruct77`1<class BaseClass1>>(!!0,string) leave.s LV7
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV7} LV7:
.try { ldloc V_4
ldstr "MyStruct77::Method4.MI.617()#MyStruct77::Method5.MI.619()#MyStruct77::Method6.MI.621<System.Object>()#"
call void Generated27::M.IBase1.T<class BaseClass0,valuetype MyStruct77`1<class BaseClass1>>(!!1,string) leave.s LV8
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV8} LV8:
.try { ldloc V_4
ldstr "MyStruct77::Method4.MI.617()#MyStruct77::Method5.MI.619()#MyStruct77::Method6.MI.621<System.Object>()#"
call void Generated27::M.IBase1.A<valuetype MyStruct77`1<class BaseClass1>>(!!0,string) leave.s LV9
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV9} LV9:
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
.method public hidebysig static void StructConstrainedInterfaceCallsTest() cil managed
{
.maxstack 10
ldstr "===================== Struct Constrained Interface Calls Test ====================="
call void [mscorlib]System.Console::WriteLine(string)
.locals init (valuetype MyStruct77`1<class BaseClass0> V_5)
ldloca V_5
initobj valuetype MyStruct77`1<class BaseClass0>
.try { ldloc V_5
ldstr "MyStruct77::Method4.MI.617()#" +
"MyStruct77::Method5.MI.619()#" +
"MyStruct77::Method6.MI.621<System.Object>()#" +
"MyStruct77::Method0.MI.623()#" +
"MyStruct77::Method1.MI.625()#" +
"MyStruct77::Method2.626<System.Object>()#" +
"MyStruct77::Method3.MI.628<System.Object>()#"
call void Generated27::M.MyStruct77.T<class BaseClass0,valuetype MyStruct77`1<class BaseClass0>>(!!1,string) leave.s LV0
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV0} LV0:
.try { ldloc V_5
ldstr "MyStruct77::Method4.MI.617()#" +
"MyStruct77::Method5.MI.619()#" +
"MyStruct77::Method6.MI.621<System.Object>()#" +
"MyStruct77::Method0.MI.623()#" +
"MyStruct77::Method1.MI.625()#" +
"MyStruct77::Method2.626<System.Object>()#" +
"MyStruct77::Method3.MI.628<System.Object>()#"
call void Generated27::M.MyStruct77.A<valuetype MyStruct77`1<class BaseClass0>>(!!0,string) leave.s LV1
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV1} LV1:
.locals init (valuetype MyStruct77`1<class BaseClass1> V_6)
ldloca V_6
initobj valuetype MyStruct77`1<class BaseClass1>
.try { ldloc V_6
ldstr "MyStruct77::Method4.MI.617()#" +
"MyStruct77::Method5.MI.619()#" +
"MyStruct77::Method6.MI.621<System.Object>()#" +
"MyStruct77::Method0.MI.623()#" +
"MyStruct77::Method1.MI.625()#" +
"MyStruct77::Method2.626<System.Object>()#" +
"MyStruct77::Method3.MI.628<System.Object>()#"
call void Generated27::M.MyStruct77.T<class BaseClass1,valuetype MyStruct77`1<class BaseClass1>>(!!1,string) leave.s LV2
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV2} LV2:
.try { ldloc V_6
ldstr "MyStruct77::Method4.MI.617()#" +
"MyStruct77::Method5.MI.619()#" +
"MyStruct77::Method6.MI.621<System.Object>()#" +
"MyStruct77::Method0.MI.623()#" +
"MyStruct77::Method1.MI.625()#" +
"MyStruct77::Method2.626<System.Object>()#" +
"MyStruct77::Method3.MI.628<System.Object>()#"
call void Generated27::M.MyStruct77.B<valuetype MyStruct77`1<class BaseClass1>>(!!0,string) leave.s LV3
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV3} LV3:
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
.method public hidebysig static void CalliTest() cil managed
{
.maxstack 10
.locals init (object V_0)
ldstr "========================== Method Calli Test =========================="
call void [mscorlib]System.Console::WriteLine(string)
.locals init (valuetype MyStruct77`1<class BaseClass0> V_7)
ldloca V_7
initobj valuetype MyStruct77`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldvirtftn instance string valuetype MyStruct77`1<class BaseClass0>::Method4()
calli default string(object)
ldstr "MyStruct77::Method4.616()"
ldstr "valuetype MyStruct77`1<class BaseClass0> on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldvirtftn instance string valuetype MyStruct77`1<class BaseClass0>::Method5()
calli default string(object)
ldstr "MyStruct77::Method5.618()"
ldstr "valuetype MyStruct77`1<class BaseClass0> on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldvirtftn instance string valuetype MyStruct77`1<class BaseClass0>::Method6<object>()
calli default string(object)
ldstr "MyStruct77::Method6.620<System.Object>()"
ldstr "valuetype MyStruct77`1<class BaseClass0> on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldvirtftn instance string valuetype MyStruct77`1<class BaseClass0>::Method0()
calli default string(object)
ldstr "MyStruct77::Method0.622()"
ldstr "valuetype MyStruct77`1<class BaseClass0> on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldvirtftn instance string valuetype MyStruct77`1<class BaseClass0>::Method1()
calli default string(object)
ldstr "MyStruct77::Method1.624()"
ldstr "valuetype MyStruct77`1<class BaseClass0> on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldvirtftn instance string valuetype MyStruct77`1<class BaseClass0>::Method2<object>()
calli default string(object)
ldstr "MyStruct77::Method2.626<System.Object>()"
ldstr "valuetype MyStruct77`1<class BaseClass0> on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldvirtftn instance string valuetype MyStruct77`1<class BaseClass0>::Method3<object>()
calli default string(object)
ldstr "MyStruct77::Method3.627<System.Object>()"
ldstr "valuetype MyStruct77`1<class BaseClass0> on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldvirtftn instance string valuetype MyStruct77`1<class BaseClass0>::ClassMethod157()
calli default string(object)
ldstr "MyStruct77::ClassMethod157.629()"
ldstr "valuetype MyStruct77`1<class BaseClass0> on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7 box valuetype MyStruct77`1<class BaseClass0> ldnull
ldloc V_7 box valuetype MyStruct77`1<class BaseClass0>
ldvirtftn instance bool valuetype MyStruct77`1<class BaseClass0>::Equals(object) calli default bool(object,object) pop
ldloc V_7 box valuetype MyStruct77`1<class BaseClass0>
ldloc V_7 box valuetype MyStruct77`1<class BaseClass0>
ldvirtftn instance int32 valuetype MyStruct77`1<class BaseClass0>::GetHashCode() calli default int32(object) pop
ldloc V_7 box valuetype MyStruct77`1<class BaseClass0>
ldloc V_7 box valuetype MyStruct77`1<class BaseClass0>
ldvirtftn instance string valuetype MyStruct77`1<class BaseClass0>::ToString() calli default string(object) pop
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4()
calli default string(object)
ldstr "MyStruct77::Method4.MI.617()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5()
calli default string(object)
ldstr "MyStruct77::Method5.MI.619()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>()
calli default string(object)
ldstr "MyStruct77::Method6.MI.621<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldvirtftn instance string IBase0::Method0()
calli default string(object)
ldstr "MyStruct77::Method0.MI.623()"
ldstr "IBase0 on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldvirtftn instance string IBase0::Method1()
calli default string(object)
ldstr "MyStruct77::Method1.MI.625()"
ldstr "IBase0 on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldvirtftn instance string IBase0::Method2<object>()
calli default string(object)
ldstr "MyStruct77::Method2.626<System.Object>()"
ldstr "IBase0 on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldvirtftn instance string IBase0::Method3<object>()
calli default string(object)
ldstr "MyStruct77::Method3.MI.628<System.Object>()"
ldstr "IBase0 on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4()
calli default string(object)
ldstr "MyStruct77::Method4.MI.617()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5()
calli default string(object)
ldstr "MyStruct77::Method5.MI.619()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>()
calli default string(object)
ldstr "MyStruct77::Method6.MI.621<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
.locals init (valuetype MyStruct77`1<class BaseClass1> V_8)
ldloca V_8
initobj valuetype MyStruct77`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldvirtftn instance string valuetype MyStruct77`1<class BaseClass1>::Method4()
calli default string(object)
ldstr "MyStruct77::Method4.616()"
ldstr "valuetype MyStruct77`1<class BaseClass1> on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldvirtftn instance string valuetype MyStruct77`1<class BaseClass1>::Method5()
calli default string(object)
ldstr "MyStruct77::Method5.618()"
ldstr "valuetype MyStruct77`1<class BaseClass1> on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldvirtftn instance string valuetype MyStruct77`1<class BaseClass1>::Method6<object>()
calli default string(object)
ldstr "MyStruct77::Method6.620<System.Object>()"
ldstr "valuetype MyStruct77`1<class BaseClass1> on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldvirtftn instance string valuetype MyStruct77`1<class BaseClass1>::Method0()
calli default string(object)
ldstr "MyStruct77::Method0.622()"
ldstr "valuetype MyStruct77`1<class BaseClass1> on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldvirtftn instance string valuetype MyStruct77`1<class BaseClass1>::Method1()
calli default string(object)
ldstr "MyStruct77::Method1.624()"
ldstr "valuetype MyStruct77`1<class BaseClass1> on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldvirtftn instance string valuetype MyStruct77`1<class BaseClass1>::Method2<object>()
calli default string(object)
ldstr "MyStruct77::Method2.626<System.Object>()"
ldstr "valuetype MyStruct77`1<class BaseClass1> on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldvirtftn instance string valuetype MyStruct77`1<class BaseClass1>::Method3<object>()
calli default string(object)
ldstr "MyStruct77::Method3.627<System.Object>()"
ldstr "valuetype MyStruct77`1<class BaseClass1> on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldvirtftn instance string valuetype MyStruct77`1<class BaseClass1>::ClassMethod157()
calli default string(object)
ldstr "MyStruct77::ClassMethod157.629()"
ldstr "valuetype MyStruct77`1<class BaseClass1> on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8 box valuetype MyStruct77`1<class BaseClass1> ldnull
ldloc V_8 box valuetype MyStruct77`1<class BaseClass1>
ldvirtftn instance bool valuetype MyStruct77`1<class BaseClass1>::Equals(object) calli default bool(object,object) pop
ldloc V_8 box valuetype MyStruct77`1<class BaseClass1>
ldloc V_8 box valuetype MyStruct77`1<class BaseClass1>
ldvirtftn instance int32 valuetype MyStruct77`1<class BaseClass1>::GetHashCode() calli default int32(object) pop
ldloc V_8 box valuetype MyStruct77`1<class BaseClass1>
ldloc V_8 box valuetype MyStruct77`1<class BaseClass1>
ldvirtftn instance string valuetype MyStruct77`1<class BaseClass1>::ToString() calli default string(object) pop
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4()
calli default string(object)
ldstr "MyStruct77::Method4.MI.617()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5()
calli default string(object)
ldstr "MyStruct77::Method5.MI.619()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>()
calli default string(object)
ldstr "MyStruct77::Method6.MI.621<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldvirtftn instance string IBase0::Method0()
calli default string(object)
ldstr "MyStruct77::Method0.MI.623()"
ldstr "IBase0 on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldvirtftn instance string IBase0::Method1()
calli default string(object)
ldstr "MyStruct77::Method1.MI.625()"
ldstr "IBase0 on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldvirtftn instance string IBase0::Method2<object>()
calli default string(object)
ldstr "MyStruct77::Method2.626<System.Object>()"
ldstr "IBase0 on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldvirtftn instance string IBase0::Method3<object>()
calli default string(object)
ldstr "MyStruct77::Method3.MI.628<System.Object>()"
ldstr "IBase0 on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4()
calli default string(object)
ldstr "MyStruct77::Method4.MI.617()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5()
calli default string(object)
ldstr "MyStruct77::Method5.MI.619()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>()
calli default string(object)
ldstr "MyStruct77::Method6.MI.621<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
.method public hidebysig static int32 Main() cil managed
{
.custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = (
01 00 00 00
)
.entrypoint
.maxstack 10
call void Generated27::MethodCallingTest()
call void Generated27::ConstrainedCallsTest()
call void Generated27::StructConstrainedInterfaceCallsTest()
call void Generated27::CalliTest()
ldc.i4 100
ret
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
.assembly extern mscorlib { .publickeytoken = (B7 7A 5C 56 19 34 E0 89 ) .ver 4:0:0:0 }
.assembly extern TestFramework { .publickeytoken = ( B0 3F 5F 7F 11 D5 0A 3A ) }
//TYPES IN FORWARDER ASSEMBLIES:
//TEST ASSEMBLY:
.assembly Generated27 { .hash algorithm 0x00008004 }
.assembly extern xunit.core {}
.class public BaseClass0
{
.method public hidebysig specialname rtspecialname instance void .ctor() cil managed {
ldarg.0
call instance void [mscorlib]System.Object::.ctor()
ret
}
}
.class public BaseClass1
extends BaseClass0
{
.method public hidebysig specialname rtspecialname instance void .ctor() cil managed {
ldarg.0
call instance void BaseClass0::.ctor()
ret
}
}
.class public sequential sealed MyStruct77`1<T0>
extends [mscorlib]System.ValueType
implements class IBase1`1<class BaseClass1>, IBase0
{
.pack 0
.size 1
.method public hidebysig newslot virtual instance string Method4() cil managed noinlining {
ldstr "MyStruct77::Method4.616()"
ret
}
.method public hidebysig newslot virtual instance string 'IBase1<class BaseClass1>.Method4'() cil managed noinlining {
.override method instance string class IBase1`1<class BaseClass1>::Method4()
ldstr "MyStruct77::Method4.MI.617()"
ret
}
.method public hidebysig newslot virtual instance string Method5() cil managed noinlining {
ldstr "MyStruct77::Method5.618()"
ret
}
.method public hidebysig newslot virtual instance string 'IBase1<class BaseClass1>.Method5'() cil managed noinlining {
.override method instance string class IBase1`1<class BaseClass1>::Method5()
ldstr "MyStruct77::Method5.MI.619()"
ret
}
.method public hidebysig virtual instance string Method6<M0>() cil managed noinlining {
ldstr "MyStruct77::Method6.620<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot virtual instance string 'IBase1<class BaseClass1>.Method6'<M0>() cil managed noinlining {
.override method instance string class IBase1`1<class BaseClass1>::Method6<[1]>()
ldstr "MyStruct77::Method6.MI.621<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig virtual instance string Method0() cil managed noinlining {
ldstr "MyStruct77::Method0.622()"
ret
}
.method public hidebysig newslot virtual instance string 'IBase0.Method0'() cil managed noinlining {
.override method instance string IBase0::Method0()
ldstr "MyStruct77::Method0.MI.623()"
ret
}
.method public hidebysig newslot virtual instance string Method1() cil managed noinlining {
ldstr "MyStruct77::Method1.624()"
ret
}
.method public hidebysig newslot virtual instance string 'IBase0.Method1'() cil managed noinlining {
.override method instance string IBase0::Method1()
ldstr "MyStruct77::Method1.MI.625()"
ret
}
.method public hidebysig newslot virtual instance string Method2<M0>() cil managed noinlining {
ldstr "MyStruct77::Method2.626<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig virtual instance string Method3<M0>() cil managed noinlining {
ldstr "MyStruct77::Method3.627<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot virtual instance string 'IBase0.Method3'<M0>() cil managed noinlining {
.override method instance string IBase0::Method3<[1]>()
ldstr "MyStruct77::Method3.MI.628<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot instance string ClassMethod157() cil managed noinlining {
ldstr "MyStruct77::ClassMethod157.629()"
ret
}
.method public hidebysig virtual instance bool Equals(object obj) cil managed { ldc.i4.0 ret }
.method public hidebysig virtual instance int32 GetHashCode() cil managed { ldc.i4.0 ret }
.method public hidebysig virtual instance string ToString() cil managed { ldstr "" ret }
}
.class interface public abstract IBase1`1<+T0>
{
.method public hidebysig newslot abstract virtual instance string Method4() cil managed { }
.method public hidebysig newslot abstract virtual instance string Method5() cil managed { }
.method public hidebysig newslot abstract virtual instance string Method6<M0>() cil managed { }
}
.class interface public abstract IBase0
{
.method public hidebysig newslot abstract virtual instance string Method0() cil managed { }
.method public hidebysig newslot abstract virtual instance string Method1() cil managed { }
.method public hidebysig newslot abstract virtual instance string Method2<M0>() cil managed { }
.method public hidebysig newslot abstract virtual instance string Method3<M0>() cil managed { }
}
.class public auto ansi beforefieldinit Generated27 {
.method static void M.BaseClass0<(BaseClass0)W>(!!W inst, string exp) cil managed {
.maxstack 5
.locals init (string[] actualResults)
ldc.i4.s 0
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.BaseClass0<(BaseClass0)W>(!!W inst, string exp)"
ldc.i4.s 0
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.BaseClass1<(BaseClass1)W>(!!W inst, string exp) cil managed {
.maxstack 5
.locals init (string[] actualResults)
ldc.i4.s 0
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.BaseClass1<(BaseClass1)W>(!!W inst, string exp)"
ldc.i4.s 0
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase1.T<T0,(class IBase1`1<!!T0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 8
.locals init (string[] actualResults)
ldc.i4.s 3
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase1.T<T0,(class IBase1`1<!!T0>)W>(!!W 'inst', string exp)"
ldc.i4.s 3
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<!!T0>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<!!T0>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<!!T0>::Method6<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase1.A<(class IBase1`1<class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 8
.locals init (string[] actualResults)
ldc.i4.s 3
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase1.A<(class IBase1`1<class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 3
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass0>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass0>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase1.B<(class IBase1`1<class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 8
.locals init (string[] actualResults)
ldc.i4.s 3
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase1.B<(class IBase1`1<class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 3
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase0<(IBase0)W>(!!W inst, string exp) cil managed {
.maxstack 9
.locals init (string[] actualResults)
ldc.i4.s 4
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase0<(IBase0)W>(!!W inst, string exp)"
ldc.i4.s 4
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string IBase0::Method0()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string IBase0::Method1()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string IBase0::Method2<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string IBase0::Method3<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.MyStruct77.T<T0,(valuetype MyStruct77`1<!!T0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 12
.locals init (string[] actualResults)
ldc.i4.s 7
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.MyStruct77.T<T0,(valuetype MyStruct77`1<!!T0>)W>(!!W 'inst', string exp)"
ldc.i4.s 7
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. valuetype MyStruct77`1<!!T0>
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. valuetype MyStruct77`1<!!T0>
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. valuetype MyStruct77`1<!!T0>
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. valuetype MyStruct77`1<!!T0>
callvirt instance string IBase0::Method0()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. valuetype MyStruct77`1<!!T0>
callvirt instance string IBase0::Method1()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. valuetype MyStruct77`1<!!T0>
callvirt instance string IBase0::Method2<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 6
ldarga.s 0
constrained. valuetype MyStruct77`1<!!T0>
callvirt instance string IBase0::Method3<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.MyStruct77.A<(valuetype MyStruct77`1<class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 12
.locals init (string[] actualResults)
ldc.i4.s 7
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.MyStruct77.A<(valuetype MyStruct77`1<class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 7
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. valuetype MyStruct77`1<class BaseClass0>
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. valuetype MyStruct77`1<class BaseClass0>
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. valuetype MyStruct77`1<class BaseClass0>
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. valuetype MyStruct77`1<class BaseClass0>
callvirt instance string IBase0::Method0()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. valuetype MyStruct77`1<class BaseClass0>
callvirt instance string IBase0::Method1()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. valuetype MyStruct77`1<class BaseClass0>
callvirt instance string IBase0::Method2<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 6
ldarga.s 0
constrained. valuetype MyStruct77`1<class BaseClass0>
callvirt instance string IBase0::Method3<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.MyStruct77.B<(valuetype MyStruct77`1<class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 12
.locals init (string[] actualResults)
ldc.i4.s 7
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.MyStruct77.B<(valuetype MyStruct77`1<class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 7
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. valuetype MyStruct77`1<class BaseClass1>
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. valuetype MyStruct77`1<class BaseClass1>
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. valuetype MyStruct77`1<class BaseClass1>
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. valuetype MyStruct77`1<class BaseClass1>
callvirt instance string IBase0::Method0()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. valuetype MyStruct77`1<class BaseClass1>
callvirt instance string IBase0::Method1()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. valuetype MyStruct77`1<class BaseClass1>
callvirt instance string IBase0::Method2<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 6
ldarga.s 0
constrained. valuetype MyStruct77`1<class BaseClass1>
callvirt instance string IBase0::Method3<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method public hidebysig static void MethodCallingTest() cil managed
{
.maxstack 10
.locals init (object V_0)
ldstr "========================== Method Calling Test =========================="
call void [mscorlib]System.Console::WriteLine(string)
.locals init (valuetype MyStruct77`1<class BaseClass0> V_1)
ldloca V_1
initobj valuetype MyStruct77`1<class BaseClass0>
ldloca V_1
dup
call instance string valuetype MyStruct77`1<class BaseClass0>::Method4()
ldstr "MyStruct77::Method4.616()"
ldstr "valuetype MyStruct77`1<class BaseClass0> on type MyStruct77"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct77`1<class BaseClass0>::Method5()
ldstr "MyStruct77::Method5.618()"
ldstr "valuetype MyStruct77`1<class BaseClass0> on type MyStruct77"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct77`1<class BaseClass0>::Method6<object>()
ldstr "MyStruct77::Method6.620<System.Object>()"
ldstr "valuetype MyStruct77`1<class BaseClass0> on type MyStruct77"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct77`1<class BaseClass0>::Method0()
ldstr "MyStruct77::Method0.622()"
ldstr "valuetype MyStruct77`1<class BaseClass0> on type MyStruct77"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct77`1<class BaseClass0>::Method1()
ldstr "MyStruct77::Method1.624()"
ldstr "valuetype MyStruct77`1<class BaseClass0> on type MyStruct77"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct77`1<class BaseClass0>::Method2<object>()
ldstr "MyStruct77::Method2.626<System.Object>()"
ldstr "valuetype MyStruct77`1<class BaseClass0> on type MyStruct77"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct77`1<class BaseClass0>::Method3<object>()
ldstr "MyStruct77::Method3.627<System.Object>()"
ldstr "valuetype MyStruct77`1<class BaseClass0> on type MyStruct77"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct77`1<class BaseClass0>::ClassMethod157()
ldstr "MyStruct77::ClassMethod157.629()"
ldstr "valuetype MyStruct77`1<class BaseClass0> on type MyStruct77"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup ldnull call instance bool valuetype MyStruct77`1<class BaseClass0>::Equals(object) pop
dup call instance int32 valuetype MyStruct77`1<class BaseClass0>::GetHashCode() pop
dup call instance string valuetype MyStruct77`1<class BaseClass0>::ToString() pop
pop
ldloc V_1
box valuetype MyStruct77`1<class BaseClass0>
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
ldstr "MyStruct77::Method4.MI.617()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
ldstr "MyStruct77::Method5.MI.619()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
ldstr "MyStruct77::Method6.MI.621<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc V_1
box valuetype MyStruct77`1<class BaseClass0>
dup
callvirt instance string IBase0::Method0()
ldstr "MyStruct77::Method0.MI.623()"
ldstr "IBase0 on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string IBase0::Method1()
ldstr "MyStruct77::Method1.MI.625()"
ldstr "IBase0 on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string IBase0::Method2<object>()
ldstr "MyStruct77::Method2.626<System.Object>()"
ldstr "IBase0 on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string IBase0::Method3<object>()
ldstr "MyStruct77::Method3.MI.628<System.Object>()"
ldstr "IBase0 on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc V_1
box valuetype MyStruct77`1<class BaseClass0>
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method4()
ldstr "MyStruct77::Method4.MI.617()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method5()
ldstr "MyStruct77::Method5.MI.619()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>()
ldstr "MyStruct77::Method6.MI.621<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
.locals init (valuetype MyStruct77`1<class BaseClass1> V_2)
ldloca V_2
initobj valuetype MyStruct77`1<class BaseClass1>
ldloca V_2
dup
call instance string valuetype MyStruct77`1<class BaseClass1>::Method4()
ldstr "MyStruct77::Method4.616()"
ldstr "valuetype MyStruct77`1<class BaseClass1> on type MyStruct77"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct77`1<class BaseClass1>::Method5()
ldstr "MyStruct77::Method5.618()"
ldstr "valuetype MyStruct77`1<class BaseClass1> on type MyStruct77"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct77`1<class BaseClass1>::Method6<object>()
ldstr "MyStruct77::Method6.620<System.Object>()"
ldstr "valuetype MyStruct77`1<class BaseClass1> on type MyStruct77"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct77`1<class BaseClass1>::Method0()
ldstr "MyStruct77::Method0.622()"
ldstr "valuetype MyStruct77`1<class BaseClass1> on type MyStruct77"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct77`1<class BaseClass1>::Method1()
ldstr "MyStruct77::Method1.624()"
ldstr "valuetype MyStruct77`1<class BaseClass1> on type MyStruct77"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct77`1<class BaseClass1>::Method2<object>()
ldstr "MyStruct77::Method2.626<System.Object>()"
ldstr "valuetype MyStruct77`1<class BaseClass1> on type MyStruct77"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct77`1<class BaseClass1>::Method3<object>()
ldstr "MyStruct77::Method3.627<System.Object>()"
ldstr "valuetype MyStruct77`1<class BaseClass1> on type MyStruct77"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct77`1<class BaseClass1>::ClassMethod157()
ldstr "MyStruct77::ClassMethod157.629()"
ldstr "valuetype MyStruct77`1<class BaseClass1> on type MyStruct77"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup ldnull call instance bool valuetype MyStruct77`1<class BaseClass1>::Equals(object) pop
dup call instance int32 valuetype MyStruct77`1<class BaseClass1>::GetHashCode() pop
dup call instance string valuetype MyStruct77`1<class BaseClass1>::ToString() pop
pop
ldloc V_2
box valuetype MyStruct77`1<class BaseClass1>
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
ldstr "MyStruct77::Method4.MI.617()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
ldstr "MyStruct77::Method5.MI.619()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
ldstr "MyStruct77::Method6.MI.621<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc V_2
box valuetype MyStruct77`1<class BaseClass1>
dup
callvirt instance string IBase0::Method0()
ldstr "MyStruct77::Method0.MI.623()"
ldstr "IBase0 on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string IBase0::Method1()
ldstr "MyStruct77::Method1.MI.625()"
ldstr "IBase0 on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string IBase0::Method2<object>()
ldstr "MyStruct77::Method2.626<System.Object>()"
ldstr "IBase0 on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string IBase0::Method3<object>()
ldstr "MyStruct77::Method3.MI.628<System.Object>()"
ldstr "IBase0 on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc V_2
box valuetype MyStruct77`1<class BaseClass1>
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method4()
ldstr "MyStruct77::Method4.MI.617()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method5()
ldstr "MyStruct77::Method5.MI.619()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>()
ldstr "MyStruct77::Method6.MI.621<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
.method public hidebysig static void ConstrainedCallsTest() cil managed
{
.maxstack 10
.locals init (object V_0)
ldstr "========================== Constrained Calls Test =========================="
call void [mscorlib]System.Console::WriteLine(string)
.locals init (valuetype MyStruct77`1<class BaseClass0> V_3)
ldloca V_3
initobj valuetype MyStruct77`1<class BaseClass0>
.try { ldloc V_3
ldstr "MyStruct77::Method4.MI.617()#MyStruct77::Method5.MI.619()#MyStruct77::Method6.MI.621<System.Object>()#"
call void Generated27::M.IBase1.T<class BaseClass1,valuetype MyStruct77`1<class BaseClass0>>(!!1,string) leave.s LV0
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV0} LV0:
.try { ldloc V_3
ldstr "MyStruct77::Method4.MI.617()#MyStruct77::Method5.MI.619()#MyStruct77::Method6.MI.621<System.Object>()#"
call void Generated27::M.IBase1.B<valuetype MyStruct77`1<class BaseClass0>>(!!0,string) leave.s LV1
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV1} LV1:
.try { ldloc V_3
ldstr "MyStruct77::Method0.MI.623()#MyStruct77::Method1.MI.625()#MyStruct77::Method2.626<System.Object>()#MyStruct77::Method3.MI.628<System.Object>()#"
call void Generated27::M.IBase0<valuetype MyStruct77`1<class BaseClass0>>(!!0,string) leave.s LV2
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV2} LV2:
.try { ldloc V_3
ldstr "MyStruct77::Method4.MI.617()#MyStruct77::Method5.MI.619()#MyStruct77::Method6.MI.621<System.Object>()#"
call void Generated27::M.IBase1.T<class BaseClass0,valuetype MyStruct77`1<class BaseClass0>>(!!1,string) leave.s LV3
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV3} LV3:
.try { ldloc V_3
ldstr "MyStruct77::Method4.MI.617()#MyStruct77::Method5.MI.619()#MyStruct77::Method6.MI.621<System.Object>()#"
call void Generated27::M.IBase1.A<valuetype MyStruct77`1<class BaseClass0>>(!!0,string) leave.s LV4
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV4} LV4:
.locals init (valuetype MyStruct77`1<class BaseClass1> V_4)
ldloca V_4
initobj valuetype MyStruct77`1<class BaseClass1>
.try { ldloc V_4
ldstr "MyStruct77::Method4.MI.617()#MyStruct77::Method5.MI.619()#MyStruct77::Method6.MI.621<System.Object>()#"
call void Generated27::M.IBase1.T<class BaseClass1,valuetype MyStruct77`1<class BaseClass1>>(!!1,string) leave.s LV5
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV5} LV5:
.try { ldloc V_4
ldstr "MyStruct77::Method4.MI.617()#MyStruct77::Method5.MI.619()#MyStruct77::Method6.MI.621<System.Object>()#"
call void Generated27::M.IBase1.B<valuetype MyStruct77`1<class BaseClass1>>(!!0,string) leave.s LV6
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV6} LV6:
.try { ldloc V_4
ldstr "MyStruct77::Method0.MI.623()#MyStruct77::Method1.MI.625()#MyStruct77::Method2.626<System.Object>()#MyStruct77::Method3.MI.628<System.Object>()#"
call void Generated27::M.IBase0<valuetype MyStruct77`1<class BaseClass1>>(!!0,string) leave.s LV7
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV7} LV7:
.try { ldloc V_4
ldstr "MyStruct77::Method4.MI.617()#MyStruct77::Method5.MI.619()#MyStruct77::Method6.MI.621<System.Object>()#"
call void Generated27::M.IBase1.T<class BaseClass0,valuetype MyStruct77`1<class BaseClass1>>(!!1,string) leave.s LV8
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV8} LV8:
.try { ldloc V_4
ldstr "MyStruct77::Method4.MI.617()#MyStruct77::Method5.MI.619()#MyStruct77::Method6.MI.621<System.Object>()#"
call void Generated27::M.IBase1.A<valuetype MyStruct77`1<class BaseClass1>>(!!0,string) leave.s LV9
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV9} LV9:
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
.method public hidebysig static void StructConstrainedInterfaceCallsTest() cil managed
{
.maxstack 10
ldstr "===================== Struct Constrained Interface Calls Test ====================="
call void [mscorlib]System.Console::WriteLine(string)
.locals init (valuetype MyStruct77`1<class BaseClass0> V_5)
ldloca V_5
initobj valuetype MyStruct77`1<class BaseClass0>
.try { ldloc V_5
ldstr "MyStruct77::Method4.MI.617()#" +
"MyStruct77::Method5.MI.619()#" +
"MyStruct77::Method6.MI.621<System.Object>()#" +
"MyStruct77::Method0.MI.623()#" +
"MyStruct77::Method1.MI.625()#" +
"MyStruct77::Method2.626<System.Object>()#" +
"MyStruct77::Method3.MI.628<System.Object>()#"
call void Generated27::M.MyStruct77.T<class BaseClass0,valuetype MyStruct77`1<class BaseClass0>>(!!1,string) leave.s LV0
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV0} LV0:
.try { ldloc V_5
ldstr "MyStruct77::Method4.MI.617()#" +
"MyStruct77::Method5.MI.619()#" +
"MyStruct77::Method6.MI.621<System.Object>()#" +
"MyStruct77::Method0.MI.623()#" +
"MyStruct77::Method1.MI.625()#" +
"MyStruct77::Method2.626<System.Object>()#" +
"MyStruct77::Method3.MI.628<System.Object>()#"
call void Generated27::M.MyStruct77.A<valuetype MyStruct77`1<class BaseClass0>>(!!0,string) leave.s LV1
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV1} LV1:
.locals init (valuetype MyStruct77`1<class BaseClass1> V_6)
ldloca V_6
initobj valuetype MyStruct77`1<class BaseClass1>
.try { ldloc V_6
ldstr "MyStruct77::Method4.MI.617()#" +
"MyStruct77::Method5.MI.619()#" +
"MyStruct77::Method6.MI.621<System.Object>()#" +
"MyStruct77::Method0.MI.623()#" +
"MyStruct77::Method1.MI.625()#" +
"MyStruct77::Method2.626<System.Object>()#" +
"MyStruct77::Method3.MI.628<System.Object>()#"
call void Generated27::M.MyStruct77.T<class BaseClass1,valuetype MyStruct77`1<class BaseClass1>>(!!1,string) leave.s LV2
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV2} LV2:
.try { ldloc V_6
ldstr "MyStruct77::Method4.MI.617()#" +
"MyStruct77::Method5.MI.619()#" +
"MyStruct77::Method6.MI.621<System.Object>()#" +
"MyStruct77::Method0.MI.623()#" +
"MyStruct77::Method1.MI.625()#" +
"MyStruct77::Method2.626<System.Object>()#" +
"MyStruct77::Method3.MI.628<System.Object>()#"
call void Generated27::M.MyStruct77.B<valuetype MyStruct77`1<class BaseClass1>>(!!0,string) leave.s LV3
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV3} LV3:
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
.method public hidebysig static void CalliTest() cil managed
{
.maxstack 10
.locals init (object V_0)
ldstr "========================== Method Calli Test =========================="
call void [mscorlib]System.Console::WriteLine(string)
.locals init (valuetype MyStruct77`1<class BaseClass0> V_7)
ldloca V_7
initobj valuetype MyStruct77`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldvirtftn instance string valuetype MyStruct77`1<class BaseClass0>::Method4()
calli default string(object)
ldstr "MyStruct77::Method4.616()"
ldstr "valuetype MyStruct77`1<class BaseClass0> on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldvirtftn instance string valuetype MyStruct77`1<class BaseClass0>::Method5()
calli default string(object)
ldstr "MyStruct77::Method5.618()"
ldstr "valuetype MyStruct77`1<class BaseClass0> on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldvirtftn instance string valuetype MyStruct77`1<class BaseClass0>::Method6<object>()
calli default string(object)
ldstr "MyStruct77::Method6.620<System.Object>()"
ldstr "valuetype MyStruct77`1<class BaseClass0> on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldvirtftn instance string valuetype MyStruct77`1<class BaseClass0>::Method0()
calli default string(object)
ldstr "MyStruct77::Method0.622()"
ldstr "valuetype MyStruct77`1<class BaseClass0> on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldvirtftn instance string valuetype MyStruct77`1<class BaseClass0>::Method1()
calli default string(object)
ldstr "MyStruct77::Method1.624()"
ldstr "valuetype MyStruct77`1<class BaseClass0> on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldvirtftn instance string valuetype MyStruct77`1<class BaseClass0>::Method2<object>()
calli default string(object)
ldstr "MyStruct77::Method2.626<System.Object>()"
ldstr "valuetype MyStruct77`1<class BaseClass0> on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldvirtftn instance string valuetype MyStruct77`1<class BaseClass0>::Method3<object>()
calli default string(object)
ldstr "MyStruct77::Method3.627<System.Object>()"
ldstr "valuetype MyStruct77`1<class BaseClass0> on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldvirtftn instance string valuetype MyStruct77`1<class BaseClass0>::ClassMethod157()
calli default string(object)
ldstr "MyStruct77::ClassMethod157.629()"
ldstr "valuetype MyStruct77`1<class BaseClass0> on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7 box valuetype MyStruct77`1<class BaseClass0> ldnull
ldloc V_7 box valuetype MyStruct77`1<class BaseClass0>
ldvirtftn instance bool valuetype MyStruct77`1<class BaseClass0>::Equals(object) calli default bool(object,object) pop
ldloc V_7 box valuetype MyStruct77`1<class BaseClass0>
ldloc V_7 box valuetype MyStruct77`1<class BaseClass0>
ldvirtftn instance int32 valuetype MyStruct77`1<class BaseClass0>::GetHashCode() calli default int32(object) pop
ldloc V_7 box valuetype MyStruct77`1<class BaseClass0>
ldloc V_7 box valuetype MyStruct77`1<class BaseClass0>
ldvirtftn instance string valuetype MyStruct77`1<class BaseClass0>::ToString() calli default string(object) pop
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4()
calli default string(object)
ldstr "MyStruct77::Method4.MI.617()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5()
calli default string(object)
ldstr "MyStruct77::Method5.MI.619()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>()
calli default string(object)
ldstr "MyStruct77::Method6.MI.621<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldvirtftn instance string IBase0::Method0()
calli default string(object)
ldstr "MyStruct77::Method0.MI.623()"
ldstr "IBase0 on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldvirtftn instance string IBase0::Method1()
calli default string(object)
ldstr "MyStruct77::Method1.MI.625()"
ldstr "IBase0 on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldvirtftn instance string IBase0::Method2<object>()
calli default string(object)
ldstr "MyStruct77::Method2.626<System.Object>()"
ldstr "IBase0 on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldvirtftn instance string IBase0::Method3<object>()
calli default string(object)
ldstr "MyStruct77::Method3.MI.628<System.Object>()"
ldstr "IBase0 on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4()
calli default string(object)
ldstr "MyStruct77::Method4.MI.617()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5()
calli default string(object)
ldstr "MyStruct77::Method5.MI.619()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct77`1<class BaseClass0>
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>()
calli default string(object)
ldstr "MyStruct77::Method6.MI.621<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct77`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
.locals init (valuetype MyStruct77`1<class BaseClass1> V_8)
ldloca V_8
initobj valuetype MyStruct77`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldvirtftn instance string valuetype MyStruct77`1<class BaseClass1>::Method4()
calli default string(object)
ldstr "MyStruct77::Method4.616()"
ldstr "valuetype MyStruct77`1<class BaseClass1> on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldvirtftn instance string valuetype MyStruct77`1<class BaseClass1>::Method5()
calli default string(object)
ldstr "MyStruct77::Method5.618()"
ldstr "valuetype MyStruct77`1<class BaseClass1> on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldvirtftn instance string valuetype MyStruct77`1<class BaseClass1>::Method6<object>()
calli default string(object)
ldstr "MyStruct77::Method6.620<System.Object>()"
ldstr "valuetype MyStruct77`1<class BaseClass1> on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldvirtftn instance string valuetype MyStruct77`1<class BaseClass1>::Method0()
calli default string(object)
ldstr "MyStruct77::Method0.622()"
ldstr "valuetype MyStruct77`1<class BaseClass1> on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldvirtftn instance string valuetype MyStruct77`1<class BaseClass1>::Method1()
calli default string(object)
ldstr "MyStruct77::Method1.624()"
ldstr "valuetype MyStruct77`1<class BaseClass1> on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldvirtftn instance string valuetype MyStruct77`1<class BaseClass1>::Method2<object>()
calli default string(object)
ldstr "MyStruct77::Method2.626<System.Object>()"
ldstr "valuetype MyStruct77`1<class BaseClass1> on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldvirtftn instance string valuetype MyStruct77`1<class BaseClass1>::Method3<object>()
calli default string(object)
ldstr "MyStruct77::Method3.627<System.Object>()"
ldstr "valuetype MyStruct77`1<class BaseClass1> on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldvirtftn instance string valuetype MyStruct77`1<class BaseClass1>::ClassMethod157()
calli default string(object)
ldstr "MyStruct77::ClassMethod157.629()"
ldstr "valuetype MyStruct77`1<class BaseClass1> on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8 box valuetype MyStruct77`1<class BaseClass1> ldnull
ldloc V_8 box valuetype MyStruct77`1<class BaseClass1>
ldvirtftn instance bool valuetype MyStruct77`1<class BaseClass1>::Equals(object) calli default bool(object,object) pop
ldloc V_8 box valuetype MyStruct77`1<class BaseClass1>
ldloc V_8 box valuetype MyStruct77`1<class BaseClass1>
ldvirtftn instance int32 valuetype MyStruct77`1<class BaseClass1>::GetHashCode() calli default int32(object) pop
ldloc V_8 box valuetype MyStruct77`1<class BaseClass1>
ldloc V_8 box valuetype MyStruct77`1<class BaseClass1>
ldvirtftn instance string valuetype MyStruct77`1<class BaseClass1>::ToString() calli default string(object) pop
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4()
calli default string(object)
ldstr "MyStruct77::Method4.MI.617()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5()
calli default string(object)
ldstr "MyStruct77::Method5.MI.619()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>()
calli default string(object)
ldstr "MyStruct77::Method6.MI.621<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldvirtftn instance string IBase0::Method0()
calli default string(object)
ldstr "MyStruct77::Method0.MI.623()"
ldstr "IBase0 on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldvirtftn instance string IBase0::Method1()
calli default string(object)
ldstr "MyStruct77::Method1.MI.625()"
ldstr "IBase0 on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldvirtftn instance string IBase0::Method2<object>()
calli default string(object)
ldstr "MyStruct77::Method2.626<System.Object>()"
ldstr "IBase0 on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldvirtftn instance string IBase0::Method3<object>()
calli default string(object)
ldstr "MyStruct77::Method3.MI.628<System.Object>()"
ldstr "IBase0 on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4()
calli default string(object)
ldstr "MyStruct77::Method4.MI.617()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5()
calli default string(object)
ldstr "MyStruct77::Method5.MI.619()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct77`1<class BaseClass1>
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>()
calli default string(object)
ldstr "MyStruct77::Method6.MI.621<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct77`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
.method public hidebysig static int32 Main() cil managed
{
.custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = (
01 00 00 00
)
.entrypoint
.maxstack 10
call void Generated27::MethodCallingTest()
call void Generated27::ConstrainedCallsTest()
call void Generated27::StructConstrainedInterfaceCallsTest()
call void Generated27::CalliTest()
ldc.i4 100
ret
}
}
| -1 |
dotnet/runtime | 65,924 | Fix libraries GCStress pipeline | Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| BruceForstall | "2022-02-27T04:30:37Z" | "2022-03-01T18:20:06Z" | dfe08b3bea9a1897562635307ab4bb70bc31fada | 79026a542a8364b6783c5d212bb1841ac2c873a9 | Fix libraries GCStress pipeline. Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| ./src/coreclr/inc/ex.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#if !defined(_EX_H_)
#define _EX_H_
#ifdef HOST_UNIX
#define EX_TRY_HOLDER \
HardwareExceptionHolder \
NativeExceptionHolderCatchAll __exceptionHolder; \
__exceptionHolder.Push(); \
#else // HOST_UNIX
#define EX_TRY_HOLDER
#endif // HOST_UNIX
#include "sstring.h"
#include "crtwrap.h"
#include "winwrap.h"
#include "corerror.h"
#include "stresslog.h"
#include "staticcontract.h"
#include "entrypoints.h"
#if !defined(_DEBUG_IMPL) && defined(_DEBUG) && !defined(DACCESS_COMPILE)
#define _DEBUG_IMPL 1
#endif
//===========================================================================================
// These abstractions hide the difference between legacy desktop CLR's (that don't support
// side-by-side-inproc and rely on a fixed SEH code to identify managed exceptions) and
// new CLR's that support side-by-side inproc.
//
// The new CLR's use a different set of SEH codes to avoid conflicting with the legacy CLR's.
// In addition, to distinguish between EH's raised by different inproc instances of the CLR,
// the module handle of the owning CLR is stored in ExceptionRecord.ExceptionInformation[4].
//
// (Note: all existing SEH's use either only slot [0] or no slots at all. We are leaving
// slots [1] thru [3] open for future expansion.)
//===========================================================================================
// Is this exception code one of the special CLR-specific SEH codes that participate in the
// instance-tagging scheme?
BOOL IsInstanceTaggedSEHCode(DWORD dwExceptionCode);
// This set of overloads generates the NumberParameters and ExceptionInformation[] array to
// pass to RaiseException().
//
// Parameters:
// exceptionArgs: a fixed-size array of size INSTANCE_TAGGED_SEH_PARAM_ARRAY_SIZE.
// This will get filled in by this function. (The module handle goes
// in the last slot if this is a side-by-side-inproc enabled build.)
//
// exceptionArg1... up to four arguments that go in slots [0]..[3]. These depend on
// the specific requirements of your exception code.
//
// Returns:
// The NumberParameters to pass to RaiseException().
//
// Basically, this is either INSTANCE_TAGGED_SEH_PARAM_ARRAY_SIZE or the count of your
// fixed arguments depending on whether this is a tagged-SEH-enabled build.
//
// This function is not permitted to fail.
#define INSTANCE_TAGGED_SEH_PARAM_ARRAY_SIZE 5
DWORD MarkAsThrownByUs(/*out*/ ULONG_PTR exceptionArgs[INSTANCE_TAGGED_SEH_PARAM_ARRAY_SIZE]);
DWORD MarkAsThrownByUs(/*out*/ ULONG_PTR exceptionArgs[INSTANCE_TAGGED_SEH_PARAM_ARRAY_SIZE], ULONG_PTR arg0);
// (the existing system can support more overloads up to 4 fixed arguments but we don't need them at this time.)
// Given an exception record, checks if its exception code matches a specific exception code
// *and* whether it was tagged by the calling instance of the CLR.
//
// If this is a non-tagged-SEH-enabled build, it is blindly assumed to be tagged by the
// calling instance of the CLR.
BOOL WasThrownByUs(const EXCEPTION_RECORD *pcER, DWORD dwExceptionCode);
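//
// A sketch of the intended flow (EXCEPTION_MY_TAGGED_CODE stands in for one of
// the CLR-specific SEH codes and is not defined in this header; pSomeContext is
// an illustrative local):
//
//     ULONG_PTR args[INSTANCE_TAGGED_SEH_PARAM_ARRAY_SIZE];
//     DWORD cArgs = MarkAsThrownByUs(args, (ULONG_PTR)pSomeContext);
//     RaiseException(EXCEPTION_MY_TAGGED_CODE, EXCEPTION_NONCONTINUABLE, cArgs, args);
//
// and, in a filter or handler that receives the EXCEPTION_RECORD:
//
//     if (WasThrownByUs(pExceptionRecord, EXCEPTION_MY_TAGGED_CODE))
//     {
//         // raised by this CLR instance; the argument slots can be interpreted safely
//     }
//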
//-----------------------------------------------------------------------------------
// The following group wraps the basic abstracts specifically for EXCEPTION_COMPLUS.
//-----------------------------------------------------------------------------------
BOOL IsComPlusException(const EXCEPTION_RECORD *pcER);
VOID RaiseComPlusException();
//===========================================================================================
//===========================================================================================
//-------------------------------------------------------------------------------------------
// This routine will generate the most descriptive possible error message for an hresult.
// It will generate at minimum the hex value. It will also try to generate the symbolic name
// (E_POINTER) and the friendly description (from the message tables.)
//
// bNoGeekStuff suppresses hex HR codes. Use this sparingly as most error strings generated by the
// CLR are aimed at developers, not end-users.
//-------------------------------------------------------------------------------------------
void GetHRMsg(HRESULT hresult, SString &result, BOOL bNoGeekStuff = FALSE);
//-------------------------------------------------------------------------------------------
// Similar to GetHRMsg but phrased for top-level exception message.
//-------------------------------------------------------------------------------------------
void GenerateTopLevelHRExceptionMessage(HRESULT hresult, SString &result);
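//
// A minimal sketch of using these helpers:
//
//     SString msg;
//     GetHRMsg(E_POINTER, msg);                           // hex code plus symbolic name/description where known
//     GenerateTopLevelHRExceptionMessage(E_POINTER, msg); // same information, phrased for a top-level message
//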
// ---------------------------------------------------------------------------
// We save current ExceptionPointers using VectoredExceptionHandler. The saved data is only valid
// during exception handling. GetCurrentExceptionPointers returns the saved data.
// ---------------------------------------------------------------------------
void GetCurrentExceptionPointers(PEXCEPTION_POINTERS pExceptionInfo DEBUG_ARG(bool checkExceptionRecordLocation));
// ---------------------------------------------------------------------------
// We save current ExceptionPointers using VectoredExceptionHandler. The saved data is only valid
// during exception handling. GetCurrentExceptionCode returns the current exception code.
// ---------------------------------------------------------------------------
DWORD GetCurrentExceptionCode();
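//
// For example, a handler running while an exception is being dispatched might
// consult the saved code (a sketch):
//
//     if (GetCurrentExceptionCode() == EXCEPTION_STACK_OVERFLOW)
//     {
//         // treat stack overflow specially
//     }
//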
// ---------------------------------------------------------------------------
// Standard exception hierarchy & infrastructure for library code & EE
// ---------------------------------------------------------------------------
// ---------------------------------------------------------------------------
// Exception class. Abstract root exception of our hierarchy.
// ---------------------------------------------------------------------------
class Exception;
class SEHException;
// Exception hierarchy:
/* GetInstanceType
Exception
|
|-> HRException Y
| |
| |-> HRMsgException
| |-> COMException
|
|-> SEHException Y
|
|-> DelegatingException Y
|
|-> OutOfMemoryException Y
|
|-> CLRException Y
|
|-> EEException Y
| |
| |-> EEMessageException
| |
| |-> EEResourceException
| |
| |-> EECOMException
| |
| |-> EEFieldException
| |
| |-> EEMethodException
| |
| |-> EEArgumentException
| |
| |-> EETypeLoadException
| |
| |-> EEFileLoadException
|
|-> ObjrefException Y
|
|-> CLRLastThrownObjectException Y
*/
class Exception
{
friend bool DebugIsEECxxExceptionPointer(void* pv);
private:
static const int c_type = 0x524f4f54; // 'ROOT'
static Exception * g_OOMException;
static Exception * g_SOException;
protected:
Exception *m_innerException;
public:
Exception() {LIMITED_METHOD_DAC_CONTRACT; m_innerException = NULL;}
virtual ~Exception() {LIMITED_METHOD_DAC_CONTRACT; if (m_innerException != NULL) Exception::Delete(m_innerException); }
virtual BOOL IsDomainBound() {return m_innerException!=NULL && m_innerException->IsDomainBound();} ;
virtual HRESULT GetHR() = 0;
virtual void GetMessage(SString &s);
virtual IErrorInfo *GetErrorInfo() { LIMITED_METHOD_CONTRACT; return NULL; }
virtual HRESULT SetErrorInfo() { LIMITED_METHOD_CONTRACT; return S_OK; }
void SetInnerException(Exception * pInnerException) { LIMITED_METHOD_CONTRACT; m_innerException = pInnerException; }
// Dynamic type query for catchers
static int GetType() { LIMITED_METHOD_CONTRACT; return c_type; }
// !!! If GetInstanceType is implemented, IsSameInstanceType should be implemented
virtual int GetInstanceType() = 0;
virtual BOOL IsType(int type) {LIMITED_METHOD_CONTRACT; return type == c_type; }
// This is used in CLRException::GetThrowable to detect if we are in a recursive situation.
virtual BOOL IsSameInstanceType(Exception *pException) = 0;
// Will create a new instance of the Exception. Note that this will
// be free of app domain or thread affinity. Not every type of exception
// can be cloned with full fidelity.
virtual Exception *Clone();
// DomainBoundClone is a specialized form of cloning which is guaranteed
// to provide full fidelity. However, the result is bound to the current
// app domain and should not be leaked.
Exception *DomainBoundClone();
class HandlerState
{
enum CaughtFlags
{
Caught = 1,
CaughtSO = 2,
CaughtCxx = 4,
};
DWORD m_dwFlags;
public:
Exception* m_pExceptionPtr;
HandlerState();
void CleanupTry();
void SetupCatch(INDEBUG_COMMA(_In_z_ const char * szFile) int lineNum);
void SucceedCatch();
BOOL DidCatch() { return (m_dwFlags & Caught); }
void SetCaught() { m_dwFlags |= Caught; }
BOOL DidCatchCxx() { return (m_dwFlags & CaughtCxx); }
void SetCaughtCxx() { m_dwFlags |= CaughtCxx; }
};
// Is this exception type considered "uncatchable"?
BOOL IsTerminal();
// Is this exception type considered "transient" (would a retry possibly succeed)?
BOOL IsTransient();
static BOOL IsTransient(HRESULT hr);
// Get an HRESULT's source representation, if known
static LPCSTR GetHRSymbolicName(HRESULT hr);
static Exception* GetOOMException();
// Preallocated exceptions: If there is a preallocated instance of some
// subclass of Exception, override this function and return a correct
// value. The default implementation returns constant FALSE
virtual BOOL IsPreallocatedException();
BOOL IsPreallocatedOOMException();
static void Delete(Exception* pvMemory);
protected:
// This virtual method must be implemented by any non abstract Exception
// derived class. It must allocate a NEW exception of the identical type and
// copy all the relevant fields from the current exception to the new one.
// It is NOT responsible however for copying the inner exception. This
// will be handled by the base Exception class.
virtual Exception *CloneHelper();
// This virtual method must be implemented by Exception subclasses whose
// DomainBoundClone behavior is different than their normal clone behavior.
// It must allocate a NEW exception of the identical type and
// copy all the relevant fields from the current exception to the new one.
// It is NOT responsible however for copying the inner exception. This
// will be handled by the base Exception class.
virtual Exception *DomainBoundCloneHelper() { return CloneHelper(); }
};
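// A typical catcher inspects the caught exception through the interface above.
// A minimal sketch using the EX_TRY/EX_CATCH macros defined later in this file
// (DoSomethingThatMayThrow is illustrative):
//
//     EX_TRY
//     {
//         DoSomethingThatMayThrow();
//     }
//     EX_CATCH
//     {
//         Exception *ex = GET_EXCEPTION();
//         HRESULT hr = ex->GetHR();
//         if (ex->IsTransient())
//         {
//             // a retry might succeed, e.g. remember hr and try again later
//         }
//     }
//     EX_END_CATCH(RethrowTerminalExceptions)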
#if 1
inline void Exception__Delete(Exception* pvMemory)
{
Exception::Delete(pvMemory);
}
using ExceptionHolder = SpecializedWrapper<Exception, Exception__Delete>;
#else
//------------------------------------------------------------------------------
// class ExceptionHolder
//
// This is a very lightweight holder class for use inside the EX_TRY family
// of macros. It is based on the standard Holder classes, but has been
// highly specialized for this one function, so that extra code can be
// removed, and the resulting code can be simple enough for all of the
// non-exceptional-case code to be inlined.
class ExceptionHolder
{
private:
Exception *m_value;
BOOL m_acquired;
public:
FORCEINLINE ExceptionHolder(Exception *pException = NULL, BOOL take = TRUE)
: m_value(pException)
{
m_acquired = pException && take;
}
FORCEINLINE ~ExceptionHolder()
{
if (m_acquired)
{
Exception::Delete(m_value);
}
}
Exception* operator->() { return m_value; }
void operator=(Exception *p)
{
Release();
m_value = p;
Acquire();
}
BOOL IsNull() { return m_value == NULL; }
operator Exception*() { return m_value; }
Exception* GetValue() { return m_value; }
void SuppressRelease() { m_acquired = FALSE; }
private:
void Acquire()
{
_ASSERTE(!m_acquired);
if (!IsNull())
{
m_acquired = TRUE;
}
}
void Release()
{
if (m_acquired)
{
_ASSERTE(!IsNull());
Exception::Delete(m_value);
m_acquired = FALSE;
}
}
};
#endif
// ---------------------------------------------------------------------------
// HRException class. Implements exception API for exceptions generated from HRESULTs
// ---------------------------------------------------------------------------
class HRException : public Exception
{
friend bool DebugIsEECxxExceptionPointer(void* pv);
protected:
HRESULT m_hr;
public:
HRException();
HRException(HRESULT hr);
static const int c_type = 0x48522020; // 'HR '
// Dynamic type query for catchers
static int GetType() {LIMITED_METHOD_DAC_CONTRACT; return c_type; }
virtual int GetInstanceType() { LIMITED_METHOD_CONTRACT; return c_type; }
virtual BOOL IsType(int type) { WRAPPER_NO_CONTRACT; return type == c_type || Exception::IsType(type); }
// Virtual overrides
HRESULT GetHR();
BOOL IsSameInstanceType(Exception *pException)
{
WRAPPER_NO_CONTRACT;
return pException->GetInstanceType() == GetType() && pException->GetHR() == m_hr;
}
protected:
virtual Exception *CloneHelper()
{
WRAPPER_NO_CONTRACT;
return new HRException(m_hr);
}
};
// ---------------------------------------------------------------------------
// HRMessageException class. Implements exception API for exceptions
// generated from HRESULTs, and includes an info message.
// ---------------------------------------------------------------------------
class HRMsgException : public HRException
{
friend bool DebugIsEECxxExceptionPointer(void* pv);
protected:
SString m_msg;
public:
HRMsgException();
HRMsgException(HRESULT hr, SString const &msg);
// Virtual overrides
void GetMessage(SString &s);
protected:
virtual Exception *CloneHelper()
{
WRAPPER_NO_CONTRACT;
return new HRMsgException(m_hr, m_msg);
}
};
// ---------------------------------------------------------------------------
// COMException class. Implements exception API for standard COM-based error info
// ---------------------------------------------------------------------------
class COMException : public HRException
{
friend bool DebugIsEECxxExceptionPointer(void* pv);
private:
IErrorInfo *m_pErrorInfo;
public:
COMException();
COMException(HRESULT hr) ;
COMException(HRESULT hr, IErrorInfo *pErrorInfo);
~COMException();
// Virtual overrides
IErrorInfo *GetErrorInfo();
void GetMessage(SString &result);
protected:
virtual Exception *CloneHelper()
{
WRAPPER_NO_CONTRACT;
return new COMException(m_hr, m_pErrorInfo);
}
};
// ---------------------------------------------------------------------------
// SEHException class. Implements exception API for SEH exception info
// ---------------------------------------------------------------------------
class SEHException : public Exception
{
friend bool DebugIsEECxxExceptionPointer(void* pv);
public:
EXCEPTION_RECORD m_exception;
SEHException();
SEHException(EXCEPTION_RECORD *pRecord, T_CONTEXT *pContext = NULL);
static const int c_type = 0x53454820; // 'SEH '
// Dynamic type query for catchers
static int GetType() {LIMITED_METHOD_CONTRACT; return c_type; }
virtual int GetInstanceType() { LIMITED_METHOD_CONTRACT; return c_type; }
virtual BOOL IsType(int type) { WRAPPER_NO_CONTRACT; return type == c_type || Exception::IsType(type); }
BOOL IsSameInstanceType(Exception *pException)
{
WRAPPER_NO_CONTRACT;
return pException->GetInstanceType() == GetType() && pException->GetHR() == GetHR();
}
// Virtual overrides
HRESULT GetHR();
IErrorInfo *GetErrorInfo();
void GetMessage(SString &result);
protected:
virtual Exception *CloneHelper()
{
WRAPPER_NO_CONTRACT;
return new SEHException(&m_exception);
}
};
// ---------------------------------------------------------------------------
// DelegatingException class. Implements exception API for "foreign" exceptions.
// ---------------------------------------------------------------------------
class DelegatingException : public Exception
{
Exception *m_delegatedException;
Exception* GetDelegate();
enum {DELEGATE_NOT_YET_SET = -1};
bool IsDelegateSet() {LIMITED_METHOD_DAC_CONTRACT; return m_delegatedException != (Exception*)DELEGATE_NOT_YET_SET; }
bool IsDelegateValid() {LIMITED_METHOD_DAC_CONTRACT; return IsDelegateSet() && m_delegatedException != NULL; }
public:
DelegatingException();
~DelegatingException();
static const int c_type = 0x44454C20; // 'DEL '
// Dynamic type query for catchers
static int GetType() {LIMITED_METHOD_CONTRACT; return c_type; }
virtual int GetInstanceType() { LIMITED_METHOD_CONTRACT; return c_type; }
virtual BOOL IsType(int type) { WRAPPER_NO_CONTRACT; return type == c_type || Exception::IsType(type); }
BOOL IsSameInstanceType(Exception *pException)
{
WRAPPER_NO_CONTRACT;
return pException->GetInstanceType() == GetType() && pException->GetHR() == GetHR();
}
// Virtual overrides
virtual BOOL IsDomainBound() {return Exception::IsDomainBound() ||(m_delegatedException!=NULL && m_delegatedException->IsDomainBound());} ;
HRESULT GetHR();
IErrorInfo *GetErrorInfo();
void GetMessage(SString &result);
virtual Exception *Clone();
protected:
virtual Exception *CloneHelper()
{
WRAPPER_NO_CONTRACT;
return new DelegatingException();
}
};
//------------------------------------------------------------------------------
// class OutOfMemoryException
//
// While there could be any number of instances of this class, there is one
// special instance, the pre-allocated OOM exception. Storage for that
// instance is allocated in the image, so we can always obtain it, even
// in low memory situations.
// Note that, in fact, there is only one instance.
//------------------------------------------------------------------------------
class OutOfMemoryException : public Exception
{
private:
static const int c_type = 0x4F4F4D20; // 'OOM '
BOOL bIsPreallocated;
public:
OutOfMemoryException() : bIsPreallocated(FALSE) {}
OutOfMemoryException(BOOL b) : bIsPreallocated(b) {}
// Dynamic type query for catchers
static int GetType() {LIMITED_METHOD_CONTRACT; return c_type; }
virtual int GetInstanceType() { LIMITED_METHOD_CONTRACT; return c_type; }
BOOL IsType(int type) { WRAPPER_NO_CONTRACT; return type == c_type || Exception::IsType(type); }
BOOL IsSameInstanceType(Exception *pException)
{
WRAPPER_NO_CONTRACT;
return pException->GetInstanceType() == GetType();
}
HRESULT GetHR() {LIMITED_METHOD_DAC_CONTRACT; return E_OUTOFMEMORY; }
void GetMessage(SString &result) { WRAPPER_NO_CONTRACT; result.SetASCII("Out Of Memory"); }
virtual Exception *Clone();
virtual BOOL IsPreallocatedException() { return bIsPreallocated; }
};
template <typename STATETYPE>
class CAutoTryCleanup
{
public:
DEBUG_NOINLINE CAutoTryCleanup(STATETYPE& refState) :
m_refState(refState)
{
SCAN_SCOPE_BEGIN;
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_SUPPORTS_DAC;
#ifdef ENABLE_CONTRACTS_IMPL
// This is similar to ClrTryMarkerHolder. We're marking that it's okay to throw on this thread now because
// we're within a try block. We fold this into here strictly for performance reasons... we have one
// stack-allocated object do the work.
m_pClrDebugState = GetClrDebugState();
m_oldOkayToThrowValue = m_pClrDebugState->IsOkToThrow();
m_pClrDebugState->SetOkToThrow();
#endif
}
DEBUG_NOINLINE ~CAutoTryCleanup()
{
SCAN_SCOPE_END;
WRAPPER_NO_CONTRACT;
m_refState.CleanupTry();
#ifdef ENABLE_CONTRACTS_IMPL
// Restore the original OkayToThrow value since we're leaving the try block.
m_pClrDebugState->SetOkToThrow( m_oldOkayToThrowValue );
#endif // ENABLE_CONTRACTS_IMPL
}
protected:
STATETYPE& m_refState;
#ifdef ENABLE_CONTRACTS_DATA
private:
BOOL m_oldOkayToThrowValue;
ClrDebugState *m_pClrDebugState;
#endif
};
// ---------------------------------------------------------------------------
// Throw/Catch macros
//
// Usage:
//
// EX_TRY
// {
// EX_THROW(HRException, (E_FAIL));
// }
// EX_CATCH
// {
// Exception *e = GET_EXCEPTION();
// EX_RETHROW;
// }
// EX_END_CATCH(RethrowTerminalExceptions, RethrowTransientExceptions or SwallowAllExceptions)
//
// ---------------------------------------------------------------------------
// ---------------------------------------------------------------------------
// #NO_HOST_CPP_EH_ONLY
//
// The EX_CATCH* macros defined below can work one of two ways:
// 1. They catch all exceptions, both C++ and SEH exceptions.
// 2. They catch only C++ exceptions.
//
// Which way they are defined depends on what sort of handling of SEH
// exceptions, like AV's, you wish to have in your DLL. In general we
// do not typically want to catch and swallow AV's.
//
// By default, the macros catch all exceptions. This is how they work when
// compiled into the primary runtime DLL (clr.dll). This is reasonable for
// the CLR because it needs to also catch managed exceptions, which are SEH
// exceptions, and because that DLL also includes a vectored exception
// handler that will take down the process on any AV within clr.dll.
//
// But for uses of these macros outside of the CLR DLL there are other
// possibilities. If a DLL only uses facilities in Utilcode that throw the
// C++ exceptions defined above, and never needs to catch a managed exception,
// then that DLL should setup the macros to only catch C++ exceptions. That
// way, AV's are not accidentally swallowed and hidden.
//
// On the other hand, if a DLL needs to catch managed exceptions, then it has
// no choice but to also catch all SEH exceptions, including AV's. In that case
// the DLL should also include a vectored handler, like CLR.dll, to take the
// process down on an AV.
//
// The behavior difference is controlled by NO_HOST_CPP_EH_ONLY. When defined,
// the EX_CATCH* macros only catch C++ exceptions. When not defined, they catch
// C++ and SEH exceptions.
//
// Note: use of NO_HOST_CPP_EH_ONLY is only valid outside the primary CLR DLLs.
// Thus it is an error to attempt to define it without also defining SELF_NO_HOST.
// ---------------------------------------------------------------------------
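//
// For example, a stand-alone (self-hosted) tool that only throws and catches the
// C++ exceptions above might be built with both symbols defined (a sketch of the
// intended configuration, typically supplied by the build system):
//
//     #define SELF_NO_HOST
//     #define NO_HOST_CPP_EH_ONLY
//     #include "ex.h"
//
// Code built into the runtime DLLs leaves both undefined and gets the
// catch-everything behavior described above.
//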
#if defined(NO_HOST_CPP_EH_ONLY) && !defined(SELF_NO_HOST)
#error It is incorrect to attempt to have C++-only EH macros when hosted. This is only valid for components outside the runtime DLLs.
#endif
//-----------------------------------------------------------------------
// EX_END_CATCH has a mandatory argument which is one of "RethrowTerminalExceptions",
// "RethrowTransientExceptions", or "SwallowAllExceptions".
//
// If an exception is considered "terminal" (e->IsTerminal()), it should normally
// be allowed to proceed. Hence, most of the time, you should use RethrowTerminalExceptions.
//
// In some cases you will want transient exceptions (terminal plus things like
// resource exhaustion) to proceed as well. Use RethrowTransientExceptions for this case.
//
// If you have a good reason to use SwallowAllExceptions, (e.g. a hard COM interop boundary)
// use one of the higher level macros for this if available, or consider developing one.
// Otherwise, clearly document why you're swallowing terminal exceptions. Raw uses of
// SwallowAllExceptions will cause the cleanup police to come knocking on your door
// at some point.
//
// A lot of existing TRY's swallow terminals right now simply because there is
// backout code following the END_CATCH that has to be executed. The solution is
// to replace that backout code with holder objects.
//-----------------------------------------------------------------------
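//
// For example, containing ordinary failures while still letting terminal
// exceptions unwind (DoWork is illustrative):
//
//     EX_TRY
//     {
//         hr = DoWork();
//     }
//     EX_CATCH
//     {
//         hr = GET_EXCEPTION()->GetHR();
//     }
//     EX_END_CATCH(RethrowTerminalExceptions)
//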
#define RethrowTransientExceptions \
if (GET_EXCEPTION()->IsTransient()) \
{ \
EX_RETHROW; \
} \
#define SwallowAllExceptions ;
// When applied to EX_END_CATCH, this policy will always rethrow Terminal exceptions if they are
// encountered.
#define RethrowTerminalExceptions \
if (GET_EXCEPTION()->IsTerminal()) \
{ \
STATIC_CONTRACT_THROWS_TERMINAL; \
EX_RETHROW; \
} \
// Special define to be used in EEStartup that will also check for VM initialization before
// commencing on a path that may use the managed thread object.
#define RethrowTerminalExceptionsWithInitCheck \
if ((g_fEEStarted == TRUE) && (GetThreadNULLOk() != NULL)) \
{ \
RethrowTerminalExceptions \
}
#ifdef _DEBUG
void ExThrowTrap(const char *fcn, const char *file, int line, const char *szType, HRESULT hr, const char *args);
#define EX_THROW_DEBUG_TRAP(fcn, file, line, szType, hr, args) ExThrowTrap(fcn, file, line, szType, hr, args)
#else
#define EX_THROW_DEBUG_TRAP(fcn, file, line, szType, hr, args)
#endif
#define EX_THROW(_type, _args) \
{ \
FAULT_NOT_FATAL(); \
\
_type * ___pExForExThrow = new _type _args ; \
/* don't embed file names in retail to save space and avoid IP */ \
/* a findstr /n will allow you to locate it in a pinch */ \
STRESS_LOG3(LF_EH, LL_INFO100, "EX_THROW Type = 0x%x HR = 0x%x, " \
INDEBUG(__FILE__) " line %d\n", _type::GetType(), \
___pExForExThrow->GetHR(), __LINE__); \
EX_THROW_DEBUG_TRAP(__FUNCTION__, __FILE__, __LINE__, #_type, ___pExForExThrow->GetHR(), #_args); \
PAL_CPP_THROW(_type *, ___pExForExThrow); \
}
//--------------------------------------------------------------------------------
// Clones an exception into the current domain. Also handles special cases for
// OOM and other stuff. Making this a function so we don't inline all this logic
// every place we call EX_THROW_WITH_INNER.
//--------------------------------------------------------------------------------
Exception *ExThrowWithInnerHelper(Exception *inner);
// This macro will set the m_innerException into the newly created exception
// The passed in _type has to be derived from CLRException. You cannot put OOM
// as the inner exception. If we are throwing in the OOM case, allocating more memory (this macro will clone)
// does not make any sense.
//
#define EX_THROW_WITH_INNER(_type, _args, _inner) \
{ \
FAULT_NOT_FATAL(); \
\
Exception *_inner2 = ExThrowWithInnerHelper(_inner); \
_type *___pExForExThrow = new _type _args ; \
___pExForExThrow->SetInnerException(_inner2); \
STRESS_LOG3(LF_EH, LL_INFO100, "EX_THROW_WITH_INNER Type = 0x%x HR = 0x%x, " \
INDEBUG(__FILE__) " line %d\n", _type::GetType(), \
___pExForExThrow->GetHR(), __LINE__); \
EX_THROW_DEBUG_TRAP(__FUNCTION__, __FILE__, __LINE__, #_type, ___pExForExThrow->GetHR(), #_args); \
PAL_CPP_THROW(_type *, ___pExForExThrow); \
}
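// For example, wrapping the exception in flight with a new outer exception
// (a sketch; MyCLRDerivedException stands in for some CLRException-derived type
// and is not defined in this header):
//
//     EX_TRY
//     {
//         DoWork();
//     }
//     EX_HOOK
//     {
//         EX_THROW_WITH_INNER(MyCLRDerivedException, (E_FAIL), GET_EXCEPTION());
//     }
//     EX_END_HOOK
//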
//#define IsCLRException(ex) ((ex !=NULL) && ex->IsType(CLRException::GetType())
#define EX_TRY_IMPL EX_TRY_CUSTOM(Exception::HandlerState, , DelegatingException /* was SEHException*/)
#define EX_TRY_CPP_ONLY EX_TRY_CUSTOM_CPP_ONLY(Exception::HandlerState, , DelegatingException /* was SEHException*/)
#ifndef INCONTRACT
#ifdef ENABLE_CONTRACTS
#define INCONTRACT(x) x
#else
#define INCONTRACT(x)
#endif
#endif
#define EX_TRY_CUSTOM(STATETYPE, STATEARG, DEFAULT_EXCEPTION_TYPE) \
{ \
STATETYPE __state STATEARG; \
typedef DEFAULT_EXCEPTION_TYPE __defaultException_t; \
SCAN_EHMARKER(); \
PAL_CPP_TRY \
{ \
SCAN_EHMARKER_TRY(); \
SCAN_EHMARKER(); \
PAL_CPP_TRY \
{ \
SCAN_EHMARKER_TRY(); \
CAutoTryCleanup<STATETYPE> __autoCleanupTry(__state); \
/* prevent annotations from being dropped by optimizations in debug */ \
INDEBUG(static bool __alwayszero;) \
INDEBUG(VolatileLoad(&__alwayszero);) \
{ \
/* Disallow returns to make exception handling work. */ \
/* Some work is done after the catch, see EX_ENDTRY. */ \
DEBUG_ASSURE_NO_RETURN_BEGIN(EX_TRY) \
EX_TRY_HOLDER \
#define EX_CATCH_IMPL_EX(DerivedExceptionClass) \
DEBUG_ASSURE_NO_RETURN_END(EX_TRY) \
} \
SCAN_EHMARKER_END_TRY(); \
} \
PAL_CPP_CATCH_DERIVED (DerivedExceptionClass, __pExceptionRaw) \
{ \
SCAN_EHMARKER_CATCH(); \
__state.SetCaughtCxx(); \
__state.m_pExceptionPtr = __pExceptionRaw; \
SCAN_EHMARKER_END_CATCH(); \
SCAN_IGNORE_THROW_MARKER; \
PAL_CPP_RETHROW; \
} \
PAL_CPP_ENDTRY \
SCAN_EHMARKER_END_TRY(); \
} \
PAL_CPP_CATCH_ALL \
{ \
SCAN_EHMARKER_CATCH(); \
__defaultException_t __defaultException; \
CHECK::ResetAssert(); \
ExceptionHolder __pException(__state.m_pExceptionPtr); \
/* work around unreachable code warning */ \
if (true) { \
DEBUG_ASSURE_NO_RETURN_BEGIN(EX_CATCH) \
/* don't embed file names in retail to save space and avoid IP */ \
/* a findstr /n will allow you to locate it in a pinch */ \
__state.SetupCatch(INDEBUG_COMMA(__FILE__) __LINE__); \
#define EX_CATCH_IMPL EX_CATCH_IMPL_EX(Exception)
#define EX_TRY_CUSTOM_CPP_ONLY(STATETYPE, STATEARG, DEFAULT_EXCEPTION_TYPE) \
{ \
STATETYPE __state STATEARG; \
typedef DEFAULT_EXCEPTION_TYPE __defaultException_t; \
SCAN_EHMARKER(); \
PAL_CPP_TRY \
{ \
SCAN_EHMARKER_TRY(); \
CAutoTryCleanup<STATETYPE> __autoCleanupTry(__state); \
/* prevent annotations from being dropped by optimizations in debug */ \
INDEBUG(static bool __alwayszero;) \
INDEBUG(VolatileLoad(&__alwayszero);) \
{ \
/* Disallow returns to make exception handling work. */ \
/* Some work is done after the catch, see EX_ENDTRY. */ \
DEBUG_ASSURE_NO_RETURN_BEGIN(EX_TRY) \
#define EX_CATCH_IMPL_CPP_ONLY \
DEBUG_ASSURE_NO_RETURN_END(EX_TRY) \
} \
SCAN_EHMARKER_END_TRY(); \
} \
PAL_CPP_CATCH_DERIVED (Exception, __pExceptionRaw) \
{ \
SCAN_EHMARKER_CATCH(); \
__state.SetCaughtCxx(); \
__state.m_pExceptionPtr = __pExceptionRaw; \
SCAN_EHMARKER_END_CATCH(); \
SCAN_IGNORE_THROW_MARKER; \
__defaultException_t __defaultException; \
CHECK::ResetAssert(); \
ExceptionHolder __pException(__state.m_pExceptionPtr); \
/* work around unreachable code warning */ \
if (true) { \
DEBUG_ASSURE_NO_RETURN_BEGIN(EX_CATCH) \
/* don't embed file names in retail to save space and avoid IP */ \
/* a findstr /n will allow you to locate it in a pinch */ \
__state.SetupCatch(INDEBUG_COMMA(__FILE__) __LINE__); \
// Here we finally define the EX_CATCH* macros that will be used throughout the system.
// These can catch C++ and SEH exceptions, or just C++ exceptions.
// See code:NO_HOST_CPP_EH_ONLY for more details.
//
// Note: we make it illegal to use forms that are redundant with the basic EX_CATCH
// version. I.e., in the C++ & SEH version, EX_CATCH_CPP_AND_SEH is the same as EX_CATCH.
// Likewise, in the C++ only version, EX_CATCH_CPP_ONLY is redundant with EX_CATCH.
#ifndef NO_HOST_CPP_EH_ONLY
#define EX_TRY EX_TRY_IMPL
#define EX_CATCH EX_CATCH_IMPL
#define EX_CATCH_EX EX_CATCH_IMPL_EX
#define EX_CATCH_CPP_ONLY EX_CATCH_IMPL_CPP_ONLY
#define EX_CATCH_CPP_AND_SEH Dont_Use_EX_CATCH_CPP_AND_SEH
#else
#define EX_TRY EX_TRY_CPP_ONLY
#define EX_CATCH EX_CATCH_IMPL_CPP_ONLY
#define EX_CATCH_CPP_ONLY Dont_Use_EX_CATCH_CPP_ONLY
#define EX_CATCH_CPP_AND_SEH EX_CATCH_IMPL
// Note: at this time we don't have a use case for EX_CATCH_EX, and we do not have
// the C++-only version of the implementation available. Thus we disallow its use at this time.
// If a real use case arises then we should go ahead and enable this.
#define EX_CATCH_EX Dont_Use_EX_CATCH_EX
#endif
#define EX_END_CATCH_UNREACHABLE \
DEBUG_ASSURE_NO_RETURN_END(EX_CATCH) \
} \
SCAN_EHMARKER_END_CATCH(); \
UNREACHABLE(); \
} \
PAL_CPP_ENDTRY \
} \
// "terminalexceptionpolicy" must be one of "RethrowTerminalExceptions",
// "RethrowTransientExceptions", or "SwallowAllExceptions"
#define EX_END_CATCH(terminalexceptionpolicy) \
terminalexceptionpolicy; \
__state.SucceedCatch(); \
DEBUG_ASSURE_NO_RETURN_END(EX_CATCH) \
} \
SCAN_EHMARKER_END_CATCH(); \
} \
EX_ENDTRY \
} \
#define EX_END_CATCH_FOR_HOOK \
__state.SucceedCatch(); \
DEBUG_ASSURE_NO_RETURN_END(EX_CATCH) \
ANNOTATION_HANDLER_END; \
} \
SCAN_EHMARKER_END_CATCH(); \
} \
EX_ENDTRY
#define EX_ENDTRY \
PAL_CPP_ENDTRY
#define EX_RETHROW \
{ \
__pException.SuppressRelease(); \
PAL_CPP_RETHROW; \
} \
// Define a copy of GET_EXCEPTION() that will not be redefined by clrex.h
#define GET_EXCEPTION() (__pException == NULL ? &__defaultException : __pException.GetValue())
#define EXTRACT_EXCEPTION() (__pException.Extract())
//==============================================================================
// High-level macros for common uses of EX_TRY. Try using these rather
// than the raw EX_TRY constructs.
//==============================================================================
//===================================================================================
// Macro for converting exceptions into HR internally. Unlike EX_CATCH_HRESULT,
// it does not set up IErrorInfo on the current thread.
//
// Usage:
//
// HRESULT hr = S_OK;
// EX_TRY
// <do managed stuff>
// EX_CATCH_HRESULT_NO_ERRORINFO(hr);
// return hr;
//
// Comments:
// Since IErrorInfo is not set up, this does not require COM interop to be started.
//===================================================================================
#define EX_CATCH_HRESULT_NO_ERRORINFO(_hr) \
EX_CATCH \
{ \
(_hr) = GET_EXCEPTION()->GetHR(); \
_ASSERTE(FAILED(_hr)); \
} \
EX_END_CATCH(SwallowAllExceptions)
//===================================================================================
// Macro for catching managed exception object.
//
// Usage:
//
// OBJECTREF pThrowable = NULL;
// EX_TRY
// <do managed stuff>
// EX_CATCH_THROWABLE(&pThrowable);
//
//===================================================================================
#define EX_CATCH_THROWABLE(ppThrowable) \
EX_CATCH \
{ \
*ppThrowable = GET_THROWABLE(); \
} \
EX_END_CATCH(SwallowAllExceptions)
#ifdef FEATURE_COMINTEROP
//===================================================================================
// Macro for defining external entrypoints such as COM interop boundaries.
// The boundary will catch all exceptions (including terminals) and convert
// them into HR/IErrorInfo pairs as appropriate.
//
// Usage:
//
// HRESULT hr = S_OK;
// EX_TRY
// <do managed stuff>
// EX_CATCH_HRESULT(hr);
// return hr;
//
// Comments:
// Note that IErrorInfo will automatically be set up on the thread if appropriate.
//===================================================================================
#define EX_CATCH_HRESULT(_hr) \
EX_CATCH \
{ \
(_hr) = GET_EXCEPTION()->GetHR(); \
_ASSERTE(FAILED(_hr)); \
IErrorInfo *pErr = GET_EXCEPTION()->GetErrorInfo(); \
if (pErr != NULL) \
{ \
SetErrorInfo(0, pErr); \
pErr->Release(); \
} \
} \
EX_END_CATCH(SwallowAllExceptions)
//===================================================================================
// Macro to make conditional catching more succinct.
//
// Usage:
//
// EX_TRY
// ...
// EX_CATCH_HRESULT_IF(IsHRESULTForExceptionKind(GET_EXCEPTION()->GetHR(), kFileNotFoundException));
//===================================================================================
#define EX_CATCH_HRESULT_IF(HR, ...) \
EX_CATCH \
{ \
(HR) = GET_EXCEPTION()->GetHR(); \
\
/* Rethrow if condition is false. */ \
if (!(__VA_ARGS__)) \
EX_RETHROW; \
\
_ASSERTE(FAILED(HR)); \
IErrorInfo *pErr = GET_EXCEPTION()->GetErrorInfo(); \
if (pErr != NULL) \
{ \
SetErrorInfo(0, pErr); \
pErr->Release(); \
} \
} \
EX_END_CATCH(SwallowAllExceptions)
#else // FEATURE_COMINTEROP
#define EX_CATCH_HRESULT(_hr) EX_CATCH_HRESULT_NO_ERRORINFO(_hr)
#endif // FEATURE_COMINTEROP
//===================================================================================
// Macro for containing normal exceptions but letting terminal exceptions continue to propagate.
//
// Usage:
//
// EX_TRY
// {
// ...your stuff...
// }
// EX_SWALLOW_NONTERMINAL
//
// Remember, terminal exceptions (such as ThreadAbort) will still throw out of this
// block. So don't use this as a substitute for exception-safe cleanup!
//===================================================================================
#define EX_SWALLOW_NONTERMINAL \
EX_CATCH \
{ \
} \
EX_END_CATCH(RethrowTerminalExceptions) \
//===================================================================================
// Macro for containing normal exceptions but letting transient exceptions continue to propagate.
//
// Usage:
//
// EX_TRY
// {
// ...your stuff...
// }
// EX_SWALLOW_NONTRANSIENT
//
// Terminal exceptions (such as ThreadAbort and OutOfMemory) will still throw out of this
// block. So don't use this as a substitute for exception-safe cleanup!
//===================================================================================
#define EX_SWALLOW_NONTRANSIENT \
EX_CATCH \
{ \
} \
EX_END_CATCH(RethrowTransientExceptions) \
//===================================================================================
// Macro for observing or wrapping exceptions in flight.
//
// Usage:
//
// EX_TRY
// {
// ... your stuff ...
// }
// EX_HOOK
// {
// ... your stuff ...
// }
// EX_END_HOOK
// ... control will never get here ...
//
//
// EX_HOOK is like EX_CATCH except that you can't prevent the
// exception from being rethrown. You can throw a new exception inside the hook
// (for example, if you want to wrap the exception in flight with your own).
// But if control reaches the end of the hook, the original exception gets rethrown.
//
// Avoid using EX_HOOK for conditional backout if a destructor-based holder
// will suffice. Because these macros are implemented on top of SEH, using them will
// prevent the use of holders anywhere else inside the same function. That is, instead
// of saying this:
//
// EX_TRY // DON'T DO THIS
// {
// thing = new Thing();
// blah
// }
// EX_HOOK
// {
// delete thing; // if it failed, we don't want to keep the Thing.
// }
// EX_END_HOOK
//
// do this:
//
// Holder<Thing> thing = new Thing(); //DO THIS INSTEAD
// blah
// // If we got here, we succeeded. So tell holder we want to keep the thing.
// thing.SuppressRelease();
//
// We won't rethrow the exception if it is a Stack Overflow exception. Instead, we'll throw a new
// exception. This will allow the stack to unwind, and so we won't be risking a
// second stack overflow.
//===================================================================================
#define EX_HOOK \
EX_CATCH \
{ \
#define EX_END_HOOK \
} \
ANNOTATION_HANDLER_END; \
EX_RETHROW; \
EX_END_CATCH_FOR_HOOK; \
}
// ---------------------------------------------------------------------------
// Inline implementations. Pay no attention to that man behind the curtain.
// ---------------------------------------------------------------------------
inline Exception::HandlerState::HandlerState()
{
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_CANNOT_TAKE_LOCK;
STATIC_CONTRACT_SUPPORTS_DAC;
m_dwFlags = 0;
m_pExceptionPtr = NULL;
#if defined(STACK_GUARDS_DEBUG) && defined(ENABLE_CONTRACTS_IMPL)
// If we have a debug state, use its setting for SO tolerance. The default
// is SO-tolerant if we have no debug state. Can't probe w/o debug state and
// can't enter SO-intolerant mode w/o probing.
GetClrDebugState();
#endif
}
inline void Exception::HandlerState::CleanupTry()
{
LIMITED_METHOD_DAC_CONTRACT;
}
inline void Exception::HandlerState::SetupCatch(INDEBUG_COMMA(_In_z_ const char * szFile) int lineNum)
{
WRAPPER_NO_CONTRACT;
/* don't embed file names in retail to save space and avoid IP */
/* a findstr /n will allow you to locate it in a pinch */
#ifdef _DEBUG
STRESS_LOG2(LF_EH, LL_INFO100, "EX_CATCH %s line %d\n", szFile, lineNum);
#else
STRESS_LOG1(LF_EH, LL_INFO100, "EX_CATCH line %d\n", lineNum);
#endif
SetCaught();
}
inline void Exception::HandlerState::SucceedCatch()
{
LIMITED_METHOD_DAC_CONTRACT;
}
inline HRException::HRException()
: m_hr(E_UNEXPECTED)
{
LIMITED_METHOD_CONTRACT;
SUPPORTS_DAC;
}
inline HRException::HRException(HRESULT hr)
: m_hr(hr)
{
LIMITED_METHOD_CONTRACT;
SUPPORTS_DAC;
// Catchers assume only failing hresults
_ASSERTE(FAILED(hr));
}
inline HRMsgException::HRMsgException()
: HRException()
{
LIMITED_METHOD_CONTRACT;
}
inline HRMsgException::HRMsgException(HRESULT hr, SString const &s)
: HRException(hr), m_msg(s)
{
WRAPPER_NO_CONTRACT;
}
inline COMException::COMException()
: HRException(),
m_pErrorInfo(NULL)
{
WRAPPER_NO_CONTRACT;
}
inline COMException::COMException(HRESULT hr)
: HRException(hr),
m_pErrorInfo(NULL)
{
LIMITED_METHOD_CONTRACT;
}
inline COMException::COMException(HRESULT hr, IErrorInfo *pErrorInfo)
: HRException(hr),
m_pErrorInfo(pErrorInfo)
{
LIMITED_METHOD_CONTRACT;
}
inline SEHException::SEHException()
{
LIMITED_METHOD_CONTRACT;
memset(&m_exception, 0, sizeof(EXCEPTION_RECORD));
}
inline SEHException::SEHException(EXCEPTION_RECORD *pointers, T_CONTEXT *pContext)
{
LIMITED_METHOD_CONTRACT;
memcpy(&m_exception, pointers, sizeof(EXCEPTION_RECORD));
}
// The exception throwing helpers are intentionally not inlined
// Exception throwing is a rare slow codepath that should be optimized for code size
void DECLSPEC_NORETURN ThrowHR(HRESULT hr);
void DECLSPEC_NORETURN ThrowHR(HRESULT hr, SString const &msg);
void DECLSPEC_NORETURN ThrowHR(HRESULT hr, UINT uText);
void DECLSPEC_NORETURN ThrowWin32(DWORD err);
void DECLSPEC_NORETURN ThrowLastError();
void DECLSPEC_NORETURN ThrowOutOfMemory();
void DECLSPEC_NORETURN ThrowStackOverflow();
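// For example (a minimal sketch; pbBuffer and hFile are illustrative locals):
//
//     if (pbBuffer == NULL)
//         ThrowHR(E_POINTER);
//     if (!CloseHandle(hFile))
//         ThrowLastError();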
#undef IfFailThrow
inline HRESULT IfFailThrow(HRESULT hr)
{
WRAPPER_NO_CONTRACT;
if (FAILED(hr))
{
ThrowHR(hr);
}
return hr;
}
inline HRESULT IfFailThrow(HRESULT hr, SString &msg)
{
WRAPPER_NO_CONTRACT;
if (FAILED(hr))
{
ThrowHR(hr, msg);
}
return hr;
}
inline HRESULT IfTransientFailThrow(HRESULT hr)
{
WRAPPER_NO_CONTRACT;
if (FAILED(hr) && Exception::IsTransient(hr))
{
ThrowHR(hr);
}
return hr;
}
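// For example, converting failing HRESULTs from COM-style calls into the C++
// exceptions above (pStream and pCache are illustrative):
//
//     ULONG cbRead;
//     IfFailThrow(pStream->Read(buffer, cbBuffer, &cbRead));
//     IfTransientFailThrow(pCache->TryRefresh()); // only transient failures throw here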
// Set if fatal error (like stack overflow or out of memory) occurred in this process.
GVAL_DECL(HRESULT, g_hrFatalError);
#endif // _EX_H_
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#if !defined(_EX_H_)
#define _EX_H_
#ifdef HOST_UNIX
#define EX_TRY_HOLDER \
HardwareExceptionHolder \
NativeExceptionHolderCatchAll __exceptionHolder; \
__exceptionHolder.Push(); \
#else // HOST_UNIX
#define EX_TRY_HOLDER
#endif // HOST_UNIX
#include "sstring.h"
#include "crtwrap.h"
#include "winwrap.h"
#include "corerror.h"
#include "stresslog.h"
#include "staticcontract.h"
#include "entrypoints.h"
#if !defined(_DEBUG_IMPL) && defined(_DEBUG) && !defined(DACCESS_COMPILE)
#define _DEBUG_IMPL 1
#endif
//===========================================================================================
// These abstractions hide the difference between legacy desktop CLR's (that don't support
// side-by-side-inproc and rely on a fixed SEH code to identify managed exceptions) and
// new CLR's that support side-by-side inproc.
//
// The new CLR's use a different set of SEH codes to avoid conflicting with the legacy CLR's.
// In addition, to distinguish between EH's raised by different inproc instances of the CLR,
// the module handle of the owning CLR is stored in ExceptionRecord.ExceptionInformation[4].
//
// (Note: all existing SEH's use either only slot [0] or no slots at all. We are leaving
// slots [1] thru [3] open for future expansion.)
//===========================================================================================
// Is this exception code one of the special CLR-specific SEH codes that participate in the
// instance-tagging scheme?
BOOL IsInstanceTaggedSEHCode(DWORD dwExceptionCode);
// This set of overloads generates the NumberParameters and ExceptionInformation[] array to
// pass to RaiseException().
//
// Parameters:
// exceptionArgs: a fixed-size array of size INSTANCE_TAGGED_SEH_PARAM_ARRAY_SIZE.
// This will get filled in by this function. (The module handle goes
// in the last slot if this is a side-by-side-inproc enabled build.)
//
// exceptionArg1... up to four arguments that go in slots [0]..[3]. These depend on
// the specific requirements of your exception code.
//
// Returns:
// The NumberParameters to pass to RaiseException().
//
// Basically, this is either INSTANCE_TAGGED_SEH_PARAM_ARRAY_SIZE or the count of your
// fixed arguments depending on whether this is a tagged-SEH-enabled build.
//
// This function is not permitted to fail.
#define INSTANCE_TAGGED_SEH_PARAM_ARRAY_SIZE 5
DWORD MarkAsThrownByUs(/*out*/ ULONG_PTR exceptionArgs[INSTANCE_TAGGED_SEH_PARAM_ARRAY_SIZE]);
DWORD MarkAsThrownByUs(/*out*/ ULONG_PTR exceptionArgs[INSTANCE_TAGGED_SEH_PARAM_ARRAY_SIZE], ULONG_PTR arg0);
// (the existing system can support more overloads up to 4 fixed arguments but we don't need them at this time.)
// Given an exception record, checks if its exception code matches a specific exception code
// *and* whether it was tagged by the calling instance of the CLR.
//
// If this is a non-tagged-SEH-enabled build, it is blindly assumed to be tagged by the
// calling instance of the CLR.
BOOL WasThrownByUs(const EXCEPTION_RECORD *pcER, DWORD dwExceptionCode);
//-----------------------------------------------------------------------------------
// The following group wraps the basic abstracts specifically for EXCEPTION_COMPLUS.
//-----------------------------------------------------------------------------------
BOOL IsComPlusException(const EXCEPTION_RECORD *pcER);
VOID RaiseComPlusException();
//===========================================================================================
//===========================================================================================
//-------------------------------------------------------------------------------------------
// This routine will generate the most descriptive possible error message for an hresult.
// It will generate at minimum the hex value. It will also try to generate the symbolic name
// (E_POINTER) and the friendly description (from the message tables.)
//
// bNoGeekStuff suppresses hex HR codes. Use this sparingly as most error strings generated by the
// CLR are aimed at developers, not end-users.
//-------------------------------------------------------------------------------------------
void GetHRMsg(HRESULT hresult, SString &result, BOOL bNoGeekStuff = FALSE);
//-------------------------------------------------------------------------------------------
// Similar to GetHRMsg but phrased for top-level exception message.
//-------------------------------------------------------------------------------------------
void GenerateTopLevelHRExceptionMessage(HRESULT hresult, SString &result);
// ---------------------------------------------------------------------------
// We save current ExceptionPointers using VectoredExceptionHandler. The saved data is only valid
// during exception handling. GetCurrentExceptionPointers returns the saved data.
// ---------------------------------------------------------------------------
void GetCurrentExceptionPointers(PEXCEPTION_POINTERS pExceptionInfo DEBUG_ARG(bool checkExceptionRecordLocation));
// ---------------------------------------------------------------------------
// We save current ExceptionPointers using VectoredExceptionHandler. The saved data is only valid
// during exception handling. GetCurrentExceptionCode returns the current exception code.
// ---------------------------------------------------------------------------
DWORD GetCurrentExceptionCode();
// ---------------------------------------------------------------------------
// Standard exception hierarchy & infrastructure for library code & EE
// ---------------------------------------------------------------------------
// ---------------------------------------------------------------------------
// Exception class. Abstract root exception of our hierarchy.
// ---------------------------------------------------------------------------
class Exception;
class SEHException;
// Exception hierarchy:
/* GetInstanceType
Exception
|
|-> HRException Y
| |
| |-> HRMsgException
| |-> COMException
|
|-> SEHException Y
|
|-> DelegatingException Y
|
|-> OutOfMemoryException Y
|
|-> CLRException Y
|
|-> EEException Y
| |
| |-> EEMessageException
| |
| |-> EEResourceException
| |
| |-> EECOMException
| |
| |-> EEFieldException
| |
| |-> EEMethodException
| |
| |-> EEArgumentException
| |
| |-> EETypeLoadException
| |
| |-> EEFileLoadException
|
|-> ObjrefException Y
|
|-> CLRLastThrownObjectException Y
*/
class Exception
{
friend bool DebugIsEECxxExceptionPointer(void* pv);
private:
static const int c_type = 0x524f4f54; // 'ROOT'
static Exception * g_OOMException;
static Exception * g_SOException;
protected:
Exception *m_innerException;
public:
Exception() {LIMITED_METHOD_DAC_CONTRACT; m_innerException = NULL;}
virtual ~Exception() {LIMITED_METHOD_DAC_CONTRACT; if (m_innerException != NULL) Exception::Delete(m_innerException); }
virtual BOOL IsDomainBound() {return m_innerException!=NULL && m_innerException->IsDomainBound();} ;
virtual HRESULT GetHR() = 0;
virtual void GetMessage(SString &s);
virtual IErrorInfo *GetErrorInfo() { LIMITED_METHOD_CONTRACT; return NULL; }
virtual HRESULT SetErrorInfo() { LIMITED_METHOD_CONTRACT; return S_OK; }
void SetInnerException(Exception * pInnerException) { LIMITED_METHOD_CONTRACT; m_innerException = pInnerException; }
// Dynamic type query for catchers
static int GetType() { LIMITED_METHOD_CONTRACT; return c_type; }
// !!! If GetInstanceType is implemented, IsSameInstanceType should be implemented
virtual int GetInstanceType() = 0;
virtual BOOL IsType(int type) {LIMITED_METHOD_CONTRACT; return type == c_type; }
// This is used in CLRException::GetThrowable to detect if we are in a recursive situation.
virtual BOOL IsSameInstanceType(Exception *pException) = 0;
// Will create a new instance of the Exception. Note that this will
// be free of app domain or thread affinity. Not every type of exception
// can be cloned with full fidelity.
virtual Exception *Clone();
// DomainBoundClone is a specialized form of cloning which is guaranteed
// to provide full fidelity. However, the result is bound to the current
// app domain and should not be leaked.
Exception *DomainBoundClone();
class HandlerState
{
enum CaughtFlags
{
Caught = 1,
CaughtSO = 2,
CaughtCxx = 4,
};
DWORD m_dwFlags;
public:
Exception* m_pExceptionPtr;
HandlerState();
void CleanupTry();
void SetupCatch(INDEBUG_COMMA(_In_z_ const char * szFile) int lineNum);
void SucceedCatch();
BOOL DidCatch() { return (m_dwFlags & Caught); }
void SetCaught() { m_dwFlags |= Caught; }
BOOL DidCatchCxx() { return (m_dwFlags & CaughtCxx); }
void SetCaughtCxx() { m_dwFlags |= CaughtCxx; }
};
// Is this exception type considered "uncatchable"?
BOOL IsTerminal();
// Is this exception type considered "transient" (would a retry possibly succeed)?
BOOL IsTransient();
static BOOL IsTransient(HRESULT hr);
// Get an HRESULT's source representation, if known
static LPCSTR GetHRSymbolicName(HRESULT hr);
static Exception* GetOOMException();
// Preallocated exceptions: If there is a preallocated instance of some
// subclass of Exception, override this function and return a correct
// value. The default implementation returns constant FALSE
virtual BOOL IsPreallocatedException();
BOOL IsPreallocatedOOMException();
static void Delete(Exception* pvMemory);
protected:
// This virtual method must be implemented by any non abstract Exception
// derived class. It must allocate a NEW exception of the identical type and
// copy all the relevant fields from the current exception to the new one.
// It is NOT responsible however for copying the inner exception. This
// will be handled by the base Exception class.
virtual Exception *CloneHelper();
// This virtual method must be implemented by Exception subclasses whose
// DomainBoundClone behavior is different than their normal clone behavior.
// It must allocate a NEW exception of the identical type and
// copy all the relevant fields from the current exception to the new one.
// It is NOT responsible however for copying the inner exception. This
// will be handled by the base Exception class.
virtual Exception *DomainBoundCloneHelper() { return CloneHelper(); }
};
#if 1
inline void Exception__Delete(Exception* pvMemory)
{
Exception::Delete(pvMemory);
}
using ExceptionHolder = SpecializedWrapper<Exception, Exception__Delete>;
#else
//------------------------------------------------------------------------------
// class ExceptionHolder
//
// This is a very lightweight holder class for use inside the EX_TRY family
// of macros. It is based on the standard Holder classes, but has been
// highly specialized for this one function, so that extra code can be
// removed, and the resulting code can be simple enough for all of the
// non-exceptional-case code to be inlined.
class ExceptionHolder
{
private:
Exception *m_value;
BOOL m_acquired;
public:
FORCEINLINE ExceptionHolder(Exception *pException = NULL, BOOL take = TRUE)
: m_value(pException)
{
m_acquired = pException && take;
}
FORCEINLINE ~ExceptionHolder()
{
if (m_acquired)
{
Exception::Delete(m_value);
}
}
Exception* operator->() { return m_value; }
void operator=(Exception *p)
{
Release();
m_value = p;
Acquire();
}
BOOL IsNull() { return m_value == NULL; }
operator Exception*() { return m_value; }
Exception* GetValue() { return m_value; }
void SuppressRelease() { m_acquired = FALSE; }
private:
void Acquire()
{
_ASSERTE(!m_acquired);
if (!IsNull())
{
m_acquired = TRUE;
}
}
void Release()
{
if (m_acquired)
{
_ASSERTE(!IsNull());
Exception::Delete(m_value);
m_acquired = FALSE;
}
}
};
#endif
// ---------------------------------------------------------------------------
// HRException class. Implements exception API for exceptions generated from HRESULTs
// ---------------------------------------------------------------------------
class HRException : public Exception
{
friend bool DebugIsEECxxExceptionPointer(void* pv);
protected:
HRESULT m_hr;
public:
HRException();
HRException(HRESULT hr);
static const int c_type = 0x48522020; // 'HR '
// Dynamic type query for catchers
static int GetType() {LIMITED_METHOD_DAC_CONTRACT; return c_type; }
virtual int GetInstanceType() { LIMITED_METHOD_CONTRACT; return c_type; }
virtual BOOL IsType(int type) { WRAPPER_NO_CONTRACT; return type == c_type || Exception::IsType(type); }
// Virtual overrides
HRESULT GetHR();
BOOL IsSameInstanceType(Exception *pException)
{
WRAPPER_NO_CONTRACT;
return pException->GetInstanceType() == GetType() && pException->GetHR() == m_hr;
}
protected:
virtual Exception *CloneHelper()
{
WRAPPER_NO_CONTRACT;
return new HRException(m_hr);
}
};
// ---------------------------------------------------------------------------
// HRMessageException class. Implements exception API for exceptions
// generated from HRESULTs, and includes an info message.
// ---------------------------------------------------------------------------
class HRMsgException : public HRException
{
friend bool DebugIsEECxxExceptionPointer(void* pv);
protected:
SString m_msg;
public:
HRMsgException();
HRMsgException(HRESULT hr, SString const &msg);
// Virtual overrides
void GetMessage(SString &s);
protected:
virtual Exception *CloneHelper()
{
WRAPPER_NO_CONTRACT;
return new HRMsgException(m_hr, m_msg);
}
};
// ---------------------------------------------------------------------------
// COMException class. Implements exception API for standard COM-based error info
// ---------------------------------------------------------------------------
class COMException : public HRException
{
friend bool DebugIsEECxxExceptionPointer(void* pv);
private:
IErrorInfo *m_pErrorInfo;
public:
COMException();
COMException(HRESULT hr) ;
COMException(HRESULT hr, IErrorInfo *pErrorInfo);
~COMException();
// Virtual overrides
IErrorInfo *GetErrorInfo();
void GetMessage(SString &result);
protected:
virtual Exception *CloneHelper()
{
WRAPPER_NO_CONTRACT;
return new COMException(m_hr, m_pErrorInfo);
}
};
// ---------------------------------------------------------------------------
// SEHException class. Implements exception API for SEH exception info
// ---------------------------------------------------------------------------
class SEHException : public Exception
{
friend bool DebugIsEECxxExceptionPointer(void* pv);
public:
EXCEPTION_RECORD m_exception;
SEHException();
SEHException(EXCEPTION_RECORD *pRecord, T_CONTEXT *pContext = NULL);
static const int c_type = 0x53454820; // 'SEH '
// Dynamic type query for catchers
static int GetType() {LIMITED_METHOD_CONTRACT; return c_type; }
virtual int GetInstanceType() { LIMITED_METHOD_CONTRACT; return c_type; }
virtual BOOL IsType(int type) { WRAPPER_NO_CONTRACT; return type == c_type || Exception::IsType(type); }
BOOL IsSameInstanceType(Exception *pException)
{
WRAPPER_NO_CONTRACT;
return pException->GetInstanceType() == GetType() && pException->GetHR() == GetHR();
}
// Virtual overrides
HRESULT GetHR();
IErrorInfo *GetErrorInfo();
void GetMessage(SString &result);
protected:
virtual Exception *CloneHelper()
{
WRAPPER_NO_CONTRACT;
return new SEHException(&m_exception);
}
};
// ---------------------------------------------------------------------------
// DelegatingException class. Implements exception API for "foreign" exceptions.
// ---------------------------------------------------------------------------
class DelegatingException : public Exception
{
Exception *m_delegatedException;
Exception* GetDelegate();
enum {DELEGATE_NOT_YET_SET = -1};
bool IsDelegateSet() {LIMITED_METHOD_DAC_CONTRACT; return m_delegatedException != (Exception*)DELEGATE_NOT_YET_SET; }
bool IsDelegateValid() {LIMITED_METHOD_DAC_CONTRACT; return IsDelegateSet() && m_delegatedException != NULL; }
public:
DelegatingException();
~DelegatingException();
static const int c_type = 0x44454C20; // 'DEL '
// Dynamic type query for catchers
static int GetType() {LIMITED_METHOD_CONTRACT; return c_type; }
virtual int GetInstanceType() { LIMITED_METHOD_CONTRACT; return c_type; }
virtual BOOL IsType(int type) { WRAPPER_NO_CONTRACT; return type == c_type || Exception::IsType(type); }
BOOL IsSameInstanceType(Exception *pException)
{
WRAPPER_NO_CONTRACT;
return pException->GetInstanceType() == GetType() && pException->GetHR() == GetHR();
}
// Virtual overrides
virtual BOOL IsDomainBound() {return Exception::IsDomainBound() ||(m_delegatedException!=NULL && m_delegatedException->IsDomainBound());} ;
HRESULT GetHR();
IErrorInfo *GetErrorInfo();
void GetMessage(SString &result);
virtual Exception *Clone();
protected:
virtual Exception *CloneHelper()
{
WRAPPER_NO_CONTRACT;
return new DelegatingException();
}
};
//------------------------------------------------------------------------------
// class OutOfMemoryException
//
// While there could be any number of instances of this class, there is one
// special instance, the pre-allocated OOM exception. Storage for that
// instance is allocated in the image, so we can always obtain it, even
// in low memory situations.
// Note that, in fact, there is only one instance.
//------------------------------------------------------------------------------
class OutOfMemoryException : public Exception
{
private:
static const int c_type = 0x4F4F4D20; // 'OOM '
BOOL bIsPreallocated;
public:
OutOfMemoryException() : bIsPreallocated(FALSE) {}
OutOfMemoryException(BOOL b) : bIsPreallocated(b) {}
// Dynamic type query for catchers
static int GetType() {LIMITED_METHOD_CONTRACT; return c_type; }
virtual int GetInstanceType() { LIMITED_METHOD_CONTRACT; return c_type; }
BOOL IsType(int type) { WRAPPER_NO_CONTRACT; return type == c_type || Exception::IsType(type); }
BOOL IsSameInstanceType(Exception *pException)
{
WRAPPER_NO_CONTRACT;
return pException->GetInstanceType() == GetType();
}
HRESULT GetHR() {LIMITED_METHOD_DAC_CONTRACT; return E_OUTOFMEMORY; }
void GetMessage(SString &result) { WRAPPER_NO_CONTRACT; result.SetASCII("Out Of Memory"); }
virtual Exception *Clone();
virtual BOOL IsPreallocatedException() { return bIsPreallocated; }
};
template <typename STATETYPE>
class CAutoTryCleanup
{
public:
DEBUG_NOINLINE CAutoTryCleanup(STATETYPE& refState) :
m_refState(refState)
{
SCAN_SCOPE_BEGIN;
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_SUPPORTS_DAC;
#ifdef ENABLE_CONTRACTS_IMPL
// This is similar to ClrTryMarkerHolder. We're marking that it's okay to throw on this thread now because
// we're within a try block. We fold this into here strictly for performance reasons... we have one
// stack-allocated object do the work.
m_pClrDebugState = GetClrDebugState();
m_oldOkayToThrowValue = m_pClrDebugState->IsOkToThrow();
m_pClrDebugState->SetOkToThrow();
#endif
}
DEBUG_NOINLINE ~CAutoTryCleanup()
{
SCAN_SCOPE_END;
WRAPPER_NO_CONTRACT;
m_refState.CleanupTry();
#ifdef ENABLE_CONTRACTS_IMPL
// Restore the original OkayToThrow value since we're leaving the try block.
m_pClrDebugState->SetOkToThrow( m_oldOkayToThrowValue );
#endif // ENABLE_CONTRACTS_IMPL
}
protected:
STATETYPE& m_refState;
#ifdef ENABLE_CONTRACTS_DATA
private:
BOOL m_oldOkayToThrowValue;
ClrDebugState *m_pClrDebugState;
#endif
};
// ---------------------------------------------------------------------------
// Throw/Catch macros
//
// Usage:
//
// EX_TRY
// {
// EX_THROW(HRException, (E_FAIL));
// }
// EX_CATCH
// {
// Exception *e = GET_EXCEPTION();
// EX_RETHROW;
// }
// EX_END_CATCH(RethrowTerminalExceptions, RethrowTransientExceptions or SwallowAllExceptions)
//
// ---------------------------------------------------------------------------
// ---------------------------------------------------------------------------
// #NO_HOST_CPP_EH_ONLY
//
// The EX_CATCH* macros defined below can work one of two ways:
// 1. They catch all exceptions, both C++ and SEH exceptions.
// 2. They catch only C++ exceptions.
//
// Which way they are defined depends on what sort of handling of SEH
// exceptions, like AV's, you wish to have in your DLL. In general we
// do not want to catch and swallow AV's.
//
// By default, the macros catch all exceptions. This is how they work when
// compiled into the primary runtime DLL (clr.dll). This is reasonable for
// the CLR because it needs to also catch managed exceptions, which are SEH
// exceptions, and because that DLL also includes a vectored exception
// handler that will take down the process on any AV within clr.dll.
//
// But for uses of these macros outside of the CLR DLL there are other
// possibilities. If a DLL only uses facilities in Utilcode that throw the
// C++ exceptions defined above, and never needs to catch a managed exception,
// then that DLL should set up the macros to only catch C++ exceptions. That
// way, AV's are not accidentally swallowed and hidden.
//
// On the other hand, if a DLL needs to catch managed exceptions, then it has
// no choice but to also catch all SEH exceptions, including AV's. In that case
// the DLL should also include a vectored handler, like CLR.dll, to take the
// process down on an AV.
//
// The behavior difference is controlled by NO_HOST_CPP_EH_ONLY. When defined,
// the EX_CATCH* macros only catch C++ exceptions. When not defined, they catch
// C++ and SEH exceptions.
//
// Note: use of NO_HOST_CPP_EH_ONLY is only valid outside the primary CLR DLLs.
// Thus it is an error to attempt to define it without also defining SELF_NO_HOST.
// ---------------------------------------------------------------------------
#if defined(NO_HOST_CPP_EH_ONLY) && !defined(SELF_NO_HOST)
#error It is incorrect to attempt to have C++-only EH macros when hosted. This is only valid for components outside the runtime DLLs.
#endif
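// Illustrative sketch (hypothetical out-of-runtime component; the defines below are
// examples only and would normally come from the component's preprocessor settings):
//
//      #define SELF_NO_HOST
//      #define NO_HOST_CPP_EH_ONLY   // catch only C++ exceptions; let AVs propagate
//      #include "ex.h"
//
//      // With these defines, EX_CATCH catches only the C++ exceptions defined above,
//      // so an AV inside the try block is not accidentally swallowed.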
//-----------------------------------------------------------------------
// EX_END_CATCH has a mandatory argument which is one of "RethrowTerminalExceptions",
// "RethrowTransientExceptions", or "SwallowAllExceptions".
//
// If an exception is considered "terminal" (e->IsTerminal()), it should normally
// be allowed to proceed. Hence, most of the time, you should use RethrowTerminalExceptions.
//
// In some cases you will want transient exceptions (terminal plus things like
// resource exhaustion) to proceed as well. Use RethrowTransientExceptions for this case.
//
// If you have a good reason to use SwallowAllExceptions, (e.g. a hard COM interop boundary)
// use one of the higher level macros for this if available, or consider developing one.
// Otherwise, clearly document why you're swallowing terminal exceptions. Raw uses of
// SwallowAllExceptions will cause the cleanup police to come knocking on your door
// at some point.
//
// A lot of existing TRY's swallow terminals right now simply because there is
// backout code following the END_CATCH that has to be executed. The solution is
// to replace that backout code with holder objects.
//-----------------------------------------------------------------------
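// Illustrative sketch of picking a policy (DoWork() is a hypothetical helper):
//
//      HRESULT hr = S_OK;
//      EX_TRY
//      {
//          DoWork();                        // may throw
//      }
//      EX_CATCH
//      {
//          hr = GET_EXCEPTION()->GetHR();   // record the failure
//      }
//      EX_END_CATCH(RethrowTerminalExceptions)  // terminal exceptions keep propagating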
#define RethrowTransientExceptions \
if (GET_EXCEPTION()->IsTransient()) \
{ \
EX_RETHROW; \
} \
#define SwallowAllExceptions ;
// When applied to EX_END_CATCH, this policy will always rethrow Terminal exceptions if they are
// encountered.
#define RethrowTerminalExceptions \
if (GET_EXCEPTION()->IsTerminal()) \
{ \
STATIC_CONTRACT_THROWS_TERMINAL; \
EX_RETHROW; \
} \
// Special define to be used in EEStartup that will also check for VM initialization before
// commencing on a path that may use the managed thread object.
#define RethrowTerminalExceptionsWithInitCheck \
if ((g_fEEStarted == TRUE) && (GetThreadNULLOk() != NULL)) \
{ \
RethrowTerminalExceptions \
}
#ifdef _DEBUG
void ExThrowTrap(const char *fcn, const char *file, int line, const char *szType, HRESULT hr, const char *args);
#define EX_THROW_DEBUG_TRAP(fcn, file, line, szType, hr, args) ExThrowTrap(fcn, file, line, szType, hr, args)
#else
#define EX_THROW_DEBUG_TRAP(fcn, file, line, szType, hr, args)
#endif
#define EX_THROW(_type, _args) \
{ \
FAULT_NOT_FATAL(); \
\
_type * ___pExForExThrow = new _type _args ; \
/* don't embed file names in retail to save space and avoid IP */ \
/* a findstr /n will allow you to locate it in a pinch */ \
STRESS_LOG3(LF_EH, LL_INFO100, "EX_THROW Type = 0x%x HR = 0x%x, " \
INDEBUG(__FILE__) " line %d\n", _type::GetType(), \
___pExForExThrow->GetHR(), __LINE__); \
EX_THROW_DEBUG_TRAP(__FUNCTION__, __FILE__, __LINE__, #_type, ___pExForExThrow->GetHR(), #_args); \
PAL_CPP_THROW(_type *, ___pExForExThrow); \
}
//--------------------------------------------------------------------------------
// Clones an exception into the current domain. Also handles special cases for
// OOM and other stuff. Making this a function so we don't inline all this logic
// every place we call EX_THROW_WITH_INNER.
//--------------------------------------------------------------------------------
Exception *ExThrowWithInnerHelper(Exception *inner);
// This macro will set the m_innerException into the newly created exception
// The passed in _type has to be derived from CLRException. You cannot put OOM
// as the inner exception. If we are throwing in OOM case, allocate more memory (this macro will clone)
// does not make any sense.
//
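// Illustrative sketch (names are hypothetical; the inner exception is cloned by
// ExThrowWithInnerHelper before being attached to the newly thrown exception):
//
//      EX_THROW_WITH_INNER(MyClrDerivedException, (E_FAIL), pCaughtInnerException);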
#define EX_THROW_WITH_INNER(_type, _args, _inner) \
{ \
FAULT_NOT_FATAL(); \
\
Exception *_inner2 = ExThrowWithInnerHelper(_inner); \
_type *___pExForExThrow = new _type _args ; \
___pExForExThrow->SetInnerException(_inner2); \
STRESS_LOG3(LF_EH, LL_INFO100, "EX_THROW_WITH_INNER Type = 0x%x HR = 0x%x, " \
INDEBUG(__FILE__) " line %d\n", _type::GetType(), \
___pExForExThrow->GetHR(), __LINE__); \
EX_THROW_DEBUG_TRAP(__FUNCTION__, __FILE__, __LINE__, #_type, ___pExForExThrow->GetHR(), #_args); \
PAL_CPP_THROW(_type *, ___pExForExThrow); \
}
//#define IsCLRException(ex) ((ex !=NULL) && ex->IsType(CLRException::GetType())
#define EX_TRY_IMPL EX_TRY_CUSTOM(Exception::HandlerState, , DelegatingException /* was SEHException*/)
#define EX_TRY_CPP_ONLY EX_TRY_CUSTOM_CPP_ONLY(Exception::HandlerState, , DelegatingException /* was SEHException*/)
#ifndef INCONTRACT
#ifdef ENABLE_CONTRACTS
#define INCONTRACT(x) x
#else
#define INCONTRACT(x)
#endif
#endif
#define EX_TRY_CUSTOM(STATETYPE, STATEARG, DEFAULT_EXCEPTION_TYPE) \
{ \
STATETYPE __state STATEARG; \
typedef DEFAULT_EXCEPTION_TYPE __defaultException_t; \
SCAN_EHMARKER(); \
PAL_CPP_TRY \
{ \
SCAN_EHMARKER_TRY(); \
SCAN_EHMARKER(); \
PAL_CPP_TRY \
{ \
SCAN_EHMARKER_TRY(); \
CAutoTryCleanup<STATETYPE> __autoCleanupTry(__state); \
/* prevent annotations from being dropped by optimizations in debug */ \
INDEBUG(static bool __alwayszero;) \
INDEBUG(VolatileLoad(&__alwayszero);) \
{ \
/* Disallow returns to make exception handling work. */ \
/* Some work is done after the catch, see EX_ENDTRY. */ \
DEBUG_ASSURE_NO_RETURN_BEGIN(EX_TRY) \
EX_TRY_HOLDER \
#define EX_CATCH_IMPL_EX(DerivedExceptionClass) \
DEBUG_ASSURE_NO_RETURN_END(EX_TRY) \
} \
SCAN_EHMARKER_END_TRY(); \
} \
PAL_CPP_CATCH_DERIVED (DerivedExceptionClass, __pExceptionRaw) \
{ \
SCAN_EHMARKER_CATCH(); \
__state.SetCaughtCxx(); \
__state.m_pExceptionPtr = __pExceptionRaw; \
SCAN_EHMARKER_END_CATCH(); \
SCAN_IGNORE_THROW_MARKER; \
PAL_CPP_RETHROW; \
} \
PAL_CPP_ENDTRY \
SCAN_EHMARKER_END_TRY(); \
} \
PAL_CPP_CATCH_ALL \
{ \
SCAN_EHMARKER_CATCH(); \
__defaultException_t __defaultException; \
CHECK::ResetAssert(); \
ExceptionHolder __pException(__state.m_pExceptionPtr); \
/* work around unreachable code warning */ \
if (true) { \
DEBUG_ASSURE_NO_RETURN_BEGIN(EX_CATCH) \
/* don't embed file names in retail to save space and avoid IP */ \
/* a findstr /n will allow you to locate it in a pinch */ \
__state.SetupCatch(INDEBUG_COMMA(__FILE__) __LINE__); \
#define EX_CATCH_IMPL EX_CATCH_IMPL_EX(Exception)
#define EX_TRY_CUSTOM_CPP_ONLY(STATETYPE, STATEARG, DEFAULT_EXCEPTION_TYPE) \
{ \
STATETYPE __state STATEARG; \
typedef DEFAULT_EXCEPTION_TYPE __defaultException_t; \
SCAN_EHMARKER(); \
PAL_CPP_TRY \
{ \
SCAN_EHMARKER_TRY(); \
CAutoTryCleanup<STATETYPE> __autoCleanupTry(__state); \
/* prevent annotations from being dropped by optimizations in debug */ \
INDEBUG(static bool __alwayszero;) \
INDEBUG(VolatileLoad(&__alwayszero);) \
{ \
/* Disallow returns to make exception handling work. */ \
/* Some work is done after the catch, see EX_ENDTRY. */ \
DEBUG_ASSURE_NO_RETURN_BEGIN(EX_TRY) \
#define EX_CATCH_IMPL_CPP_ONLY \
DEBUG_ASSURE_NO_RETURN_END(EX_TRY) \
} \
SCAN_EHMARKER_END_TRY(); \
} \
PAL_CPP_CATCH_DERIVED (Exception, __pExceptionRaw) \
{ \
SCAN_EHMARKER_CATCH(); \
__state.SetCaughtCxx(); \
__state.m_pExceptionPtr = __pExceptionRaw; \
SCAN_EHMARKER_END_CATCH(); \
SCAN_IGNORE_THROW_MARKER; \
__defaultException_t __defaultException; \
CHECK::ResetAssert(); \
ExceptionHolder __pException(__state.m_pExceptionPtr); \
/* work around unreachable code warning */ \
if (true) { \
DEBUG_ASSURE_NO_RETURN_BEGIN(EX_CATCH) \
/* don't embed file names in retail to save space and avoid IP */ \
/* a findstr /n will allow you to locate it in a pinch */ \
__state.SetupCatch(INDEBUG_COMMA(__FILE__) __LINE__); \
// Here we finally define the EX_CATCH* macros that will be used throughout the system.
// These can catch C++ and SEH exceptions, or just C++ exceptions.
// See code:NO_HOST_CPP_EH_ONLY for more details.
//
// Note: we make it illegal to use forms that are redundant with the basic EX_CATCH
// version. I.e., in the C++ & SEH version, EX_CATCH_CPP_AND_SEH is the same as EX_CATCH.
// Likewise, in the C++ only version, EX_CATCH_CPP_ONLY is redundant with EX_CATCH.
#ifndef NO_HOST_CPP_EH_ONLY
#define EX_TRY EX_TRY_IMPL
#define EX_CATCH EX_CATCH_IMPL
#define EX_CATCH_EX EX_CATCH_IMPL_EX
#define EX_CATCH_CPP_ONLY EX_CATCH_IMPL_CPP_ONLY
#define EX_CATCH_CPP_AND_SEH Dont_Use_EX_CATCH_CPP_AND_SEH
#else
#define EX_TRY EX_TRY_CPP_ONLY
#define EX_CATCH EX_CATCH_IMPL_CPP_ONLY
#define EX_CATCH_CPP_ONLY Dont_Use_EX_CATCH_CPP_ONLY
#define EX_CATCH_CPP_AND_SEH EX_CATCH_IMPL
// Note: at this time we don't have a use case for EX_CATCH_EX, and we do not have
// the C++-only version of the implementation available. Thus we disallow its use at this time.
// If a real use case arises then we should go ahead and enable this.
#define EX_CATCH_EX Dont_Use_EX_CATCH_EX
#endif
#define EX_END_CATCH_UNREACHABLE \
DEBUG_ASSURE_NO_RETURN_END(EX_CATCH) \
} \
SCAN_EHMARKER_END_CATCH(); \
UNREACHABLE(); \
} \
PAL_CPP_ENDTRY \
} \
// "terminalexceptionpolicy" must be one of "RethrowTerminalExceptions",
// "RethrowTransientExceptions", or "SwallowAllExceptions"
#define EX_END_CATCH(terminalexceptionpolicy) \
terminalexceptionpolicy; \
__state.SucceedCatch(); \
DEBUG_ASSURE_NO_RETURN_END(EX_CATCH) \
} \
SCAN_EHMARKER_END_CATCH(); \
} \
EX_ENDTRY \
} \
#define EX_END_CATCH_FOR_HOOK \
__state.SucceedCatch(); \
DEBUG_ASSURE_NO_RETURN_END(EX_CATCH) \
ANNOTATION_HANDLER_END; \
} \
SCAN_EHMARKER_END_CATCH(); \
} \
EX_ENDTRY
#define EX_ENDTRY \
PAL_CPP_ENDTRY
#define EX_RETHROW \
{ \
__pException.SuppressRelease(); \
PAL_CPP_RETHROW; \
} \
// Define a copy of GET_EXCEPTION() that will not be redefined by clrex.h
#define GET_EXCEPTION() (__pException == NULL ? &__defaultException : __pException.GetValue())
#define EXTRACT_EXCEPTION() (__pException.Extract())
//==============================================================================
// High-level macros for common uses of EX_TRY. Try using these rather
// than the raw EX_TRY constructs.
//==============================================================================
//===================================================================================
// Macro for converting exceptions into HR internally. Unlike EX_CATCH_HRESULT,
// it does not set up IErrorInfo on the current thread.
//
// Usage:
//
// HRESULT hr = S_OK;
// EX_TRY
// <do managed stuff>
// EX_CATCH_HRESULT_NO_ERRORINFO(hr);
// return hr;
//
// Comments:
// Since IErrorInfo is not set up, this does not require COM interop to be started.
//===================================================================================
#define EX_CATCH_HRESULT_NO_ERRORINFO(_hr) \
EX_CATCH \
{ \
(_hr) = GET_EXCEPTION()->GetHR(); \
_ASSERTE(FAILED(_hr)); \
} \
EX_END_CATCH(SwallowAllExceptions)
//===================================================================================
// Macro for catching managed exception object.
//
// Usage:
//
// OBJECTREF pThrowable = NULL;
// EX_TRY
// <do managed stuff>
// EX_CATCH_THROWABLE(&pThrowable);
//
//===================================================================================
#define EX_CATCH_THROWABLE(ppThrowable) \
EX_CATCH \
{ \
*ppThrowable = GET_THROWABLE(); \
} \
EX_END_CATCH(SwallowAllExceptions)
#ifdef FEATURE_COMINTEROP
//===================================================================================
// Macro for defining external entrypoints such as COM interop boundaries.
// The boundary will catch all exceptions (including terminals) and convert
// them into HR/IErrorInfo pairs as appropriate.
//
// Usage:
//
// HRESULT hr = S_OK;
// EX_TRY
// <do managed stuff>
// EX_CATCH_HRESULT(hr);
// return hr;
//
// Comments:
// Note that IErrorInfo will automatically be set up on the thread if appropriate.
//===================================================================================
#define EX_CATCH_HRESULT(_hr) \
EX_CATCH \
{ \
(_hr) = GET_EXCEPTION()->GetHR(); \
_ASSERTE(FAILED(_hr)); \
IErrorInfo *pErr = GET_EXCEPTION()->GetErrorInfo(); \
if (pErr != NULL) \
{ \
SetErrorInfo(0, pErr); \
pErr->Release(); \
} \
} \
EX_END_CATCH(SwallowAllExceptions)
//===================================================================================
// Macro to make conditional catching more succinct.
//
// Usage:
//
// EX_TRY
// ...
// EX_CATCH_HRESULT_IF(hr, IsHRESULTForExceptionKind(GET_EXCEPTION()->GetHR(), kFileNotFoundException));
//===================================================================================
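// A fuller sketch of the same pattern (LoadTheFile() is a hypothetical helper); any
// exception for which the condition is false is rethrown by the macro:
//
//      HRESULT hr = S_OK;
//      EX_TRY
//      {
//          LoadTheFile();
//      }
//      EX_CATCH_HRESULT_IF(hr,
//          IsHRESULTForExceptionKind(GET_EXCEPTION()->GetHR(), kFileNotFoundException));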
#define EX_CATCH_HRESULT_IF(HR, ...) \
EX_CATCH \
{ \
(HR) = GET_EXCEPTION()->GetHR(); \
\
/* Rethrow if condition is false. */ \
if (!(__VA_ARGS__)) \
EX_RETHROW; \
\
_ASSERTE(FAILED(HR)); \
IErrorInfo *pErr = GET_EXCEPTION()->GetErrorInfo(); \
if (pErr != NULL) \
{ \
SetErrorInfo(0, pErr); \
pErr->Release(); \
} \
} \
EX_END_CATCH(SwallowAllExceptions)
#else // FEATURE_COMINTEROP
#define EX_CATCH_HRESULT(_hr) EX_CATCH_HRESULT_NO_ERRORINFO(_hr)
#endif // FEATURE_COMINTEROP
//===================================================================================
// Macro for containing normal exceptions but letting terminal exceptions continue to propagate.
//
// Usage:
//
// EX_TRY
// {
// ...your stuff...
// }
// EX_SWALLOW_NONTERMINAL
//
// Remember, terminal exceptions (such as ThreadAbort) will still throw out of this
// block. So don't use this as a substitute for exception-safe cleanup!
//===================================================================================
#define EX_SWALLOW_NONTERMINAL \
EX_CATCH \
{ \
} \
EX_END_CATCH(RethrowTerminalExceptions) \
//===================================================================================
// Macro for containing normal exceptions but letting transient exceptions continue to propagate.
//
// Usage:
//
// EX_TRY
// {
// ...your stuff...
// }
// EX_SWALLOW_NONTRANSIENT
//
// Transient exceptions (such as ThreadAbort and OutOfMemory) will still throw out of this
// block. So don't use this as a substitute for exception-safe cleanup!
//===================================================================================
#define EX_SWALLOW_NONTRANSIENT \
EX_CATCH \
{ \
} \
EX_END_CATCH(RethrowTransientExceptions) \
//===================================================================================
// Macro for observing or wrapping exceptions in flight.
//
// Usage:
//
// EX_TRY
// {
// ... your stuff ...
// }
// EX_HOOK
// {
// ... your stuff ...
// }
// EX_END_HOOK
// ... control will never get here ...
//
//
// EX_HOOK is like EX_CATCH except that you can't prevent the
// exception from being rethrown. You can throw a new exception inside the hook
// (for example, if you want to wrap the exception in flight with your own).
// But if control reaches the end of the hook, the original exception gets rethrown.
//
// Avoid using EX_HOOK for conditional backout if a destructor-based holder
// will suffice. Because these macros are implemented on top of SEH, using them will
// prevent the use of holders anywhere else inside the same function. That is, instead
// of saying this:
//
// EX_TRY // DON'T DO THIS
// {
// thing = new Thing();
// blah
// }
// EX_HOOK
// {
// delete thing; // if it failed, we don't want to keep the Thing.
// }
// EX_END_HOOK
//
// do this:
//
// Holder<Thing> thing = new Thing(); //DO THIS INSTEAD
// blah
// // If we got here, we succeeded. So tell holder we want to keep the thing.
// thing.SuppressRelease();
//
// We won't rethrow the exception if it is a Stack Overflow exception. Instead, we'll throw a new
// exception. This will allow the stack to unwind to this point, and so we won't be jeopardizing a
// second stack overflow.
//===================================================================================
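// Illustrative sketch of a legitimate EX_HOOK use: observing the exception in flight
// (CallIntoComponent() is a hypothetical helper; the original exception is rethrown
// automatically when the hook body falls through):
//
//      EX_TRY
//      {
//          CallIntoComponent();
//      }
//      EX_HOOK
//      {
//          STRESS_LOG1(LF_EH, LL_INFO100, "component call failed: hr=0x%x\n",
//                      GET_EXCEPTION()->GetHR());
//      }
//      EX_END_HOOK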
#define EX_HOOK \
EX_CATCH \
{ \
#define EX_END_HOOK \
} \
ANNOTATION_HANDLER_END; \
EX_RETHROW; \
EX_END_CATCH_FOR_HOOK; \
}
// ---------------------------------------------------------------------------
// Inline implementations. Pay no attention to that man behind the curtain.
// ---------------------------------------------------------------------------
inline Exception::HandlerState::HandlerState()
{
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_CANNOT_TAKE_LOCK;
STATIC_CONTRACT_SUPPORTS_DAC;
m_dwFlags = 0;
m_pExceptionPtr = NULL;
#if defined(STACK_GUARDS_DEBUG) && defined(ENABLE_CONTRACTS_IMPL)
// If we have a debug state, use its setting for SO tolerance. The default
// is SO-tolerant if we have no debug state. Can't probe w/o debug state and
// can't enter SO-intolerant mode w/o probing.
GetClrDebugState();
#endif
}
inline void Exception::HandlerState::CleanupTry()
{
LIMITED_METHOD_DAC_CONTRACT;
}
inline void Exception::HandlerState::SetupCatch(INDEBUG_COMMA(_In_z_ const char * szFile) int lineNum)
{
WRAPPER_NO_CONTRACT;
/* don't embed file names in retail to save space and avoid IP */
/* a findstr /n will allow you to locate it in a pinch */
#ifdef _DEBUG
STRESS_LOG2(LF_EH, LL_INFO100, "EX_CATCH %s line %d\n", szFile, lineNum);
#else
STRESS_LOG1(LF_EH, LL_INFO100, "EX_CATCH line %d\n", lineNum);
#endif
SetCaught();
}
inline void Exception::HandlerState::SucceedCatch()
{
LIMITED_METHOD_DAC_CONTRACT;
}
inline HRException::HRException()
: m_hr(E_UNEXPECTED)
{
LIMITED_METHOD_CONTRACT;
SUPPORTS_DAC;
}
inline HRException::HRException(HRESULT hr)
: m_hr(hr)
{
LIMITED_METHOD_CONTRACT;
SUPPORTS_DAC;
// Catchers assume only failing hresults
_ASSERTE(FAILED(hr));
}
inline HRMsgException::HRMsgException()
: HRException()
{
LIMITED_METHOD_CONTRACT;
}
inline HRMsgException::HRMsgException(HRESULT hr, SString const &s)
: HRException(hr), m_msg(s)
{
WRAPPER_NO_CONTRACT;
}
inline COMException::COMException()
: HRException(),
m_pErrorInfo(NULL)
{
WRAPPER_NO_CONTRACT;
}
inline COMException::COMException(HRESULT hr)
: HRException(hr),
m_pErrorInfo(NULL)
{
LIMITED_METHOD_CONTRACT;
}
inline COMException::COMException(HRESULT hr, IErrorInfo *pErrorInfo)
: HRException(hr),
m_pErrorInfo(pErrorInfo)
{
LIMITED_METHOD_CONTRACT;
}
inline SEHException::SEHException()
{
LIMITED_METHOD_CONTRACT;
memset(&m_exception, 0, sizeof(EXCEPTION_RECORD));
}
inline SEHException::SEHException(EXCEPTION_RECORD *pointers, T_CONTEXT *pContext)
{
LIMITED_METHOD_CONTRACT;
memcpy(&m_exception, pointers, sizeof(EXCEPTION_RECORD));
}
// The exception throwing helpers are intentionally not inlined
// Exception throwing is a rare slow codepath that should be optimized for code size
void DECLSPEC_NORETURN ThrowHR(HRESULT hr);
void DECLSPEC_NORETURN ThrowHR(HRESULT hr, SString const &msg);
void DECLSPEC_NORETURN ThrowHR(HRESULT hr, UINT uText);
void DECLSPEC_NORETURN ThrowWin32(DWORD err);
void DECLSPEC_NORETURN ThrowLastError();
void DECLSPEC_NORETURN ThrowOutOfMemory();
void DECLSPEC_NORETURN ThrowStackOverflow();
#undef IfFailThrow
inline HRESULT IfFailThrow(HRESULT hr)
{
WRAPPER_NO_CONTRACT;
if (FAILED(hr))
{
ThrowHR(hr);
}
return hr;
}
inline HRESULT IfFailThrow(HRESULT hr, SString &msg)
{
WRAPPER_NO_CONTRACT;
if (FAILED(hr))
{
ThrowHR(hr, msg);
}
return hr;
}
inline HRESULT IfTransientFailThrow(HRESULT hr)
{
WRAPPER_NO_CONTRACT;
if (FAILED(hr) && Exception::IsTransient(hr))
{
ThrowHR(hr);
}
return hr;
}
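// Illustrative usage (pComponent->Initialize()/Refresh() are hypothetical calls returning HRESULTs):
//
//      IfFailThrow(pComponent->Initialize());        // throws via ThrowHR on any failure
//      IfTransientFailThrow(pComponent->Refresh());  // throws only for transient failures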
// Set if fatal error (like stack overflow or out of memory) occurred in this process.
GVAL_DECL(HRESULT, g_hrFatalError);
#endif // _EX_H_
| -1 |
dotnet/runtime | 65,924 | Fix libraries GCStress pipeline | Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| BruceForstall | "2022-02-27T04:30:37Z" | "2022-03-01T18:20:06Z" | dfe08b3bea9a1897562635307ab4bb70bc31fada | 79026a542a8364b6783c5d212bb1841ac2c873a9 | Fix libraries GCStress pipeline. Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| ./src/libraries/System.Text.Json/ref/System.Text.Json.Typeforwards.netcoreapp.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// The compiler emits a reference to the internal copy of this type in our non-NETCoreApp assembly
// so we must include a forward to be compatible with libraries compiled against non-NETCoreApp System.Text.Json
[assembly: System.Runtime.CompilerServices.TypeForwardedTo(typeof(System.Runtime.CompilerServices.IsExternalInit))]
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// The compiler emits a reference to the internal copy of this type in our non-NETCoreApp assembly
// so we must include a forward to be compatible with libraries compiled against non-NETCoreApp System.Text.Json
[assembly: System.Runtime.CompilerServices.TypeForwardedTo(typeof(System.Runtime.CompilerServices.IsExternalInit))]
| -1 |
dotnet/runtime | 65,924 | Fix libraries GCStress pipeline | Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| BruceForstall | "2022-02-27T04:30:37Z" | "2022-03-01T18:20:06Z" | dfe08b3bea9a1897562635307ab4bb70bc31fada | 79026a542a8364b6783c5d212bb1841ac2c873a9 | Fix libraries GCStress pipeline. Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| ./src/tests/Loader/classloader/TypeGeneratorTests/TypeGeneratorTest87/Generated87.il | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
.assembly extern mscorlib { .publickeytoken = (B7 7A 5C 56 19 34 E0 89 ) .ver 4:0:0:0 }
.assembly extern TestFramework { .publickeytoken = ( B0 3F 5F 7F 11 D5 0A 3A ) }
//TYPES IN FORWARDER ASSEMBLIES:
//TEST ASSEMBLY:
.assembly Generated87 { .hash algorithm 0x00008004 }
.assembly extern xunit.core {}
.class public BaseClass0
{
.method public hidebysig specialname rtspecialname instance void .ctor() cil managed {
ldarg.0
call instance void [mscorlib]System.Object::.ctor()
ret
}
}
.class public BaseClass1
extends BaseClass0
{
.method public hidebysig specialname rtspecialname instance void .ctor() cil managed {
ldarg.0
call instance void BaseClass0::.ctor()
ret
}
}
.class public sequential sealed MyStruct137`1<T0>
extends [mscorlib]System.ValueType
implements class IBase2`2<class BaseClass1,!T0>, class IBase1`1<class BaseClass1>
{
.pack 0
.size 1
.method public hidebysig virtual instance string Method7<M0>() cil managed noinlining {
ldstr "MyStruct137::Method7.1190<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot virtual instance string Method4() cil managed noinlining {
ldstr "MyStruct137::Method4.1191()"
ret
}
.method public hidebysig newslot virtual instance string 'IBase1<class BaseClass1>.Method4'() cil managed noinlining {
.override method instance string class IBase1`1<class BaseClass1>::Method4()
ldstr "MyStruct137::Method4.MI.1192()"
ret
}
.method public hidebysig virtual instance string Method5() cil managed noinlining {
ldstr "MyStruct137::Method5.1193()"
ret
}
.method public hidebysig newslot virtual instance string 'IBase1<class BaseClass1>.Method5'() cil managed noinlining {
.override method instance string class IBase1`1<class BaseClass1>::Method5()
ldstr "MyStruct137::Method5.MI.1194()"
ret
}
.method public hidebysig virtual instance string Method6<M0>() cil managed noinlining {
ldstr "MyStruct137::Method6.1195<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot virtual instance string 'IBase1<class BaseClass1>.Method6'<M0>() cil managed noinlining {
.override method instance string class IBase1`1<class BaseClass1>::Method6<[1]>()
ldstr "MyStruct137::Method6.MI.1196<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot instance string ClassMethod272() cil managed noinlining {
ldstr "MyStruct137::ClassMethod272.1197()"
ret
}
.method public hidebysig newslot instance string ClassMethod273() cil managed noinlining {
ldstr "MyStruct137::ClassMethod273.1198()"
ret
}
.method public hidebysig virtual instance bool Equals(object obj) cil managed { ldc.i4.0 ret }
.method public hidebysig virtual instance int32 GetHashCode() cil managed { ldc.i4.0 ret }
.method public hidebysig virtual instance string ToString() cil managed { ldstr "" ret }
}
.class interface public abstract IBase2`2<+T0, -T1>
{
.method public hidebysig newslot abstract virtual instance string Method7<M0>() cil managed { }
}
.class interface public abstract IBase1`1<+T0>
{
.method public hidebysig newslot abstract virtual instance string Method4() cil managed { }
.method public hidebysig newslot abstract virtual instance string Method5() cil managed { }
.method public hidebysig newslot abstract virtual instance string Method6<M0>() cil managed { }
}
.class public auto ansi beforefieldinit Generated87 {
.method static void M.BaseClass0<(BaseClass0)W>(!!W inst, string exp) cil managed {
.maxstack 5
.locals init (string[] actualResults)
ldc.i4.s 0
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.BaseClass0<(BaseClass0)W>(!!W inst, string exp)"
ldc.i4.s 0
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.BaseClass1<(BaseClass1)W>(!!W inst, string exp) cil managed {
.maxstack 5
.locals init (string[] actualResults)
ldc.i4.s 0
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.BaseClass1<(BaseClass1)W>(!!W inst, string exp)"
ldc.i4.s 0
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.T.T<T0,T1,(class IBase2`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.T.T<T0,T1,(class IBase2`2<!!T0,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<!!T0,!!T1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.A.T<T1,(class IBase2`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.A.T<T1,(class IBase2`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass0,!!T1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.A.A<(class IBase2`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.A.A<(class IBase2`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.A.B<(class IBase2`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.A.B<(class IBase2`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.B.T<T1,(class IBase2`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.B.T<T1,(class IBase2`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass1,!!T1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.B.A<(class IBase2`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.B.A<(class IBase2`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.B.B<(class IBase2`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.B.B<(class IBase2`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase1.T<T0,(class IBase1`1<!!T0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 8
.locals init (string[] actualResults)
ldc.i4.s 3
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase1.T<T0,(class IBase1`1<!!T0>)W>(!!W 'inst', string exp)"
ldc.i4.s 3
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<!!T0>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<!!T0>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<!!T0>::Method6<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase1.A<(class IBase1`1<class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 8
.locals init (string[] actualResults)
ldc.i4.s 3
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase1.A<(class IBase1`1<class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 3
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass0>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass0>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase1.B<(class IBase1`1<class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 8
.locals init (string[] actualResults)
ldc.i4.s 3
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase1.B<(class IBase1`1<class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 3
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.MyStruct137.T<T0,(valuetype MyStruct137`1<!!T0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 9
.locals init (string[] actualResults)
ldc.i4.s 4
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.MyStruct137.T<T0,(valuetype MyStruct137`1<!!T0>)W>(!!W 'inst', string exp)"
ldc.i4.s 4
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. valuetype MyStruct137`1<!!T0>
callvirt instance string class IBase2`2<class BaseClass1,!!T0>::Method7<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. valuetype MyStruct137`1<!!T0>
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. valuetype MyStruct137`1<!!T0>
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. valuetype MyStruct137`1<!!T0>
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.MyStruct137.A<(valuetype MyStruct137`1<class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 9
.locals init (string[] actualResults)
ldc.i4.s 4
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.MyStruct137.A<(valuetype MyStruct137`1<class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 4
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. valuetype MyStruct137`1<class BaseClass0>
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. valuetype MyStruct137`1<class BaseClass0>
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. valuetype MyStruct137`1<class BaseClass0>
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. valuetype MyStruct137`1<class BaseClass0>
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.MyStruct137.B<(valuetype MyStruct137`1<class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 9
.locals init (string[] actualResults)
ldc.i4.s 4
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.MyStruct137.B<(valuetype MyStruct137`1<class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 4
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. valuetype MyStruct137`1<class BaseClass1>
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. valuetype MyStruct137`1<class BaseClass1>
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. valuetype MyStruct137`1<class BaseClass1>
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. valuetype MyStruct137`1<class BaseClass1>
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method public hidebysig static void MethodCallingTest() cil managed
{
.maxstack 10
.locals init (object V_0)
ldstr "========================== Method Calling Test =========================="
call void [mscorlib]System.Console::WriteLine(string)
.locals init (valuetype MyStruct137`1<class BaseClass0> V_1)
ldloca V_1
initobj valuetype MyStruct137`1<class BaseClass0>
ldloca V_1
dup
call instance string valuetype MyStruct137`1<class BaseClass0>::Method7<object>()
ldstr "MyStruct137::Method7.1190<System.Object>()"
ldstr "valuetype MyStruct137`1<class BaseClass0> on type MyStruct137"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct137`1<class BaseClass0>::Method4()
ldstr "MyStruct137::Method4.1191()"
ldstr "valuetype MyStruct137`1<class BaseClass0> on type MyStruct137"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct137`1<class BaseClass0>::Method5()
ldstr "MyStruct137::Method5.1193()"
ldstr "valuetype MyStruct137`1<class BaseClass0> on type MyStruct137"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct137`1<class BaseClass0>::Method6<object>()
ldstr "MyStruct137::Method6.1195<System.Object>()"
ldstr "valuetype MyStruct137`1<class BaseClass0> on type MyStruct137"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct137`1<class BaseClass0>::ClassMethod272()
ldstr "MyStruct137::ClassMethod272.1197()"
ldstr "valuetype MyStruct137`1<class BaseClass0> on type MyStruct137"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct137`1<class BaseClass0>::ClassMethod273()
ldstr "MyStruct137::ClassMethod273.1198()"
ldstr "valuetype MyStruct137`1<class BaseClass0> on type MyStruct137"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup ldnull call instance bool valuetype MyStruct137`1<class BaseClass0>::Equals(object) pop
dup call instance int32 valuetype MyStruct137`1<class BaseClass0>::GetHashCode() pop
dup call instance string valuetype MyStruct137`1<class BaseClass0>::ToString() pop
pop
ldloc V_1
box valuetype MyStruct137`1<class BaseClass0>
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>()
ldstr "MyStruct137::Method7.1190<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct137`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc V_1
box valuetype MyStruct137`1<class BaseClass0>
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
ldstr "MyStruct137::Method4.MI.1192()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct137`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
ldstr "MyStruct137::Method5.MI.1194()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct137`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
ldstr "MyStruct137::Method6.MI.1196<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct137`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc V_1
box valuetype MyStruct137`1<class BaseClass0>
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
ldstr "MyStruct137::Method7.1190<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct137`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc V_1
box valuetype MyStruct137`1<class BaseClass0>
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
ldstr "MyStruct137::Method7.1190<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct137`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc V_1
box valuetype MyStruct137`1<class BaseClass0>
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "MyStruct137::Method7.1190<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct137`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc V_1
box valuetype MyStruct137`1<class BaseClass0>
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method4()
ldstr "MyStruct137::Method4.MI.1192()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct137`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method5()
ldstr "MyStruct137::Method5.MI.1194()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct137`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>()
ldstr "MyStruct137::Method6.MI.1196<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct137`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
.locals init (valuetype MyStruct137`1<class BaseClass1> V_2)
ldloca V_2
initobj valuetype MyStruct137`1<class BaseClass1>
ldloca V_2
dup
call instance string valuetype MyStruct137`1<class BaseClass1>::Method7<object>()
ldstr "MyStruct137::Method7.1190<System.Object>()"
ldstr "valuetype MyStruct137`1<class BaseClass1> on type MyStruct137"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct137`1<class BaseClass1>::Method4()
ldstr "MyStruct137::Method4.1191()"
ldstr "valuetype MyStruct137`1<class BaseClass1> on type MyStruct137"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct137`1<class BaseClass1>::Method5()
ldstr "MyStruct137::Method5.1193()"
ldstr "valuetype MyStruct137`1<class BaseClass1> on type MyStruct137"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct137`1<class BaseClass1>::Method6<object>()
ldstr "MyStruct137::Method6.1195<System.Object>()"
ldstr "valuetype MyStruct137`1<class BaseClass1> on type MyStruct137"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct137`1<class BaseClass1>::ClassMethod272()
ldstr "MyStruct137::ClassMethod272.1197()"
ldstr "valuetype MyStruct137`1<class BaseClass1> on type MyStruct137"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct137`1<class BaseClass1>::ClassMethod273()
ldstr "MyStruct137::ClassMethod273.1198()"
ldstr "valuetype MyStruct137`1<class BaseClass1> on type MyStruct137"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup ldnull call instance bool valuetype MyStruct137`1<class BaseClass1>::Equals(object) pop
dup call instance int32 valuetype MyStruct137`1<class BaseClass1>::GetHashCode() pop
dup call instance string valuetype MyStruct137`1<class BaseClass1>::ToString() pop
pop
ldloc V_2
box valuetype MyStruct137`1<class BaseClass1>
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "MyStruct137::Method7.1190<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct137`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc V_2
box valuetype MyStruct137`1<class BaseClass1>
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
ldstr "MyStruct137::Method4.MI.1192()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct137`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
ldstr "MyStruct137::Method5.MI.1194()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct137`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
ldstr "MyStruct137::Method6.MI.1196<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct137`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc V_2
box valuetype MyStruct137`1<class BaseClass1>
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
ldstr "MyStruct137::Method7.1190<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct137`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc V_2
box valuetype MyStruct137`1<class BaseClass1>
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method4()
ldstr "MyStruct137::Method4.MI.1192()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct137`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method5()
ldstr "MyStruct137::Method5.MI.1194()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct137`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>()
ldstr "MyStruct137::Method6.MI.1196<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct137`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
.method public hidebysig static void ConstrainedCallsTest() cil managed
{
.maxstack 10
.locals init (object V_0)
ldstr "========================== Constrained Calls Test =========================="
call void [mscorlib]System.Console::WriteLine(string)
.locals init (valuetype MyStruct137`1<class BaseClass0> V_3)
ldloca V_3
initobj valuetype MyStruct137`1<class BaseClass0>
.try { ldloc V_3
ldstr "MyStruct137::Method7.1190<System.Object>()#"
call void Generated87::M.IBase2.T.T<class BaseClass1,class BaseClass0,valuetype MyStruct137`1<class BaseClass0>>(!!2,string) leave.s LV0
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV0} LV0:
.try { ldloc V_3
ldstr "MyStruct137::Method7.1190<System.Object>()#"
call void Generated87::M.IBase2.B.T<class BaseClass0,valuetype MyStruct137`1<class BaseClass0>>(!!1,string) leave.s LV1
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV1} LV1:
.try { ldloc V_3
ldstr "MyStruct137::Method7.1190<System.Object>()#"
call void Generated87::M.IBase2.B.A<valuetype MyStruct137`1<class BaseClass0>>(!!0,string) leave.s LV2
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV2} LV2:
.try { ldloc V_3
ldstr "MyStruct137::Method4.MI.1192()#MyStruct137::Method5.MI.1194()#MyStruct137::Method6.MI.1196<System.Object>()#"
call void Generated87::M.IBase1.T<class BaseClass1,valuetype MyStruct137`1<class BaseClass0>>(!!1,string) leave.s LV3
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV3} LV3:
.try { ldloc V_3
ldstr "MyStruct137::Method4.MI.1192()#MyStruct137::Method5.MI.1194()#MyStruct137::Method6.MI.1196<System.Object>()#"
call void Generated87::M.IBase1.B<valuetype MyStruct137`1<class BaseClass0>>(!!0,string) leave.s LV4
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV4} LV4:
.try { ldloc V_3
ldstr "MyStruct137::Method7.1190<System.Object>()#"
call void Generated87::M.IBase2.T.T<class BaseClass0,class BaseClass0,valuetype MyStruct137`1<class BaseClass0>>(!!2,string) leave.s LV5
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV5} LV5:
.try { ldloc V_3
ldstr "MyStruct137::Method7.1190<System.Object>()#"
call void Generated87::M.IBase2.A.T<class BaseClass0,valuetype MyStruct137`1<class BaseClass0>>(!!1,string) leave.s LV6
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV6} LV6:
.try { ldloc V_3
ldstr "MyStruct137::Method7.1190<System.Object>()#"
call void Generated87::M.IBase2.A.A<valuetype MyStruct137`1<class BaseClass0>>(!!0,string) leave.s LV7
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV7} LV7:
.try { ldloc V_3
ldstr "MyStruct137::Method7.1190<System.Object>()#"
call void Generated87::M.IBase2.T.T<class BaseClass0,class BaseClass1,valuetype MyStruct137`1<class BaseClass0>>(!!2,string) leave.s LV8
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV8} LV8:
.try { ldloc V_3
ldstr "MyStruct137::Method7.1190<System.Object>()#"
call void Generated87::M.IBase2.A.T<class BaseClass1,valuetype MyStruct137`1<class BaseClass0>>(!!1,string) leave.s LV9
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV9} LV9:
.try { ldloc V_3
ldstr "MyStruct137::Method7.1190<System.Object>()#"
call void Generated87::M.IBase2.A.B<valuetype MyStruct137`1<class BaseClass0>>(!!0,string) leave.s LV10
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV10} LV10:
.try { ldloc V_3
ldstr "MyStruct137::Method7.1190<System.Object>()#"
call void Generated87::M.IBase2.T.T<class BaseClass1,class BaseClass1,valuetype MyStruct137`1<class BaseClass0>>(!!2,string) leave.s LV11
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV11} LV11:
.try { ldloc V_3
ldstr "MyStruct137::Method7.1190<System.Object>()#"
call void Generated87::M.IBase2.B.T<class BaseClass1,valuetype MyStruct137`1<class BaseClass0>>(!!1,string) leave.s LV12
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV12} LV12:
.try { ldloc V_3
ldstr "MyStruct137::Method7.1190<System.Object>()#"
call void Generated87::M.IBase2.B.B<valuetype MyStruct137`1<class BaseClass0>>(!!0,string) leave.s LV13
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV13} LV13:
.try { ldloc V_3
ldstr "MyStruct137::Method4.MI.1192()#MyStruct137::Method5.MI.1194()#MyStruct137::Method6.MI.1196<System.Object>()#"
call void Generated87::M.IBase1.T<class BaseClass0,valuetype MyStruct137`1<class BaseClass0>>(!!1,string) leave.s LV14
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV14} LV14:
.try { ldloc V_3
ldstr "MyStruct137::Method4.MI.1192()#MyStruct137::Method5.MI.1194()#MyStruct137::Method6.MI.1196<System.Object>()#"
call void Generated87::M.IBase1.A<valuetype MyStruct137`1<class BaseClass0>>(!!0,string) leave.s LV15
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV15} LV15:
.locals init (valuetype MyStruct137`1<class BaseClass1> V_4)
ldloca V_4
initobj valuetype MyStruct137`1<class BaseClass1>
.try { ldloc V_4
ldstr "MyStruct137::Method7.1190<System.Object>()#"
call void Generated87::M.IBase2.T.T<class BaseClass1,class BaseClass1,valuetype MyStruct137`1<class BaseClass1>>(!!2,string) leave.s LV16
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV16} LV16:
.try { ldloc V_4
ldstr "MyStruct137::Method7.1190<System.Object>()#"
call void Generated87::M.IBase2.B.T<class BaseClass1,valuetype MyStruct137`1<class BaseClass1>>(!!1,string) leave.s LV17
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV17} LV17:
.try { ldloc V_4
ldstr "MyStruct137::Method7.1190<System.Object>()#"
call void Generated87::M.IBase2.B.B<valuetype MyStruct137`1<class BaseClass1>>(!!0,string) leave.s LV18
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV18} LV18:
.try { ldloc V_4
ldstr "MyStruct137::Method4.MI.1192()#MyStruct137::Method5.MI.1194()#MyStruct137::Method6.MI.1196<System.Object>()#"
call void Generated87::M.IBase1.T<class BaseClass1,valuetype MyStruct137`1<class BaseClass1>>(!!1,string) leave.s LV19
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV19} LV19:
.try { ldloc V_4
ldstr "MyStruct137::Method4.MI.1192()#MyStruct137::Method5.MI.1194()#MyStruct137::Method6.MI.1196<System.Object>()#"
call void Generated87::M.IBase1.B<valuetype MyStruct137`1<class BaseClass1>>(!!0,string) leave.s LV20
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV20} LV20:
.try { ldloc V_4
ldstr "MyStruct137::Method7.1190<System.Object>()#"
call void Generated87::M.IBase2.T.T<class BaseClass0,class BaseClass1,valuetype MyStruct137`1<class BaseClass1>>(!!2,string) leave.s LV21
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV21} LV21:
.try { ldloc V_4
ldstr "MyStruct137::Method7.1190<System.Object>()#"
call void Generated87::M.IBase2.A.T<class BaseClass1,valuetype MyStruct137`1<class BaseClass1>>(!!1,string) leave.s LV22
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV22} LV22:
.try { ldloc V_4
ldstr "MyStruct137::Method7.1190<System.Object>()#"
call void Generated87::M.IBase2.A.B<valuetype MyStruct137`1<class BaseClass1>>(!!0,string) leave.s LV23
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV23} LV23:
.try { ldloc V_4
ldstr "MyStruct137::Method4.MI.1192()#MyStruct137::Method5.MI.1194()#MyStruct137::Method6.MI.1196<System.Object>()#"
call void Generated87::M.IBase1.T<class BaseClass0,valuetype MyStruct137`1<class BaseClass1>>(!!1,string) leave.s LV24
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV24} LV24:
.try { ldloc V_4
ldstr "MyStruct137::Method4.MI.1192()#MyStruct137::Method5.MI.1194()#MyStruct137::Method6.MI.1196<System.Object>()#"
call void Generated87::M.IBase1.A<valuetype MyStruct137`1<class BaseClass1>>(!!0,string) leave.s LV25
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV25} LV25:
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
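// StructConstrainedInterfaceCallsTest: uses the M.MyStruct137.* helpers, whose calls are
// prefixed with 'constrained. valuetype MyStruct137`1<...>' so the interface methods dispatch
// on the value type directly, without boxing.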
.method public hidebysig static void StructConstrainedInterfaceCallsTest() cil managed
{
.maxstack 10
ldstr "===================== Struct Constrained Interface Calls Test ====================="
call void [mscorlib]System.Console::WriteLine(string)
.locals init (valuetype MyStruct137`1<class BaseClass0> V_5)
ldloca V_5
initobj valuetype MyStruct137`1<class BaseClass0>
.try { ldloc V_5
ldstr "MyStruct137::Method7.1190<System.Object>()#" +
"MyStruct137::Method4.MI.1192()#" +
"MyStruct137::Method5.MI.1194()#" +
"MyStruct137::Method6.MI.1196<System.Object>()#"
call void Generated87::M.MyStruct137.T<class BaseClass0,valuetype MyStruct137`1<class BaseClass0>>(!!1,string) leave.s LV0
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV0} LV0:
.try { ldloc V_5
ldstr "MyStruct137::Method7.1190<System.Object>()#" +
"MyStruct137::Method4.MI.1192()#" +
"MyStruct137::Method5.MI.1194()#" +
"MyStruct137::Method6.MI.1196<System.Object>()#"
call void Generated87::M.MyStruct137.A<valuetype MyStruct137`1<class BaseClass0>>(!!0,string) leave.s LV1
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV1} LV1:
.locals init (valuetype MyStruct137`1<class BaseClass1> V_6)
ldloca V_6
initobj valuetype MyStruct137`1<class BaseClass1>
.try { ldloc V_6
ldstr "MyStruct137::Method7.1190<System.Object>()#" +
"MyStruct137::Method4.MI.1192()#" +
"MyStruct137::Method5.MI.1194()#" +
"MyStruct137::Method6.MI.1196<System.Object>()#"
call void Generated87::M.MyStruct137.T<class BaseClass1,valuetype MyStruct137`1<class BaseClass1>>(!!1,string) leave.s LV2
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV2} LV2:
.try { ldloc V_6
ldstr "MyStruct137::Method7.1190<System.Object>()#" +
"MyStruct137::Method4.MI.1192()#" +
"MyStruct137::Method5.MI.1194()#" +
"MyStruct137::Method6.MI.1196<System.Object>()#"
call void Generated87::M.MyStruct137.B<valuetype MyStruct137`1<class BaseClass1>>(!!0,string) leave.s LV3
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV3} LV3:
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
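// CalliTest: boxes each MyStruct137`1 instantiation, resolves the target method pointer with
// ldvirtftn, and invokes it through 'calli default string(object)', checking the returned
// string with TestFramework::MethodCallTest.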
.method public hidebysig static void CalliTest() cil managed
{
.maxstack 10
.locals init (object V_0)
ldstr "========================== Method Calli Test =========================="
call void [mscorlib]System.Console::WriteLine(string)
.locals init (valuetype MyStruct137`1<class BaseClass0> V_7)
ldloca V_7
initobj valuetype MyStruct137`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldvirtftn instance string valuetype MyStruct137`1<class BaseClass0>::Method7<object>()
calli default string(object)
ldstr "MyStruct137::Method7.1190<System.Object>()"
ldstr "valuetype MyStruct137`1<class BaseClass0> on type valuetype MyStruct137`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldvirtftn instance string valuetype MyStruct137`1<class BaseClass0>::Method4()
calli default string(object)
ldstr "MyStruct137::Method4.1191()"
ldstr "valuetype MyStruct137`1<class BaseClass0> on type valuetype MyStruct137`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldvirtftn instance string valuetype MyStruct137`1<class BaseClass0>::Method5()
calli default string(object)
ldstr "MyStruct137::Method5.1193()"
ldstr "valuetype MyStruct137`1<class BaseClass0> on type valuetype MyStruct137`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldvirtftn instance string valuetype MyStruct137`1<class BaseClass0>::Method6<object>()
calli default string(object)
ldstr "MyStruct137::Method6.1195<System.Object>()"
ldstr "valuetype MyStruct137`1<class BaseClass0> on type valuetype MyStruct137`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldvirtftn instance string valuetype MyStruct137`1<class BaseClass0>::ClassMethod272()
calli default string(object)
ldstr "MyStruct137::ClassMethod272.1197()"
ldstr "valuetype MyStruct137`1<class BaseClass0> on type valuetype MyStruct137`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldvirtftn instance string valuetype MyStruct137`1<class BaseClass0>::ClassMethod273()
calli default string(object)
ldstr "MyStruct137::ClassMethod273.1198()"
ldstr "valuetype MyStruct137`1<class BaseClass0> on type valuetype MyStruct137`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldnull
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldvirtftn instance bool valuetype MyStruct137`1<class BaseClass0>::Equals(object)
calli default bool(object,object)
pop
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldvirtftn instance int32 valuetype MyStruct137`1<class BaseClass0>::GetHashCode()
calli default int32(object)
pop
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldvirtftn instance string valuetype MyStruct137`1<class BaseClass0>::ToString()
calli default string(object)
pop
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>()
calli default string(object)
ldstr "MyStruct137::Method7.1190<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct137`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4()
calli default string(object)
ldstr "MyStruct137::Method4.MI.1192()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct137`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5()
calli default string(object)
ldstr "MyStruct137::Method5.MI.1194()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct137`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>()
calli default string(object)
ldstr "MyStruct137::Method6.MI.1196<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct137`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
calli default string(object)
ldstr "MyStruct137::Method7.1190<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct137`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
calli default string(object)
ldstr "MyStruct137::Method7.1190<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct137`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(object)
ldstr "MyStruct137::Method7.1190<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct137`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4()
calli default string(object)
ldstr "MyStruct137::Method4.MI.1192()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct137`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5()
calli default string(object)
ldstr "MyStruct137::Method5.MI.1194()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct137`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>()
calli default string(object)
ldstr "MyStruct137::Method6.MI.1196<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct137`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
.locals init (valuetype MyStruct137`1<class BaseClass1> V_8)
ldloca V_8
initobj valuetype MyStruct137`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldvirtftn instance string valuetype MyStruct137`1<class BaseClass1>::Method7<object>()
calli default string(object)
ldstr "MyStruct137::Method7.1190<System.Object>()"
ldstr "valuetype MyStruct137`1<class BaseClass1> on type valuetype MyStruct137`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldvirtftn instance string valuetype MyStruct137`1<class BaseClass1>::Method4()
calli default string(object)
ldstr "MyStruct137::Method4.1191()"
ldstr "valuetype MyStruct137`1<class BaseClass1> on type valuetype MyStruct137`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldvirtftn instance string valuetype MyStruct137`1<class BaseClass1>::Method5()
calli default string(object)
ldstr "MyStruct137::Method5.1193()"
ldstr "valuetype MyStruct137`1<class BaseClass1> on type valuetype MyStruct137`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldvirtftn instance string valuetype MyStruct137`1<class BaseClass1>::Method6<object>()
calli default string(object)
ldstr "MyStruct137::Method6.1195<System.Object>()"
ldstr "valuetype MyStruct137`1<class BaseClass1> on type valuetype MyStruct137`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldvirtftn instance string valuetype MyStruct137`1<class BaseClass1>::ClassMethod272()
calli default string(object)
ldstr "MyStruct137::ClassMethod272.1197()"
ldstr "valuetype MyStruct137`1<class BaseClass1> on type valuetype MyStruct137`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldvirtftn instance string valuetype MyStruct137`1<class BaseClass1>::ClassMethod273()
calli default string(object)
ldstr "MyStruct137::ClassMethod273.1198()"
ldstr "valuetype MyStruct137`1<class BaseClass1> on type valuetype MyStruct137`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldnull
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldvirtftn instance bool valuetype MyStruct137`1<class BaseClass1>::Equals(object)
calli default bool(object,object)
pop
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldvirtftn instance int32 valuetype MyStruct137`1<class BaseClass1>::GetHashCode()
calli default int32(object)
pop
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldvirtftn instance string valuetype MyStruct137`1<class BaseClass1>::ToString()
calli default string(object)
pop
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(object)
ldstr "MyStruct137::Method7.1190<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct137`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4()
calli default string(object)
ldstr "MyStruct137::Method4.MI.1192()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct137`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5()
calli default string(object)
ldstr "MyStruct137::Method5.MI.1194()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct137`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>()
calli default string(object)
ldstr "MyStruct137::Method6.MI.1196<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct137`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
calli default string(object)
ldstr "MyStruct137::Method7.1190<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct137`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4()
calli default string(object)
ldstr "MyStruct137::Method4.MI.1192()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct137`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5()
calli default string(object)
ldstr "MyStruct137::Method5.MI.1194()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct137`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>()
calli default string(object)
ldstr "MyStruct137::Method6.MI.1196<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct137`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
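// Main: xunit-annotated entry point; runs the four test groups above and returns 100 on success.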
.method public hidebysig static int32 Main() cil managed
{
.custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = (
01 00 00 00
)
.entrypoint
.maxstack 10
call void Generated87::MethodCallingTest()
call void Generated87::ConstrainedCallsTest()
call void Generated87::StructConstrainedInterfaceCallsTest()
call void Generated87::CalliTest()
ldc.i4 100
ret
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
.assembly extern mscorlib { .publickeytoken = (B7 7A 5C 56 19 34 E0 89 ) .ver 4:0:0:0 }
.assembly extern TestFramework { .publickeytoken = ( B0 3F 5F 7F 11 D5 0A 3A ) }
//TYPES IN FORWARDER ASSEMBLIES:
//TEST ASSEMBLY:
.assembly Generated87 { .hash algorithm 0x00008004 }
.assembly extern xunit.core {}
.class public BaseClass0
{
.method public hidebysig specialname rtspecialname instance void .ctor() cil managed {
ldarg.0
call instance void [mscorlib]System.Object::.ctor()
ret
}
}
.class public BaseClass1
extends BaseClass0
{
.method public hidebysig specialname rtspecialname instance void .ctor() cil managed {
ldarg.0
call instance void BaseClass0::.ctor()
ret
}
}
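// MyStruct137`1<T0>: sealed generic value type implementing IBase2`2<class BaseClass1,!T0> and
// IBase1`1<class BaseClass1>. The 'IBase1<class BaseClass1>.MethodN' members carry .override
// directives (MethodImpls), so interface dispatch returns the *.MI.* strings while direct calls
// on the struct return the plain MethodN strings.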
.class public sequential sealed MyStruct137`1<T0>
extends [mscorlib]System.ValueType
implements class IBase2`2<class BaseClass1,!T0>, class IBase1`1<class BaseClass1>
{
.pack 0
.size 1
.method public hidebysig virtual instance string Method7<M0>() cil managed noinlining {
ldstr "MyStruct137::Method7.1190<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot virtual instance string Method4() cil managed noinlining {
ldstr "MyStruct137::Method4.1191()"
ret
}
.method public hidebysig newslot virtual instance string 'IBase1<class BaseClass1>.Method4'() cil managed noinlining {
.override method instance string class IBase1`1<class BaseClass1>::Method4()
ldstr "MyStruct137::Method4.MI.1192()"
ret
}
.method public hidebysig virtual instance string Method5() cil managed noinlining {
ldstr "MyStruct137::Method5.1193()"
ret
}
.method public hidebysig newslot virtual instance string 'IBase1<class BaseClass1>.Method5'() cil managed noinlining {
.override method instance string class IBase1`1<class BaseClass1>::Method5()
ldstr "MyStruct137::Method5.MI.1194()"
ret
}
.method public hidebysig virtual instance string Method6<M0>() cil managed noinlining {
ldstr "MyStruct137::Method6.1195<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot virtual instance string 'IBase1<class BaseClass1>.Method6'<M0>() cil managed noinlining {
.override method instance string class IBase1`1<class BaseClass1>::Method6<[1]>()
ldstr "MyStruct137::Method6.MI.1196<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot instance string ClassMethod272() cil managed noinlining {
ldstr "MyStruct137::ClassMethod272.1197()"
ret
}
.method public hidebysig newslot instance string ClassMethod273() cil managed noinlining {
ldstr "MyStruct137::ClassMethod273.1198()"
ret
}
.method public hidebysig virtual instance bool Equals(object obj) cil managed { ldc.i4.0 ret }
.method public hidebysig virtual instance int32 GetHashCode() cil managed { ldc.i4.0 ret }
.method public hidebysig virtual instance string ToString() cil managed { ldstr "" ret }
}
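// Variance on the test interfaces: IBase2`2 is covariant in T0 and contravariant in T1;
// IBase1`1 is covariant in T0.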
.class interface public abstract IBase2`2<+T0, -T1>
{
.method public hidebysig newslot abstract virtual instance string Method7<M0>() cil managed { }
}
.class interface public abstract IBase1`1<+T0>
{
.method public hidebysig newslot abstract virtual instance string Method4() cil managed { }
.method public hidebysig newslot abstract virtual instance string Method5() cil managed { }
.method public hidebysig newslot abstract virtual instance string Method6<M0>() cil managed { }
}
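// Generated87: test harness. The static M.* helpers below collect method-call results from a
// generic argument and pass them to TestFramework::MethodCallTest for comparison with the
// '#'-separated expected string.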
.class public auto ansi beforefieldinit Generated87 {
.method static void M.BaseClass0<(BaseClass0)W>(!!W inst, string exp) cil managed {
.maxstack 5
.locals init (string[] actualResults)
ldc.i4.s 0
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.BaseClass0<(BaseClass0)W>(!!W inst, string exp)"
ldc.i4.s 0
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.BaseClass1<(BaseClass1)W>(!!W inst, string exp) cil managed {
.maxstack 5
.locals init (string[] actualResults)
ldc.i4.s 0
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.BaseClass1<(BaseClass1)W>(!!W inst, string exp)"
ldc.i4.s 0
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
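// The M.IBase2.* and M.IBase1.* helpers constrain W to the named interface instantiation and
// invoke its methods through 'constrained. !!W' callvirt, storing each result in actualResults.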
.method static void M.IBase2.T.T<T0,T1,(class IBase2`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.T.T<T0,T1,(class IBase2`2<!!T0,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<!!T0,!!T1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.A.T<T1,(class IBase2`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.A.T<T1,(class IBase2`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass0,!!T1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.A.A<(class IBase2`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.A.A<(class IBase2`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.A.B<(class IBase2`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.A.B<(class IBase2`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.B.T<T1,(class IBase2`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.B.T<T1,(class IBase2`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass1,!!T1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.B.A<(class IBase2`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.B.A<(class IBase2`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.B.B<(class IBase2`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.B.B<(class IBase2`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase1.T<T0,(class IBase1`1<!!T0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 8
.locals init (string[] actualResults)
ldc.i4.s 3
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase1.T<T0,(class IBase1`1<!!T0>)W>(!!W 'inst', string exp)"
ldc.i4.s 3
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<!!T0>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<!!T0>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<!!T0>::Method6<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase1.A<(class IBase1`1<class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 8
.locals init (string[] actualResults)
ldc.i4.s 3
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase1.A<(class IBase1`1<class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 3
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass0>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass0>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase1.B<(class IBase1`1<class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 8
.locals init (string[] actualResults)
ldc.i4.s 3
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase1.B<(class IBase1`1<class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 3
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
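// The M.MyStruct137.* helpers constrain W to a concrete MyStruct137`1 instantiation and call
// all four interface methods (IBase2`2::Method7 plus IBase1`1::Method4/5/6) on the unboxed value.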
.method static void M.MyStruct137.T<T0,(valuetype MyStruct137`1<!!T0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 9
.locals init (string[] actualResults)
ldc.i4.s 4
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.MyStruct137.T<T0,(valuetype MyStruct137`1<!!T0>)W>(!!W 'inst', string exp)"
ldc.i4.s 4
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. valuetype MyStruct137`1<!!T0>
callvirt instance string class IBase2`2<class BaseClass1,!!T0>::Method7<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. valuetype MyStruct137`1<!!T0>
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. valuetype MyStruct137`1<!!T0>
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. valuetype MyStruct137`1<!!T0>
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.MyStruct137.A<(valuetype MyStruct137`1<class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 9
.locals init (string[] actualResults)
ldc.i4.s 4
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.MyStruct137.A<(valuetype MyStruct137`1<class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 4
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. valuetype MyStruct137`1<class BaseClass0>
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. valuetype MyStruct137`1<class BaseClass0>
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. valuetype MyStruct137`1<class BaseClass0>
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. valuetype MyStruct137`1<class BaseClass0>
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.MyStruct137.B<(valuetype MyStruct137`1<class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 9
.locals init (string[] actualResults)
ldc.i4.s 4
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.MyStruct137.B<(valuetype MyStruct137`1<class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 4
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. valuetype MyStruct137`1<class BaseClass1>
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. valuetype MyStruct137`1<class BaseClass1>
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. valuetype MyStruct137`1<class BaseClass1>
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. valuetype MyStruct137`1<class BaseClass1>
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
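// MethodCallingTest: first calls the struct's methods directly through a managed pointer
// (ldloca + call), then boxes the value and invokes the interface methods via callvirt.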
.method public hidebysig static void MethodCallingTest() cil managed
{
.maxstack 10
.locals init (object V_0)
ldstr "========================== Method Calling Test =========================="
call void [mscorlib]System.Console::WriteLine(string)
.locals init (valuetype MyStruct137`1<class BaseClass0> V_1)
ldloca V_1
initobj valuetype MyStruct137`1<class BaseClass0>
ldloca V_1
dup
call instance string valuetype MyStruct137`1<class BaseClass0>::Method7<object>()
ldstr "MyStruct137::Method7.1190<System.Object>()"
ldstr "valuetype MyStruct137`1<class BaseClass0> on type MyStruct137"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct137`1<class BaseClass0>::Method4()
ldstr "MyStruct137::Method4.1191()"
ldstr "valuetype MyStruct137`1<class BaseClass0> on type MyStruct137"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct137`1<class BaseClass0>::Method5()
ldstr "MyStruct137::Method5.1193()"
ldstr "valuetype MyStruct137`1<class BaseClass0> on type MyStruct137"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct137`1<class BaseClass0>::Method6<object>()
ldstr "MyStruct137::Method6.1195<System.Object>()"
ldstr "valuetype MyStruct137`1<class BaseClass0> on type MyStruct137"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct137`1<class BaseClass0>::ClassMethod272()
ldstr "MyStruct137::ClassMethod272.1197()"
ldstr "valuetype MyStruct137`1<class BaseClass0> on type MyStruct137"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct137`1<class BaseClass0>::ClassMethod273()
ldstr "MyStruct137::ClassMethod273.1198()"
ldstr "valuetype MyStruct137`1<class BaseClass0> on type MyStruct137"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
ldnull
call instance bool valuetype MyStruct137`1<class BaseClass0>::Equals(object)
pop
dup
call instance int32 valuetype MyStruct137`1<class BaseClass0>::GetHashCode()
pop
dup
call instance string valuetype MyStruct137`1<class BaseClass0>::ToString()
pop
pop
ldloc V_1
box valuetype MyStruct137`1<class BaseClass0>
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>()
ldstr "MyStruct137::Method7.1190<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct137`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc V_1
box valuetype MyStruct137`1<class BaseClass0>
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
ldstr "MyStruct137::Method4.MI.1192()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct137`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
ldstr "MyStruct137::Method5.MI.1194()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct137`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
ldstr "MyStruct137::Method6.MI.1196<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct137`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc V_1
box valuetype MyStruct137`1<class BaseClass0>
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
ldstr "MyStruct137::Method7.1190<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct137`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc V_1
box valuetype MyStruct137`1<class BaseClass0>
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
ldstr "MyStruct137::Method7.1190<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct137`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc V_1
box valuetype MyStruct137`1<class BaseClass0>
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "MyStruct137::Method7.1190<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct137`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc V_1
box valuetype MyStruct137`1<class BaseClass0>
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method4()
ldstr "MyStruct137::Method4.MI.1192()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct137`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method5()
ldstr "MyStruct137::Method5.MI.1194()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct137`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>()
ldstr "MyStruct137::Method6.MI.1196<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct137`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
.locals init (valuetype MyStruct137`1<class BaseClass1> V_2)
ldloca V_2
initobj valuetype MyStruct137`1<class BaseClass1>
ldloca V_2
dup
call instance string valuetype MyStruct137`1<class BaseClass1>::Method7<object>()
ldstr "MyStruct137::Method7.1190<System.Object>()"
ldstr "valuetype MyStruct137`1<class BaseClass1> on type MyStruct137"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct137`1<class BaseClass1>::Method4()
ldstr "MyStruct137::Method4.1191()"
ldstr "valuetype MyStruct137`1<class BaseClass1> on type MyStruct137"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct137`1<class BaseClass1>::Method5()
ldstr "MyStruct137::Method5.1193()"
ldstr "valuetype MyStruct137`1<class BaseClass1> on type MyStruct137"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct137`1<class BaseClass1>::Method6<object>()
ldstr "MyStruct137::Method6.1195<System.Object>()"
ldstr "valuetype MyStruct137`1<class BaseClass1> on type MyStruct137"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct137`1<class BaseClass1>::ClassMethod272()
ldstr "MyStruct137::ClassMethod272.1197()"
ldstr "valuetype MyStruct137`1<class BaseClass1> on type MyStruct137"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct137`1<class BaseClass1>::ClassMethod273()
ldstr "MyStruct137::ClassMethod273.1198()"
ldstr "valuetype MyStruct137`1<class BaseClass1> on type MyStruct137"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
ldnull
call instance bool valuetype MyStruct137`1<class BaseClass1>::Equals(object)
pop
dup
call instance int32 valuetype MyStruct137`1<class BaseClass1>::GetHashCode()
pop
dup
call instance string valuetype MyStruct137`1<class BaseClass1>::ToString()
pop
pop
ldloc V_2
box valuetype MyStruct137`1<class BaseClass1>
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "MyStruct137::Method7.1190<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct137`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc V_2
box valuetype MyStruct137`1<class BaseClass1>
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
ldstr "MyStruct137::Method4.MI.1192()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct137`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
ldstr "MyStruct137::Method5.MI.1194()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct137`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
ldstr "MyStruct137::Method6.MI.1196<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct137`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc V_2
box valuetype MyStruct137`1<class BaseClass1>
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
ldstr "MyStruct137::Method7.1190<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct137`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc V_2
box valuetype MyStruct137`1<class BaseClass1>
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method4()
ldstr "MyStruct137::Method4.MI.1192()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct137`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method5()
ldstr "MyStruct137::Method5.MI.1194()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct137`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>()
ldstr "MyStruct137::Method6.MI.1196<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct137`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
.method public hidebysig static void ConstrainedCallsTest() cil managed
{
.maxstack 10
.locals init (object V_0)
ldstr "========================== Constrained Calls Test =========================="
call void [mscorlib]System.Console::WriteLine(string)
.locals init (valuetype MyStruct137`1<class BaseClass0> V_3)
ldloca V_3
initobj valuetype MyStruct137`1<class BaseClass0>
.try { ldloc V_3
ldstr "MyStruct137::Method7.1190<System.Object>()#"
call void Generated87::M.IBase2.T.T<class BaseClass1,class BaseClass0,valuetype MyStruct137`1<class BaseClass0>>(!!2,string) leave.s LV0
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV0} LV0:
.try { ldloc V_3
ldstr "MyStruct137::Method7.1190<System.Object>()#"
call void Generated87::M.IBase2.B.T<class BaseClass0,valuetype MyStruct137`1<class BaseClass0>>(!!1,string) leave.s LV1
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV1} LV1:
.try { ldloc V_3
ldstr "MyStruct137::Method7.1190<System.Object>()#"
call void Generated87::M.IBase2.B.A<valuetype MyStruct137`1<class BaseClass0>>(!!0,string) leave.s LV2
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV2} LV2:
.try { ldloc V_3
ldstr "MyStruct137::Method4.MI.1192()#MyStruct137::Method5.MI.1194()#MyStruct137::Method6.MI.1196<System.Object>()#"
call void Generated87::M.IBase1.T<class BaseClass1,valuetype MyStruct137`1<class BaseClass0>>(!!1,string) leave.s LV3
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV3} LV3:
.try { ldloc V_3
ldstr "MyStruct137::Method4.MI.1192()#MyStruct137::Method5.MI.1194()#MyStruct137::Method6.MI.1196<System.Object>()#"
call void Generated87::M.IBase1.B<valuetype MyStruct137`1<class BaseClass0>>(!!0,string) leave.s LV4
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV4} LV4:
.try { ldloc V_3
ldstr "MyStruct137::Method7.1190<System.Object>()#"
call void Generated87::M.IBase2.T.T<class BaseClass0,class BaseClass0,valuetype MyStruct137`1<class BaseClass0>>(!!2,string) leave.s LV5
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV5} LV5:
.try { ldloc V_3
ldstr "MyStruct137::Method7.1190<System.Object>()#"
call void Generated87::M.IBase2.A.T<class BaseClass0,valuetype MyStruct137`1<class BaseClass0>>(!!1,string) leave.s LV6
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV6} LV6:
.try { ldloc V_3
ldstr "MyStruct137::Method7.1190<System.Object>()#"
call void Generated87::M.IBase2.A.A<valuetype MyStruct137`1<class BaseClass0>>(!!0,string) leave.s LV7
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV7} LV7:
.try { ldloc V_3
ldstr "MyStruct137::Method7.1190<System.Object>()#"
call void Generated87::M.IBase2.T.T<class BaseClass0,class BaseClass1,valuetype MyStruct137`1<class BaseClass0>>(!!2,string) leave.s LV8
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV8} LV8:
.try { ldloc V_3
ldstr "MyStruct137::Method7.1190<System.Object>()#"
call void Generated87::M.IBase2.A.T<class BaseClass1,valuetype MyStruct137`1<class BaseClass0>>(!!1,string) leave.s LV9
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV9} LV9:
.try { ldloc V_3
ldstr "MyStruct137::Method7.1190<System.Object>()#"
call void Generated87::M.IBase2.A.B<valuetype MyStruct137`1<class BaseClass0>>(!!0,string) leave.s LV10
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV10} LV10:
.try { ldloc V_3
ldstr "MyStruct137::Method7.1190<System.Object>()#"
call void Generated87::M.IBase2.T.T<class BaseClass1,class BaseClass1,valuetype MyStruct137`1<class BaseClass0>>(!!2,string) leave.s LV11
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV11} LV11:
.try { ldloc V_3
ldstr "MyStruct137::Method7.1190<System.Object>()#"
call void Generated87::M.IBase2.B.T<class BaseClass1,valuetype MyStruct137`1<class BaseClass0>>(!!1,string) leave.s LV12
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV12} LV12:
.try { ldloc V_3
ldstr "MyStruct137::Method7.1190<System.Object>()#"
call void Generated87::M.IBase2.B.B<valuetype MyStruct137`1<class BaseClass0>>(!!0,string) leave.s LV13
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV13} LV13:
.try { ldloc V_3
ldstr "MyStruct137::Method4.MI.1192()#MyStruct137::Method5.MI.1194()#MyStruct137::Method6.MI.1196<System.Object>()#"
call void Generated87::M.IBase1.T<class BaseClass0,valuetype MyStruct137`1<class BaseClass0>>(!!1,string) leave.s LV14
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV14} LV14:
.try { ldloc V_3
ldstr "MyStruct137::Method4.MI.1192()#MyStruct137::Method5.MI.1194()#MyStruct137::Method6.MI.1196<System.Object>()#"
call void Generated87::M.IBase1.A<valuetype MyStruct137`1<class BaseClass0>>(!!0,string) leave.s LV15
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV15} LV15:
.locals init (valuetype MyStruct137`1<class BaseClass1> V_4)
ldloca V_4
initobj valuetype MyStruct137`1<class BaseClass1>
.try { ldloc V_4
ldstr "MyStruct137::Method7.1190<System.Object>()#"
call void Generated87::M.IBase2.T.T<class BaseClass1,class BaseClass1,valuetype MyStruct137`1<class BaseClass1>>(!!2,string) leave.s LV16
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV16} LV16:
.try { ldloc V_4
ldstr "MyStruct137::Method7.1190<System.Object>()#"
call void Generated87::M.IBase2.B.T<class BaseClass1,valuetype MyStruct137`1<class BaseClass1>>(!!1,string) leave.s LV17
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV17} LV17:
.try { ldloc V_4
ldstr "MyStruct137::Method7.1190<System.Object>()#"
call void Generated87::M.IBase2.B.B<valuetype MyStruct137`1<class BaseClass1>>(!!0,string) leave.s LV18
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV18} LV18:
.try { ldloc V_4
ldstr "MyStruct137::Method4.MI.1192()#MyStruct137::Method5.MI.1194()#MyStruct137::Method6.MI.1196<System.Object>()#"
call void Generated87::M.IBase1.T<class BaseClass1,valuetype MyStruct137`1<class BaseClass1>>(!!1,string) leave.s LV19
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV19} LV19:
.try { ldloc V_4
ldstr "MyStruct137::Method4.MI.1192()#MyStruct137::Method5.MI.1194()#MyStruct137::Method6.MI.1196<System.Object>()#"
call void Generated87::M.IBase1.B<valuetype MyStruct137`1<class BaseClass1>>(!!0,string) leave.s LV20
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV20} LV20:
.try { ldloc V_4
ldstr "MyStruct137::Method7.1190<System.Object>()#"
call void Generated87::M.IBase2.T.T<class BaseClass0,class BaseClass1,valuetype MyStruct137`1<class BaseClass1>>(!!2,string) leave.s LV21
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV21} LV21:
.try { ldloc V_4
ldstr "MyStruct137::Method7.1190<System.Object>()#"
call void Generated87::M.IBase2.A.T<class BaseClass1,valuetype MyStruct137`1<class BaseClass1>>(!!1,string) leave.s LV22
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV22} LV22:
.try { ldloc V_4
ldstr "MyStruct137::Method7.1190<System.Object>()#"
call void Generated87::M.IBase2.A.B<valuetype MyStruct137`1<class BaseClass1>>(!!0,string) leave.s LV23
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV23} LV23:
.try { ldloc V_4
ldstr "MyStruct137::Method4.MI.1192()#MyStruct137::Method5.MI.1194()#MyStruct137::Method6.MI.1196<System.Object>()#"
call void Generated87::M.IBase1.T<class BaseClass0,valuetype MyStruct137`1<class BaseClass1>>(!!1,string) leave.s LV24
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV24} LV24:
.try { ldloc V_4
ldstr "MyStruct137::Method4.MI.1192()#MyStruct137::Method5.MI.1194()#MyStruct137::Method6.MI.1196<System.Object>()#"
call void Generated87::M.IBase1.A<valuetype MyStruct137`1<class BaseClass1>>(!!0,string) leave.s LV25
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV25} LV25:
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
.method public hidebysig static void StructConstrainedInterfaceCallsTest() cil managed
{
.maxstack 10
ldstr "===================== Struct Constrained Interface Calls Test ====================="
call void [mscorlib]System.Console::WriteLine(string)
.locals init (valuetype MyStruct137`1<class BaseClass0> V_5)
ldloca V_5
initobj valuetype MyStruct137`1<class BaseClass0>
.try { ldloc V_5
ldstr "MyStruct137::Method7.1190<System.Object>()#" +
"MyStruct137::Method4.MI.1192()#" +
"MyStruct137::Method5.MI.1194()#" +
"MyStruct137::Method6.MI.1196<System.Object>()#"
call void Generated87::M.MyStruct137.T<class BaseClass0,valuetype MyStruct137`1<class BaseClass0>>(!!1,string) leave.s LV0
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV0} LV0:
.try { ldloc V_5
ldstr "MyStruct137::Method7.1190<System.Object>()#" +
"MyStruct137::Method4.MI.1192()#" +
"MyStruct137::Method5.MI.1194()#" +
"MyStruct137::Method6.MI.1196<System.Object>()#"
call void Generated87::M.MyStruct137.A<valuetype MyStruct137`1<class BaseClass0>>(!!0,string) leave.s LV1
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV1} LV1:
.locals init (valuetype MyStruct137`1<class BaseClass1> V_6)
ldloca V_6
initobj valuetype MyStruct137`1<class BaseClass1>
.try { ldloc V_6
ldstr "MyStruct137::Method7.1190<System.Object>()#" +
"MyStruct137::Method4.MI.1192()#" +
"MyStruct137::Method5.MI.1194()#" +
"MyStruct137::Method6.MI.1196<System.Object>()#"
call void Generated87::M.MyStruct137.T<class BaseClass1,valuetype MyStruct137`1<class BaseClass1>>(!!1,string) leave.s LV2
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV2} LV2:
.try { ldloc V_6
ldstr "MyStruct137::Method7.1190<System.Object>()#" +
"MyStruct137::Method4.MI.1192()#" +
"MyStruct137::Method5.MI.1194()#" +
"MyStruct137::Method6.MI.1196<System.Object>()#"
call void Generated87::M.MyStruct137.B<valuetype MyStruct137`1<class BaseClass1>>(!!0,string) leave.s LV3
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV3} LV3:
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
.method public hidebysig static void CalliTest() cil managed
{
.maxstack 10
.locals init (object V_0)
ldstr "========================== Method Calli Test =========================="
call void [mscorlib]System.Console::WriteLine(string)
.locals init (valuetype MyStruct137`1<class BaseClass0> V_7)
ldloca V_7
initobj valuetype MyStruct137`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldvirtftn instance string valuetype MyStruct137`1<class BaseClass0>::Method7<object>()
calli default string(object)
ldstr "MyStruct137::Method7.1190<System.Object>()"
ldstr "valuetype MyStruct137`1<class BaseClass0> on type valuetype MyStruct137`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldvirtftn instance string valuetype MyStruct137`1<class BaseClass0>::Method4()
calli default string(object)
ldstr "MyStruct137::Method4.1191()"
ldstr "valuetype MyStruct137`1<class BaseClass0> on type valuetype MyStruct137`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldvirtftn instance string valuetype MyStruct137`1<class BaseClass0>::Method5()
calli default string(object)
ldstr "MyStruct137::Method5.1193()"
ldstr "valuetype MyStruct137`1<class BaseClass0> on type valuetype MyStruct137`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldvirtftn instance string valuetype MyStruct137`1<class BaseClass0>::Method6<object>()
calli default string(object)
ldstr "MyStruct137::Method6.1195<System.Object>()"
ldstr "valuetype MyStruct137`1<class BaseClass0> on type valuetype MyStruct137`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldvirtftn instance string valuetype MyStruct137`1<class BaseClass0>::ClassMethod272()
calli default string(object)
ldstr "MyStruct137::ClassMethod272.1197()"
ldstr "valuetype MyStruct137`1<class BaseClass0> on type valuetype MyStruct137`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldvirtftn instance string valuetype MyStruct137`1<class BaseClass0>::ClassMethod273()
calli default string(object)
ldstr "MyStruct137::ClassMethod273.1198()"
ldstr "valuetype MyStruct137`1<class BaseClass0> on type valuetype MyStruct137`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7 box valuetype MyStruct137`1<class BaseClass0> ldnull
ldloc V_7 box valuetype MyStruct137`1<class BaseClass0>
ldvirtftn instance bool valuetype MyStruct137`1<class BaseClass0>::Equals(object) calli default bool(object,object) pop
ldloc V_7 box valuetype MyStruct137`1<class BaseClass0>
ldloc V_7 box valuetype MyStruct137`1<class BaseClass0>
ldvirtftn instance int32 valuetype MyStruct137`1<class BaseClass0>::GetHashCode() calli default int32(object) pop
ldloc V_7 box valuetype MyStruct137`1<class BaseClass0>
ldloc V_7 box valuetype MyStruct137`1<class BaseClass0>
ldvirtftn instance string valuetype MyStruct137`1<class BaseClass0>::ToString() calli default string(object) pop
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>()
calli default string(object)
ldstr "MyStruct137::Method7.1190<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct137`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4()
calli default string(object)
ldstr "MyStruct137::Method4.MI.1192()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct137`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5()
calli default string(object)
ldstr "MyStruct137::Method5.MI.1194()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct137`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>()
calli default string(object)
ldstr "MyStruct137::Method6.MI.1196<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct137`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
calli default string(object)
ldstr "MyStruct137::Method7.1190<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct137`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
calli default string(object)
ldstr "MyStruct137::Method7.1190<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct137`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(object)
ldstr "MyStruct137::Method7.1190<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct137`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4()
calli default string(object)
ldstr "MyStruct137::Method4.MI.1192()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct137`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5()
calli default string(object)
ldstr "MyStruct137::Method5.MI.1194()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct137`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct137`1<class BaseClass0>
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>()
calli default string(object)
ldstr "MyStruct137::Method6.MI.1196<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct137`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
.locals init (valuetype MyStruct137`1<class BaseClass1> V_8)
ldloca V_8
initobj valuetype MyStruct137`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldvirtftn instance string valuetype MyStruct137`1<class BaseClass1>::Method7<object>()
calli default string(object)
ldstr "MyStruct137::Method7.1190<System.Object>()"
ldstr "valuetype MyStruct137`1<class BaseClass1> on type valuetype MyStruct137`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldvirtftn instance string valuetype MyStruct137`1<class BaseClass1>::Method4()
calli default string(object)
ldstr "MyStruct137::Method4.1191()"
ldstr "valuetype MyStruct137`1<class BaseClass1> on type valuetype MyStruct137`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldvirtftn instance string valuetype MyStruct137`1<class BaseClass1>::Method5()
calli default string(object)
ldstr "MyStruct137::Method5.1193()"
ldstr "valuetype MyStruct137`1<class BaseClass1> on type valuetype MyStruct137`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldvirtftn instance string valuetype MyStruct137`1<class BaseClass1>::Method6<object>()
calli default string(object)
ldstr "MyStruct137::Method6.1195<System.Object>()"
ldstr "valuetype MyStruct137`1<class BaseClass1> on type valuetype MyStruct137`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldvirtftn instance string valuetype MyStruct137`1<class BaseClass1>::ClassMethod272()
calli default string(object)
ldstr "MyStruct137::ClassMethod272.1197()"
ldstr "valuetype MyStruct137`1<class BaseClass1> on type valuetype MyStruct137`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldvirtftn instance string valuetype MyStruct137`1<class BaseClass1>::ClassMethod273()
calli default string(object)
ldstr "MyStruct137::ClassMethod273.1198()"
ldstr "valuetype MyStruct137`1<class BaseClass1> on type valuetype MyStruct137`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8 box valuetype MyStruct137`1<class BaseClass1> ldnull
ldloc V_8 box valuetype MyStruct137`1<class BaseClass1>
ldvirtftn instance bool valuetype MyStruct137`1<class BaseClass1>::Equals(object) calli default bool(object,object) pop
ldloc V_8 box valuetype MyStruct137`1<class BaseClass1>
ldloc V_8 box valuetype MyStruct137`1<class BaseClass1>
ldvirtftn instance int32 valuetype MyStruct137`1<class BaseClass1>::GetHashCode() calli default int32(object) pop
ldloc V_8 box valuetype MyStruct137`1<class BaseClass1>
ldloc V_8 box valuetype MyStruct137`1<class BaseClass1>
ldvirtftn instance string valuetype MyStruct137`1<class BaseClass1>::ToString() calli default string(object) pop
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(object)
ldstr "MyStruct137::Method7.1190<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct137`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4()
calli default string(object)
ldstr "MyStruct137::Method4.MI.1192()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct137`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5()
calli default string(object)
ldstr "MyStruct137::Method5.MI.1194()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct137`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>()
calli default string(object)
ldstr "MyStruct137::Method6.MI.1196<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type valuetype MyStruct137`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
calli default string(object)
ldstr "MyStruct137::Method7.1190<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct137`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4()
calli default string(object)
ldstr "MyStruct137::Method4.MI.1192()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct137`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5()
calli default string(object)
ldstr "MyStruct137::Method5.MI.1194()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct137`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct137`1<class BaseClass1>
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>()
calli default string(object)
ldstr "MyStruct137::Method6.MI.1196<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type valuetype MyStruct137`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
.method public hidebysig static int32 Main() cil managed
{
.custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = (
01 00 00 00
)
.entrypoint
.maxstack 10
call void Generated87::MethodCallingTest()
call void Generated87::ConstrainedCallsTest()
call void Generated87::StructConstrainedInterfaceCallsTest()
call void Generated87::CalliTest()
ldc.i4 100
ret
}
}
| -1 |
dotnet/runtime | 65,924 | Fix libraries GCStress pipeline | Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| BruceForstall | "2022-02-27T04:30:37Z" | "2022-03-01T18:20:06Z" | dfe08b3bea9a1897562635307ab4bb70bc31fada | 79026a542a8364b6783c5d212bb1841ac2c873a9 | Fix libraries GCStress pipeline. Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| ./src/tests/JIT/Regression/CLR-x86-JIT/V1-M12-Beta2/b50033/b50033.ilproj | <Project Sdk="Microsoft.NET.Sdk.IL">
<PropertyGroup>
<OutputType>Exe</OutputType>
</PropertyGroup>
<PropertyGroup>
<DebugType>PdbOnly</DebugType>
<Optimize>True</Optimize>
</PropertyGroup>
<ItemGroup>
<Compile Include="$(MSBuildProjectName).il" />
</ItemGroup>
</Project>
| <Project Sdk="Microsoft.NET.Sdk.IL">
<PropertyGroup>
<OutputType>Exe</OutputType>
</PropertyGroup>
<PropertyGroup>
<DebugType>PdbOnly</DebugType>
<Optimize>True</Optimize>
</PropertyGroup>
<ItemGroup>
<Compile Include="$(MSBuildProjectName).il" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime | 65,924 | Fix libraries GCStress pipeline | Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| BruceForstall | "2022-02-27T04:30:37Z" | "2022-03-01T18:20:06Z" | dfe08b3bea9a1897562635307ab4bb70bc31fada | 79026a542a8364b6783c5d212bb1841ac2c873a9 | Fix libraries GCStress pipeline. Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| ./src/libraries/System.Private.Xml/tests/Xslt/TestFiles/TestData/XsltScenarios/EXslt/sets-difference.xsl | <?xml version="1.0" encoding="UTF-8" ?>
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:set="http://exslt.org/sets" exclude-result-prefixes="set">
<xsl:output indent="yes" omit-xml-declaration="yes"/>
<xsl:template match="data">
<out>
<test1>
<xsl:copy-of select="set:difference(set/*, set/foo[@bar])"/>
</test1>
<test2>
<xsl:copy-of select="set:difference(set/*, set/*)"/>
</test2>
<test3>
<xsl:copy-of select="set:difference(/no/such/nodes, set/foo[@bar])"/>
</test3>
</out>
</xsl:template>
</xsl:stylesheet>
| <?xml version="1.0" encoding="UTF-8" ?>
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:set="http://exslt.org/sets" exclude-result-prefixes="set">
<xsl:output indent="yes" omit-xml-declaration="yes"/>
<xsl:template match="data">
<out>
<test1>
<xsl:copy-of select="set:difference(set/*, set/foo[@bar])"/>
</test1>
<test2>
<xsl:copy-of select="set:difference(set/*, set/*)"/>
</test2>
<test3>
<xsl:copy-of select="set:difference(/no/such/nodes, set/foo[@bar])"/>
</test3>
</out>
</xsl:template>
</xsl:stylesheet>
| -1 |
dotnet/runtime | 65,924 | Fix libraries GCStress pipeline | Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| BruceForstall | "2022-02-27T04:30:37Z" | "2022-03-01T18:20:06Z" | dfe08b3bea9a1897562635307ab4bb70bc31fada | 79026a542a8364b6783c5d212bb1841ac2c873a9 | Fix libraries GCStress pipeline. Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| ./src/libraries/System.Net.NetworkInformation/src/System/Net/NetworkInformation/SystemMulticastIPAddressInformation.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
namespace System.Net.NetworkInformation
{
// Specifies the Multicast addresses for an interface.
internal sealed class SystemMulticastIPAddressInformation : MulticastIPAddressInformation
{
private readonly SystemIPAddressInformation _innerInfo;
public SystemMulticastIPAddressInformation(SystemIPAddressInformation addressInfo)
{
_innerInfo = addressInfo;
}
public override IPAddress Address { get { return _innerInfo.Address; } }
// The address is a cluster address and shouldn't be used by most applications.
public override bool IsTransient
{
get
{
return (_innerInfo.IsTransient);
}
}
// This address can be used for DNS.
public override bool IsDnsEligible
{
get
{
return (_innerInfo.IsDnsEligible);
}
}
public override PrefixOrigin PrefixOrigin
{
get
{
return PrefixOrigin.Other;
}
}
public override SuffixOrigin SuffixOrigin
{
get
{
return SuffixOrigin.Other;
}
}
public override DuplicateAddressDetectionState DuplicateAddressDetectionState
{
get
{
return DuplicateAddressDetectionState.Invalid;
}
}
// Specifies the valid lifetime of the address in seconds.
public override long AddressValidLifetime
{
get
{
return 0;
}
}
// Specifies the preferred lifetime of the address in seconds.
public override long AddressPreferredLifetime
{
get
{
return 0;
}
}
// Specifies the DHCP lease lifetime of the address in seconds.
public override long DhcpLeaseLifetime
{
get
{
return 0;
}
}
internal static MulticastIPAddressInformationCollection ToMulticastIpAddressInformationCollection(IPAddressInformationCollection addresses)
{
MulticastIPAddressInformationCollection multicastList = new MulticastIPAddressInformationCollection();
foreach (IPAddressInformation addressInfo in addresses)
{
multicastList.InternalAdd(new SystemMulticastIPAddressInformation((SystemIPAddressInformation)addressInfo));
}
return multicastList;
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
namespace System.Net.NetworkInformation
{
// Specifies the Multicast addresses for an interface.
internal sealed class SystemMulticastIPAddressInformation : MulticastIPAddressInformation
{
private readonly SystemIPAddressInformation _innerInfo;
public SystemMulticastIPAddressInformation(SystemIPAddressInformation addressInfo)
{
_innerInfo = addressInfo;
}
public override IPAddress Address { get { return _innerInfo.Address; } }
// The address is a cluster address and shouldn't be used by most applications.
public override bool IsTransient
{
get
{
return (_innerInfo.IsTransient);
}
}
// This address can be used for DNS.
public override bool IsDnsEligible
{
get
{
return (_innerInfo.IsDnsEligible);
}
}
public override PrefixOrigin PrefixOrigin
{
get
{
return PrefixOrigin.Other;
}
}
public override SuffixOrigin SuffixOrigin
{
get
{
return SuffixOrigin.Other;
}
}
public override DuplicateAddressDetectionState DuplicateAddressDetectionState
{
get
{
return DuplicateAddressDetectionState.Invalid;
}
}
// Specifies the valid lifetime of the address in seconds.
public override long AddressValidLifetime
{
get
{
return 0;
}
}
// Specifies the preferred lifetime of the address in seconds.
public override long AddressPreferredLifetime
{
get
{
return 0;
}
}
// Specifies the DHCP lease lifetime of the address in seconds.
public override long DhcpLeaseLifetime
{
get
{
return 0;
}
}
internal static MulticastIPAddressInformationCollection ToMulticastIpAddressInformationCollection(IPAddressInformationCollection addresses)
{
MulticastIPAddressInformationCollection multicastList = new MulticastIPAddressInformationCollection();
foreach (IPAddressInformation addressInfo in addresses)
{
multicastList.InternalAdd(new SystemMulticastIPAddressInformation((SystemIPAddressInformation)addressInfo));
}
return multicastList;
}
}
}
| -1 |
dotnet/runtime | 65,924 | Fix libraries GCStress pipeline | Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| BruceForstall | "2022-02-27T04:30:37Z" | "2022-03-01T18:20:06Z" | dfe08b3bea9a1897562635307ab4bb70bc31fada | 79026a542a8364b6783c5d212bb1841ac2c873a9 | Fix libraries GCStress pipeline. Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| ./src/libraries/System.Private.CoreLib/src/System/Collections/Generic/RandomizedStringEqualityComparer.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Runtime.CompilerServices;
namespace System.Collections.Generic
{
/// <summary>
/// A randomized <see cref="EqualityComparer{String}"/> which uses a different seed on each
/// construction as a general good hygiene + defense-in-depth mechanism. This implementation
/// *does not* need to stay in sync with <see cref="string.GetHashCode"/>, which for stability
/// is required to use an app-global seed.
/// </summary>
internal abstract class RandomizedStringEqualityComparer : EqualityComparer<string?>, IInternalStringEqualityComparer
{
private readonly MarvinSeed _seed;
private readonly IEqualityComparer<string?> _underlyingComparer;
private unsafe RandomizedStringEqualityComparer(IEqualityComparer<string?> underlyingComparer)
{
_underlyingComparer = underlyingComparer;
fixed (MarvinSeed* seed = &_seed)
{
Interop.GetRandomBytes((byte*)seed, sizeof(MarvinSeed));
}
}
internal static RandomizedStringEqualityComparer Create(IEqualityComparer<string?> underlyingComparer, bool ignoreCase)
{
if (!ignoreCase)
{
return new OrdinalComparer(underlyingComparer);
}
else
{
return new OrdinalIgnoreCaseComparer(underlyingComparer);
}
}
public IEqualityComparer<string?> GetUnderlyingEqualityComparer() => _underlyingComparer;
private struct MarvinSeed
{
internal uint p0;
internal uint p1;
}
private sealed class OrdinalComparer : RandomizedStringEqualityComparer
{
internal OrdinalComparer(IEqualityComparer<string?> wrappedComparer)
: base(wrappedComparer)
{
}
public override bool Equals(string? x, string? y) => string.Equals(x, y);
public override int GetHashCode(string? obj)
{
if (obj is null)
{
return 0;
}
// The Ordinal version of Marvin32 operates over bytes.
// The multiplication from # chars -> # bytes will never integer overflow.
return Marvin.ComputeHash32(
ref Unsafe.As<char, byte>(ref obj.GetRawStringData()),
(uint)obj.Length * 2,
_seed.p0, _seed.p1);
}
}
private sealed class OrdinalIgnoreCaseComparer : RandomizedStringEqualityComparer
{
internal OrdinalIgnoreCaseComparer(IEqualityComparer<string?> wrappedComparer)
: base(wrappedComparer)
{
}
public override bool Equals(string? x, string? y) => string.EqualsOrdinalIgnoreCase(x, y);
public override int GetHashCode(string? obj)
{
if (obj is null)
{
return 0;
}
// The OrdinalIgnoreCase version of Marvin32 operates over chars,
// so pass in the char count directly.
return Marvin.ComputeHash32OrdinalIgnoreCase(
ref obj.GetRawStringData(),
obj.Length,
_seed.p0, _seed.p1);
}
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Runtime.CompilerServices;
namespace System.Collections.Generic
{
/// <summary>
/// A randomized <see cref="EqualityComparer{String}"/> which uses a different seed on each
/// construction as a general good hygiene + defense-in-depth mechanism. This implementation
/// *does not* need to stay in sync with <see cref="string.GetHashCode"/>, which for stability
/// is required to use an app-global seed.
/// </summary>
internal abstract class RandomizedStringEqualityComparer : EqualityComparer<string?>, IInternalStringEqualityComparer
{
private readonly MarvinSeed _seed;
private readonly IEqualityComparer<string?> _underlyingComparer;
private unsafe RandomizedStringEqualityComparer(IEqualityComparer<string?> underlyingComparer)
{
_underlyingComparer = underlyingComparer;
fixed (MarvinSeed* seed = &_seed)
{
Interop.GetRandomBytes((byte*)seed, sizeof(MarvinSeed));
}
}
internal static RandomizedStringEqualityComparer Create(IEqualityComparer<string?> underlyingComparer, bool ignoreCase)
{
if (!ignoreCase)
{
return new OrdinalComparer(underlyingComparer);
}
else
{
return new OrdinalIgnoreCaseComparer(underlyingComparer);
}
}
public IEqualityComparer<string?> GetUnderlyingEqualityComparer() => _underlyingComparer;
private struct MarvinSeed
{
internal uint p0;
internal uint p1;
}
private sealed class OrdinalComparer : RandomizedStringEqualityComparer
{
internal OrdinalComparer(IEqualityComparer<string?> wrappedComparer)
: base(wrappedComparer)
{
}
public override bool Equals(string? x, string? y) => string.Equals(x, y);
public override int GetHashCode(string? obj)
{
if (obj is null)
{
return 0;
}
// The Ordinal version of Marvin32 operates over bytes.
// The multiplication from # chars -> # bytes will never integer overflow.
return Marvin.ComputeHash32(
ref Unsafe.As<char, byte>(ref obj.GetRawStringData()),
(uint)obj.Length * 2,
_seed.p0, _seed.p1);
}
}
private sealed class OrdinalIgnoreCaseComparer : RandomizedStringEqualityComparer
{
internal OrdinalIgnoreCaseComparer(IEqualityComparer<string?> wrappedComparer)
: base(wrappedComparer)
{
}
public override bool Equals(string? x, string? y) => string.EqualsOrdinalIgnoreCase(x, y);
public override int GetHashCode(string? obj)
{
if (obj is null)
{
return 0;
}
// The OrdinalIgnoreCase version of Marvin32 operates over chars,
// so pass in the char count directly.
return Marvin.ComputeHash32OrdinalIgnoreCase(
ref obj.GetRawStringData(),
obj.Length,
_seed.p0, _seed.p1);
}
}
}
}
| -1 |
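(Editorial illustration, not part of the dataset row above or of the .NET sources it quotes.) The RandomizedStringEqualityComparer file in the preceding record draws a fresh random seed for every constructed comparer, so the hash codes it produces are only meaningful to that one instance and must never be persisted or compared across processes. Below is a minimal sketch of that per-instance-seed idea; the type name, the Random.Shared seeding, and the simple multiplicative mix are all assumptions made for this example — the real type uses the Marvin32 hash shown above.

```csharp
using System;
using System.Collections.Generic;

// Toy comparer illustrating the "different seed on each construction" idea.
// Not the BCL implementation; for illustration only.
internal sealed class ToySeededStringComparer : EqualityComparer<string?>
{
    // Each instance gets its own seed, so two instances generally hash
    // the same string to different values.
    private readonly int _seed = Random.Shared.Next();

    public override bool Equals(string? x, string? y) => string.Equals(x, y);

    public override int GetHashCode(string? obj)
    {
        if (obj is null)
        {
            return 0;
        }

        int hash = _seed;
        foreach (char c in obj)
        {
            hash = (hash * 31) ^ c; // simple mixing; adequate only as a sketch
        }
        return hash;
    }
}
```

Because the seed changes per instance, hash codes computed by one such comparer cannot be meaningfully reused with another instance — the same property the randomized BCL comparer above relies on as a hygiene and defense-in-depth measure.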
dotnet/runtime | 65,924 | Fix libraries GCStress pipeline | Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| BruceForstall | "2022-02-27T04:30:37Z" | "2022-03-01T18:20:06Z" | dfe08b3bea9a1897562635307ab4bb70bc31fada | 79026a542a8364b6783c5d212bb1841ac2c873a9 | Fix libraries GCStress pipeline. Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| ./src/tests/JIT/IL_Conformance/Old/Conformance_Base/bge_i8.ilproj | <Project Sdk="Microsoft.NET.Sdk.IL">
<PropertyGroup>
<OutputType>Exe</OutputType>
<RestorePackages>true</RestorePackages>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<DebugType>PdbOnly</DebugType>
<Optimize>True</Optimize>
</PropertyGroup>
<ItemGroup>
<Compile Include="bge_i8.il" />
</ItemGroup>
</Project>
| <Project Sdk="Microsoft.NET.Sdk.IL">
<PropertyGroup>
<OutputType>Exe</OutputType>
<RestorePackages>true</RestorePackages>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<DebugType>PdbOnly</DebugType>
<Optimize>True</Optimize>
</PropertyGroup>
<ItemGroup>
<Compile Include="bge_i8.il" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime | 65,924 | Fix libraries GCStress pipeline | Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| BruceForstall | "2022-02-27T04:30:37Z" | "2022-03-01T18:20:06Z" | dfe08b3bea9a1897562635307ab4bb70bc31fada | 79026a542a8364b6783c5d212bb1841ac2c873a9 | Fix libraries GCStress pipeline. Remove reference to deleted setup-stress-dependencies.cmd/sh;
Automatically download and copy coredistools.dll to the libraries testhost.
Increase timeout for GCStress runs.
Note that the pipeline is still not scheduled to run; there are lots of timeouts and failures that
need to be addressed before that can happen.
| ./src/tests/JIT/jit64/valuetypes/nullable/box-unbox/value/box-unbox-value008.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Runtime.InteropServices;
using System;
internal class NullableTest
{
private static bool BoxUnboxToNQ(ValueType o)
{
return Helper.Compare((uint)o, Helper.Create(default(uint)));
}
private static bool BoxUnboxToQ(ValueType o)
{
return Helper.Compare((uint?)o, Helper.Create(default(uint)));
}
private static int Main()
{
uint? s = Helper.Create(default(uint));
if (BoxUnboxToNQ(s) && BoxUnboxToQ(s))
return ExitCode.Passed;
else
return ExitCode.Failed;
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Runtime.InteropServices;
using System;
internal class NullableTest
{
private static bool BoxUnboxToNQ(ValueType o)
{
return Helper.Compare((uint)o, Helper.Create(default(uint)));
}
private static bool BoxUnboxToQ(ValueType o)
{
return Helper.Compare((uint?)o, Helper.Create(default(uint)));
}
private static int Main()
{
uint? s = Helper.Create(default(uint));
if (BoxUnboxToNQ(s) && BoxUnboxToQ(s))
return ExitCode.Passed;
else
return ExitCode.Failed;
}
}
| -1 |
dotnet/runtime | 65,922 | Rely on PGO for isinst/castclass | Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | EgorBo | "2022-02-26T22:13:46Z" | "2022-04-01T21:25:56Z" | 063d08018602291f8dbd9d9b6055b8e1af874cb8 | f249a3d58b0b2bb55adca1a56c5a39cf446437b7 | Rely on PGO for isinst/castclass. Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | ./src/coreclr/jit/compiler.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Compiler XX
XX XX
XX Represents the method data we are currently JIT-compiling. XX
XX An instance of this class is created for every method we JIT. XX
XX This contains all the info needed for the method. So allocating a XX
XX a new instance per method makes it thread-safe. XX
XX It should be used to do all the memory management for the compiler run. XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
/*****************************************************************************/
#ifndef _COMPILER_H_
#define _COMPILER_H_
/*****************************************************************************/
#include "jit.h"
#include "opcode.h"
#include "varset.h"
#include "jitstd.h"
#include "jithashtable.h"
#include "gentree.h"
#include "debuginfo.h"
#include "lir.h"
#include "block.h"
#include "inline.h"
#include "jiteh.h"
#include "instr.h"
#include "regalloc.h"
#include "sm.h"
#include "cycletimer.h"
#include "blockset.h"
#include "arraystack.h"
#include "hashbv.h"
#include "jitexpandarray.h"
#include "tinyarray.h"
#include "valuenum.h"
#include "jittelemetry.h"
#include "namedintrinsiclist.h"
#ifdef LATE_DISASM
#include "disasm.h"
#endif
#include "codegeninterface.h"
#include "regset.h"
#include "jitgcinfo.h"
#if DUMP_GC_TABLES && defined(JIT32_GCENCODER)
#include "gcdump.h"
#endif
#include "emit.h"
#include "hwintrinsic.h"
#include "simd.h"
#include "simdashwintrinsic.h"
// This is only used locally in the JIT to indicate that
// a verification block should be inserted
#define SEH_VERIFICATION_EXCEPTION 0xe0564552 // VER
/*****************************************************************************
* Forward declarations
*/
struct InfoHdr; // defined in GCInfo.h
struct escapeMapping_t; // defined in fgdiagnostic.cpp
class emitter; // defined in emit.h
struct ShadowParamVarInfo; // defined in GSChecks.cpp
struct InitVarDscInfo; // defined in register_arg_convention.h
class FgStack; // defined in fgbasic.cpp
class Instrumentor; // defined in fgprofile.cpp
class SpanningTreeVisitor; // defined in fgprofile.cpp
class CSE_DataFlow; // defined in OptCSE.cpp
class OptBoolsDsc; // defined in optimizer.cpp
#ifdef DEBUG
struct IndentStack;
#endif
class Lowering; // defined in lower.h
// The following are defined in this file, Compiler.h
class Compiler;
/*****************************************************************************
* Unwind info
*/
#include "unwind.h"
/*****************************************************************************/
//
// Declare global operator new overloads that use the compiler's arena allocator
//
// I wanted to make the second argument optional, with default = CMK_Unknown, but that
// caused these to be ambiguous with the global placement new operators.
void* __cdecl operator new(size_t n, Compiler* context, CompMemKind cmk);
void* __cdecl operator new[](size_t n, Compiler* context, CompMemKind cmk);
void* __cdecl operator new(size_t n, void* p, const jitstd::placement_t& syntax_difference);
// Requires the definitions of "operator new" so including "LoopCloning.h" after the definitions.
#include "loopcloning.h"
/*****************************************************************************/
/* This is included here and not earlier as it needs the definition of "CSE"
* which is defined in the section above */
/*****************************************************************************/
unsigned genLog2(unsigned value);
unsigned genLog2(unsigned __int64 value);
unsigned ReinterpretHexAsDecimal(unsigned in);
/*****************************************************************************/
const unsigned FLG_CCTOR = (CORINFO_FLG_CONSTRUCTOR | CORINFO_FLG_STATIC);
#ifdef DEBUG
const int BAD_STK_OFFS = 0xBAADF00D; // for LclVarDsc::lvStkOffs
#endif
//------------------------------------------------------------------------
// HFA info shared by LclVarDsc and fgArgTabEntry
//------------------------------------------------------------------------
inline bool IsHfa(CorInfoHFAElemType kind)
{
return kind != CORINFO_HFA_ELEM_NONE;
}
inline var_types HfaTypeFromElemKind(CorInfoHFAElemType kind)
{
switch (kind)
{
case CORINFO_HFA_ELEM_FLOAT:
return TYP_FLOAT;
case CORINFO_HFA_ELEM_DOUBLE:
return TYP_DOUBLE;
#ifdef FEATURE_SIMD
case CORINFO_HFA_ELEM_VECTOR64:
return TYP_SIMD8;
case CORINFO_HFA_ELEM_VECTOR128:
return TYP_SIMD16;
#endif
case CORINFO_HFA_ELEM_NONE:
return TYP_UNDEF;
default:
assert(!"Invalid HfaElemKind");
return TYP_UNDEF;
}
}
inline CorInfoHFAElemType HfaElemKindFromType(var_types type)
{
switch (type)
{
case TYP_FLOAT:
return CORINFO_HFA_ELEM_FLOAT;
case TYP_DOUBLE:
return CORINFO_HFA_ELEM_DOUBLE;
#ifdef FEATURE_SIMD
case TYP_SIMD8:
return CORINFO_HFA_ELEM_VECTOR64;
case TYP_SIMD16:
return CORINFO_HFA_ELEM_VECTOR128;
#endif
case TYP_UNDEF:
return CORINFO_HFA_ELEM_NONE;
default:
assert(!"Invalid HFA Type");
return CORINFO_HFA_ELEM_NONE;
}
}
// The following holds the Local var info (scope information)
typedef const char* VarName; // Actual ASCII string
struct VarScopeDsc
{
unsigned vsdVarNum; // (remapped) LclVarDsc number
unsigned vsdLVnum; // 'which' in eeGetLVinfo().
// Also, it is the index of this entry in the info.compVarScopes array,
// which is useful since the array is also accessed via the
// compEnterScopeList and compExitScopeList sorted arrays.
IL_OFFSET vsdLifeBeg; // instr offset of beg of life
IL_OFFSET vsdLifeEnd; // instr offset of end of life
#ifdef DEBUG
VarName vsdName; // name of the var
#endif
};
// This class stores information associated with a LclVar SSA definition.
class LclSsaVarDsc
{
// The basic block where the definition occurs. Definitions of uninitialized variables
// are considered to occur at the start of the first basic block (fgFirstBB).
//
// TODO-Cleanup: In the case of uninitialized variables the block is set to nullptr by
// SsaBuilder and changed to fgFirstBB during value numbering. It would be useful to
// investigate and perhaps eliminate this rather unexpected behavior.
BasicBlock* m_block;
// The GT_ASG node that generates the definition, or nullptr for definitions
// of uninitialized variables.
GenTreeOp* m_asg;
public:
LclSsaVarDsc() : m_block(nullptr), m_asg(nullptr)
{
}
LclSsaVarDsc(BasicBlock* block) : m_block(block), m_asg(nullptr)
{
}
LclSsaVarDsc(BasicBlock* block, GenTreeOp* asg) : m_block(block), m_asg(asg)
{
assert((asg == nullptr) || asg->OperIs(GT_ASG));
}
BasicBlock* GetBlock() const
{
return m_block;
}
void SetBlock(BasicBlock* block)
{
m_block = block;
}
GenTreeOp* GetAssignment() const
{
return m_asg;
}
void SetAssignment(GenTreeOp* asg)
{
assert((asg == nullptr) || asg->OperIs(GT_ASG));
m_asg = asg;
}
ValueNumPair m_vnPair;
};
// This class stores information associated with a memory SSA definition.
class SsaMemDef
{
public:
ValueNumPair m_vnPair;
};
//------------------------------------------------------------------------
// SsaDefArray: A resizable array of SSA definitions.
//
// Unlike an ordinary resizable array implementation, this allows only element
// addition (by calling AllocSsaNum) and has special handling for RESERVED_SSA_NUM
// (basically it's a 1-based array). The array doesn't impose any particular
// requirements on the elements it stores and AllocSsaNum forwards its arguments
// to the array element constructor, this way the array supports both LclSsaVarDsc
// and SsaMemDef elements.
//
template <typename T>
class SsaDefArray
{
T* m_array;
unsigned m_arraySize;
unsigned m_count;
static_assert_no_msg(SsaConfig::RESERVED_SSA_NUM == 0);
static_assert_no_msg(SsaConfig::FIRST_SSA_NUM == 1);
// Get the minimum valid SSA number.
unsigned GetMinSsaNum() const
{
return SsaConfig::FIRST_SSA_NUM;
}
// Increase (double) the size of the array.
void GrowArray(CompAllocator alloc)
{
unsigned oldSize = m_arraySize;
unsigned newSize = max(2, oldSize * 2);
T* newArray = alloc.allocate<T>(newSize);
for (unsigned i = 0; i < oldSize; i++)
{
newArray[i] = m_array[i];
}
m_array = newArray;
m_arraySize = newSize;
}
public:
// Construct an empty SsaDefArray.
SsaDefArray() : m_array(nullptr), m_arraySize(0), m_count(0)
{
}
// Reset the array (used only if the SSA form is reconstructed).
void Reset()
{
m_count = 0;
}
// Allocate a new SSA number (starting with SsaConfig::FIRST_SSA_NUM).
template <class... Args>
unsigned AllocSsaNum(CompAllocator alloc, Args&&... args)
{
if (m_count == m_arraySize)
{
GrowArray(alloc);
}
unsigned ssaNum = GetMinSsaNum() + m_count;
m_array[m_count++] = T(std::forward<Args>(args)...);
// Ensure that the first SSA number we allocate is SsaConfig::FIRST_SSA_NUM
assert((ssaNum == SsaConfig::FIRST_SSA_NUM) || (m_count > 1));
return ssaNum;
}
// Get the number of SSA definitions in the array.
unsigned GetCount() const
{
return m_count;
}
// Get a pointer to the SSA definition at the specified index.
T* GetSsaDefByIndex(unsigned index)
{
assert(index < m_count);
return &m_array[index];
}
// Check if the specified SSA number is valid.
bool IsValidSsaNum(unsigned ssaNum) const
{
return (GetMinSsaNum() <= ssaNum) && (ssaNum < (GetMinSsaNum() + m_count));
}
// Get a pointer to the SSA definition associated with the specified SSA number.
T* GetSsaDef(unsigned ssaNum)
{
assert(ssaNum != SsaConfig::RESERVED_SSA_NUM);
return GetSsaDefByIndex(ssaNum - GetMinSsaNum());
}
// Get an SSA number associated with the specified SSA def (that must be in this array).
unsigned GetSsaNum(T* ssaDef)
{
assert((m_array <= ssaDef) && (ssaDef < &m_array[m_count]));
return GetMinSsaNum() + static_cast<unsigned>(ssaDef - &m_array[0]);
}
};
enum RefCountState
{
RCS_INVALID, // not valid to get/set ref counts
RCS_EARLY, // early counts for struct promotion and struct passing
RCS_NORMAL, // normal ref counts (from lvaMarkRefs onward)
};
#ifdef DEBUG
// Reasons why we can't enregister a local.
enum class DoNotEnregisterReason
{
None,
AddrExposed, // the address of this local is exposed.
DontEnregStructs, // struct enregistration is disabled.
NotRegSizeStruct, // the struct size does not match any register size, usually the struct size is too big.
LocalField, // the local is accessed with LCL_FLD, note we can do it not only for struct locals.
VMNeedsStackAddr,
LiveInOutOfHandler, // the local is alive in and out of an exception handler and is not single def.
BlockOp, // Is read or written via a block operation.
IsStructArg, // Is a struct passed as an argument in a way that requires a stack location.
DepField, // It is a field of a dependently promoted struct
NoRegVars, // opts.compFlags & CLFLG_REGVAR is not set
MinOptsGC, // It is a GC Ref and we are compiling MinOpts
#if !defined(TARGET_64BIT)
LongParamField, // It is a decomposed field of a long parameter.
#endif
#ifdef JIT32_GCENCODER
PinningRef,
#endif
LclAddrNode, // the local is accessed with LCL_ADDR_VAR/FLD.
CastTakesAddr,
StoreBlkSrc, // the local is used as STORE_BLK source.
OneAsgRetyping, // fgMorphOneAsgBlockOp prevents this local from being enregistered.
SwizzleArg, // the local is passed using LCL_FLD as another type.
BlockOpRet, // the struct is returned and it is promoted or there is a cast.
ReturnSpCheck, // the local is used to do SP check
SimdUserForcesDep, // a promoted struct was used by a SIMD/HWI node; it must be dependently promoted
HiddenBufferStructArg // the argument is a hidden return buffer passed to a method.
};
enum class AddressExposedReason
{
NONE,
PARENT_EXPOSED, // This is a promoted field but the parent is exposed.
TOO_CONSERVATIVE, // Were marked as exposed to be conservative, fix these places.
ESCAPE_ADDRESS, // The address is escaping, for example, passed as call argument.
WIDE_INDIR, // We access via indirection with wider type.
OSR_EXPOSED, // It was exposed in the original method, osr has to repeat it.
STRESS_LCL_FLD, // Stress mode replaces localVar with localFld and makes them addrExposed.
COPY_FLD_BY_FLD, // Field by field copy takes the address of the local, can be fixed.
DISPATCH_RET_BUF // Caller return buffer dispatch.
};
#endif // DEBUG
class LclVarDsc
{
public:
// The constructor. Most things can just be zero'ed.
//
// Initialize the ArgRegs to REG_STK.
// Morph will update if this local is passed in a register.
LclVarDsc()
: _lvArgReg(REG_STK)
,
#if FEATURE_MULTIREG_ARGS
_lvOtherArgReg(REG_STK)
,
#endif // FEATURE_MULTIREG_ARGS
lvClassHnd(NO_CLASS_HANDLE)
, lvRefBlks(BlockSetOps::UninitVal())
, lvPerSsaData()
{
}
// note this only packs because var_types is a typedef of unsigned char
var_types lvType : 5; // TYP_INT/LONG/FLOAT/DOUBLE/REF
unsigned char lvIsParam : 1; // is this a parameter?
unsigned char lvIsRegArg : 1; // is this an argument that was passed by register?
unsigned char lvFramePointerBased : 1; // 0 = off of REG_SPBASE (e.g., ESP), 1 = off of REG_FPBASE (e.g., EBP)
unsigned char lvOnFrame : 1; // (part of) the variable lives on the frame
unsigned char lvRegister : 1; // assigned to live in a register? For RyuJIT backend, this is only set if the
// variable is in the same register for the entire function.
unsigned char lvTracked : 1; // is this a tracked variable?
bool lvTrackedNonStruct()
{
return lvTracked && lvType != TYP_STRUCT;
}
unsigned char lvPinned : 1; // is this a pinned variable?
unsigned char lvMustInit : 1; // must be initialized
private:
bool m_addrExposed : 1; // The address of this variable is "exposed" -- passed as an argument, stored in a
// global location, etc.
// We cannot reason reliably about the value of the variable.
public:
unsigned char lvDoNotEnregister : 1; // Do not enregister this variable.
unsigned char lvFieldAccessed : 1; // The var is a struct local, and a field of the variable is accessed. Affects
// struct promotion.
unsigned char lvLiveInOutOfHndlr : 1; // The variable is live in or out of an exception handler, and therefore must
// be on the stack (at least at those boundaries.)
unsigned char lvInSsa : 1; // The variable is in SSA form (set by SsaBuilder)
unsigned char lvIsCSE : 1; // Indicates if this LclVar is a CSE variable.
unsigned char lvHasLdAddrOp : 1; // has ldloca or ldarga opcode on this local.
unsigned char lvStackByref : 1; // This is a compiler temporary of TYP_BYREF that is known to point into our local
// stack frame.
unsigned char lvHasILStoreOp : 1; // there is at least one STLOC or STARG on this local
unsigned char lvHasMultipleILStoreOp : 1; // there is more than one STLOC on this local
unsigned char lvIsTemp : 1; // Short-lifetime compiler temp
#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
unsigned char lvIsImplicitByRef : 1; // Set if the argument is an implicit byref.
#endif // defined(TARGET_AMD64) || defined(TARGET_ARM64)
unsigned char lvIsBoolean : 1; // set if variable is boolean
unsigned char lvSingleDef : 1; // variable has a single def
// before lvaMarkLocalVars: identifies ref type locals that can get type updates
// after lvaMarkLocalVars: identifies locals that are suitable for optAddCopies
unsigned char lvSingleDefRegCandidate : 1; // variable has a single def and hence is a register candidate
// Currently, this is only used to decide if an EH variable can be
// a register candidate or not.
unsigned char lvDisqualifySingleDefRegCandidate : 1; // tracks variables that are disqualified from register
// candidacy
unsigned char lvSpillAtSingleDef : 1; // variable has a single def (as determined by LSRA interval scan)
// and is spilled making it candidate to spill right after the
// first (and only) definition.
// Note: We cannot reuse lvSingleDefRegCandidate because it is set
// in earlier phase and the information might not be appropriate
// in LSRA.
unsigned char lvDisqualify : 1; // variable is no longer OK for add copy optimization
unsigned char lvVolatileHint : 1; // hint for AssertionProp
#ifndef TARGET_64BIT
unsigned char lvStructDoubleAlign : 1; // Must we double align this struct?
#endif // !TARGET_64BIT
#ifdef TARGET_64BIT
unsigned char lvQuirkToLong : 1; // Quirk to allocate this LclVar as a 64-bit long
#endif
#ifdef DEBUG
unsigned char lvKeepType : 1; // Don't change the type of this variable
unsigned char lvNoLclFldStress : 1; // Can't apply local field stress on this one
#endif
unsigned char lvIsPtr : 1; // Might this be used in an address computation? (used by buffer overflow security
// checks)
unsigned char lvIsUnsafeBuffer : 1; // Does this contain an unsafe buffer requiring buffer overflow security checks?
unsigned char lvPromoted : 1; // True when this local is a promoted struct, a normed struct, or a "split" long on a
// 32-bit target. For implicit byref parameters, this gets hijacked between
// fgRetypeImplicitByRefArgs and fgMarkDemotedImplicitByRefArgs to indicate whether
// references to the arg are being rewritten as references to a promoted shadow local.
unsigned char lvIsStructField : 1; // Is this local var a field of a promoted struct local?
unsigned char lvOverlappingFields : 1; // True when we have a struct with possibly overlapping fields
unsigned char lvContainsHoles : 1; // True when we have a promoted struct that contains holes
unsigned char lvCustomLayout : 1; // True when this struct has "CustomLayout"
unsigned char lvIsMultiRegArg : 1; // true if this is a multireg LclVar struct used in an argument context
unsigned char lvIsMultiRegRet : 1; // true if this is a multireg LclVar struct assigned from a multireg call
#ifdef DEBUG
unsigned char lvHiddenBufferStructArg : 1; // True when this struct (or its field) is passed as a hidden buffer
// pointer.
#endif
#ifdef FEATURE_HFA_FIELDS_PRESENT
CorInfoHFAElemType _lvHfaElemKind : 3; // What kind of an HFA this is (CORINFO_HFA_ELEM_NONE if it is not an HFA).
#endif // FEATURE_HFA_FIELDS_PRESENT
#ifdef DEBUG
// TODO-Cleanup: See the note on lvSize() - this flag is only in use by asserts that are checking for struct
// types, and is needed because of cases where TYP_STRUCT is bashed to an integral type.
// Consider cleaning this up so this workaround is not required.
unsigned char lvUnusedStruct : 1; // All references to this promoted struct are through its field locals.
// I.e. there is no longer any reference to the struct directly.
// In this case we can simply remove this struct local.
unsigned char lvUndoneStructPromotion : 1; // The struct promotion was undone and hence there should be no
// reference to the fields of this struct.
#endif
unsigned char lvLRACandidate : 1; // Tracked for linear scan register allocation purposes
#ifdef FEATURE_SIMD
// Note that both SIMD vector args and locals are marked as lvSIMDType = true, but the
// type of an arg node is TYP_BYREF and a local node is TYP_SIMD*.
unsigned char lvSIMDType : 1; // This is a SIMD struct
unsigned char lvUsedInSIMDIntrinsic : 1; // Set if this lclVar is used by a SIMD intrinsic
unsigned char lvSimdBaseJitType : 5; // Note: this only packs because CorInfoType has less than 32 entries
CorInfoType GetSimdBaseJitType() const
{
return (CorInfoType)lvSimdBaseJitType;
}
void SetSimdBaseJitType(CorInfoType simdBaseJitType)
{
assert(simdBaseJitType < (1 << 5));
lvSimdBaseJitType = (unsigned char)simdBaseJitType;
}
var_types GetSimdBaseType() const;
#endif // FEATURE_SIMD
unsigned char lvRegStruct : 1; // This is a reg-sized non-field-addressed struct.
unsigned char lvClassIsExact : 1; // lvClassHandle is the exact type
#ifdef DEBUG
unsigned char lvClassInfoUpdated : 1; // true if this var has updated class handle or exactness
#endif
unsigned char lvImplicitlyReferenced : 1; // true if there are non-IR references to this local (prolog, epilog, gc,
// eh)
unsigned char lvSuppressedZeroInit : 1; // local needs zero init if we transform tail call to loop
unsigned char lvHasExplicitInit : 1; // The local is explicitly initialized and doesn't need zero initialization in
// the prolog. If the local has gc pointers, there are no gc-safe points
// between the prolog and the explicit initialization.
unsigned char lvIsOSRLocal : 1; // Root method local in an OSR method. Any stack home will be on the Tier0 frame.
// Initial value will be defined by Tier0. Requires special handing in prolog.
union {
unsigned lvFieldLclStart; // The index of the local var representing the first field in the promoted struct
// local. For implicit byref parameters, this gets hijacked between
// fgRetypeImplicitByRefArgs and fgMarkDemotedImplicitByRefArgs to point to the
// struct local created to model the parameter's struct promotion, if any.
unsigned lvParentLcl; // The index of the local var representing the parent (i.e. the promoted struct local).
// Valid on promoted struct local fields.
};
unsigned char lvFieldCnt; // Number of fields in the promoted VarDsc.
unsigned char lvFldOffset;
unsigned char lvFldOrdinal;
#ifdef DEBUG
unsigned char lvSingleDefDisqualifyReason = 'H';
#endif
#if FEATURE_MULTIREG_ARGS
regNumber lvRegNumForSlot(unsigned slotNum)
{
if (slotNum == 0)
{
return (regNumber)_lvArgReg;
}
else if (slotNum == 1)
{
return GetOtherArgReg();
}
else
{
assert(false && "Invalid slotNum!");
}
unreached();
}
#endif // FEATURE_MULTIREG_ARGS
CorInfoHFAElemType GetLvHfaElemKind() const
{
#ifdef FEATURE_HFA_FIELDS_PRESENT
return _lvHfaElemKind;
#else
NOWAY_MSG("GetLvHfaElemKind");
return CORINFO_HFA_ELEM_NONE;
#endif // FEATURE_HFA_FIELDS_PRESENT
}
void SetLvHfaElemKind(CorInfoHFAElemType elemKind)
{
#ifdef FEATURE_HFA_FIELDS_PRESENT
_lvHfaElemKind = elemKind;
#else
NOWAY_MSG("SetLvHfaElemKind");
#endif // FEATURE_HFA_FIELDS_PRESENT
}
bool lvIsHfa() const
{
if (GlobalJitOptions::compFeatureHfa)
{
return IsHfa(GetLvHfaElemKind());
}
else
{
return false;
}
}
bool lvIsHfaRegArg() const
{
if (GlobalJitOptions::compFeatureHfa)
{
return lvIsRegArg && lvIsHfa();
}
else
{
return false;
}
}
//------------------------------------------------------------------------------
// lvHfaSlots: Get the number of slots used by an HFA local
//
// Return Value:
// On Arm64 - Returns 1-4 indicating the number of register slots used by the HFA
// On Arm32 - Returns the total number of single FP register slots used by the HFA, max is 8
//
unsigned lvHfaSlots() const
{
assert(lvIsHfa());
assert(varTypeIsStruct(lvType));
unsigned slots = 0;
#ifdef TARGET_ARM
slots = lvExactSize / sizeof(float);
assert(slots <= 8);
#elif defined(TARGET_ARM64)
switch (GetLvHfaElemKind())
{
case CORINFO_HFA_ELEM_NONE:
assert(!"lvHfaSlots called for non-HFA");
break;
case CORINFO_HFA_ELEM_FLOAT:
assert((lvExactSize % 4) == 0);
slots = lvExactSize >> 2;
break;
case CORINFO_HFA_ELEM_DOUBLE:
case CORINFO_HFA_ELEM_VECTOR64:
assert((lvExactSize % 8) == 0);
slots = lvExactSize >> 3;
break;
case CORINFO_HFA_ELEM_VECTOR128:
assert((lvExactSize % 16) == 0);
slots = lvExactSize >> 4;
break;
default:
unreached();
}
assert(slots <= 4);
#endif // TARGET_ARM64
return slots;
}
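// Illustrative example (editorial, not used by the code): an HFA made of three doubles has
// lvExactSize == 24, so on Arm64 lvHfaSlots() returns 24 >> 3 == 3 register slots, while on
// Arm32 it returns 24 / sizeof(float) == 6 single FP register slots.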
// lvIsMultiRegArgOrRet()
// returns true if this is a multireg LclVar struct used in an argument context
// or if this is a multireg LclVar struct assigned from a multireg call
bool lvIsMultiRegArgOrRet()
{
return lvIsMultiRegArg || lvIsMultiRegRet;
}
#if defined(DEBUG)
private:
DoNotEnregisterReason m_doNotEnregReason;
AddressExposedReason m_addrExposedReason;
public:
void SetDoNotEnregReason(DoNotEnregisterReason reason)
{
m_doNotEnregReason = reason;
}
DoNotEnregisterReason GetDoNotEnregReason() const
{
return m_doNotEnregReason;
}
AddressExposedReason GetAddrExposedReason() const
{
return m_addrExposedReason;
}
#endif // DEBUG
public:
void SetAddressExposed(bool value DEBUGARG(AddressExposedReason reason))
{
m_addrExposed = value;
INDEBUG(m_addrExposedReason = reason);
}
void CleanAddressExposed()
{
m_addrExposed = false;
}
bool IsAddressExposed() const
{
return m_addrExposed;
}
#ifdef DEBUG
void SetHiddenBufferStructArg(char value)
{
lvHiddenBufferStructArg = value;
}
bool IsHiddenBufferStructArg() const
{
return lvHiddenBufferStructArg;
}
#endif
private:
regNumberSmall _lvRegNum; // Used to store the register this variable is in (or, the low register of a
// register pair). It is set during codegen any time the
// variable is enregistered (lvRegister is only set
// to non-zero if the variable gets the same register assignment for its entire
// lifetime).
#if !defined(TARGET_64BIT)
regNumberSmall _lvOtherReg; // Used for "upper half" of long var.
#endif // !defined(TARGET_64BIT)
regNumberSmall _lvArgReg; // The (first) register in which this argument is passed.
#if FEATURE_MULTIREG_ARGS
regNumberSmall _lvOtherArgReg; // Used for the second part of the struct passed in a register.
// Note this is defined but not used by ARM32
#endif // FEATURE_MULTIREG_ARGS
regNumberSmall _lvArgInitReg; // the register into which the argument is moved at entry
public:
// The register number is stored in a small format (8 bits), but the getters return and the setters take
// a full-size (unsigned) format, to localize the casts here.
/////////////////////
regNumber GetRegNum() const
{
return (regNumber)_lvRegNum;
}
void SetRegNum(regNumber reg)
{
_lvRegNum = (regNumberSmall)reg;
assert(_lvRegNum == reg);
}
/////////////////////
#if defined(TARGET_64BIT)
regNumber GetOtherReg() const
{
assert(!"shouldn't get here"); // can't use "unreached();" because it's NORETURN, which causes C4072
// "unreachable code" warnings
return REG_NA;
}
void SetOtherReg(regNumber reg)
{
assert(!"shouldn't get here"); // can't use "unreached();" because it's NORETURN, which causes C4072
// "unreachable code" warnings
}
#else // !TARGET_64BIT
regNumber GetOtherReg() const
{
return (regNumber)_lvOtherReg;
}
void SetOtherReg(regNumber reg)
{
_lvOtherReg = (regNumberSmall)reg;
assert(_lvOtherReg == reg);
}
#endif // !TARGET_64BIT
/////////////////////
regNumber GetArgReg() const
{
return (regNumber)_lvArgReg;
}
void SetArgReg(regNumber reg)
{
_lvArgReg = (regNumberSmall)reg;
assert(_lvArgReg == reg);
}
#if FEATURE_MULTIREG_ARGS
regNumber GetOtherArgReg() const
{
return (regNumber)_lvOtherArgReg;
}
void SetOtherArgReg(regNumber reg)
{
_lvOtherArgReg = (regNumberSmall)reg;
assert(_lvOtherArgReg == reg);
}
#endif // FEATURE_MULTIREG_ARGS
#ifdef FEATURE_SIMD
// Is this a SIMD struct?
bool lvIsSIMDType() const
{
return lvSIMDType;
}
// Is this a SIMD struct that is used by a SIMD intrinsic?
bool lvIsUsedInSIMDIntrinsic() const
{
return lvUsedInSIMDIntrinsic;
}
#else
// If FEATURE_SIMD is not enabled, return false
bool lvIsSIMDType() const
{
return false;
}
bool lvIsUsedInSIMDIntrinsic() const
{
return false;
}
#endif
/////////////////////
regNumber GetArgInitReg() const
{
return (regNumber)_lvArgInitReg;
}
void SetArgInitReg(regNumber reg)
{
_lvArgInitReg = (regNumberSmall)reg;
assert(_lvArgInitReg == reg);
}
/////////////////////
bool lvIsRegCandidate() const
{
return lvLRACandidate != 0;
}
bool lvIsInReg() const
{
return lvIsRegCandidate() && (GetRegNum() != REG_STK);
}
regMaskTP lvRegMask() const
{
regMaskTP regMask = RBM_NONE;
if (varTypeUsesFloatReg(TypeGet()))
{
if (GetRegNum() != REG_STK)
{
regMask = genRegMaskFloat(GetRegNum(), TypeGet());
}
}
else
{
if (GetRegNum() != REG_STK)
{
regMask = genRegMask(GetRegNum());
}
}
return regMask;
}
unsigned short lvVarIndex; // variable tracking index
private:
unsigned short m_lvRefCnt; // unweighted (real) reference count. For implicit by reference
// parameters, this gets hijacked from fgResetImplicitByRefRefCount
// through fgMarkDemotedImplicitByRefArgs, to provide a static
// appearance count (computed during address-exposed analysis)
// that fgMakeOutgoingStructArgCopy consults during global morph
// to determine if eliding its copy is legal.
weight_t m_lvRefCntWtd; // weighted reference count
public:
unsigned short lvRefCnt(RefCountState state = RCS_NORMAL) const;
void incLvRefCnt(unsigned short delta, RefCountState state = RCS_NORMAL);
void setLvRefCnt(unsigned short newValue, RefCountState state = RCS_NORMAL);
weight_t lvRefCntWtd(RefCountState state = RCS_NORMAL) const;
void incLvRefCntWtd(weight_t delta, RefCountState state = RCS_NORMAL);
void setLvRefCntWtd(weight_t newValue, RefCountState state = RCS_NORMAL);
private:
int lvStkOffs; // stack offset of home in bytes.
public:
int GetStackOffset() const
{
return lvStkOffs;
}
void SetStackOffset(int offset)
{
lvStkOffs = offset;
}
unsigned lvExactSize; // (exact) size of the type in bytes
// Is this a promoted struct?
// This method returns true only for structs (including SIMD structs), not for
// locals that are split on a 32-bit target.
// It is only necessary to use this:
// 1) if only structs are wanted, and
// 2) if Lowering has already been done.
// Otherwise lvPromoted is valid.
bool lvPromotedStruct()
{
#if !defined(TARGET_64BIT)
return (lvPromoted && !varTypeIsLong(lvType));
#else // defined(TARGET_64BIT)
return lvPromoted;
#endif // defined(TARGET_64BIT)
}
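// Illustrative example (editorial): on a 32-bit target a TYP_LONG local that was split into two
// field locals has lvPromoted == true but lvPromotedStruct() == false, because it is not a struct.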
unsigned lvSize() const;
size_t lvArgStackSize() const;
unsigned lvSlotNum; // original slot # (if remapped)
typeInfo lvVerTypeInfo; // type info needed for verification
// class handle for the local or null if not known or not a class,
// for a struct handle use `GetStructHnd()`.
CORINFO_CLASS_HANDLE lvClassHnd;
// Get class handle for a struct local or implicitByRef struct local.
CORINFO_CLASS_HANDLE GetStructHnd() const
{
#ifdef FEATURE_SIMD
if (lvSIMDType && (m_layout == nullptr))
{
return NO_CLASS_HANDLE;
}
#endif
assert(m_layout != nullptr);
#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
assert(varTypeIsStruct(TypeGet()) || (lvIsImplicitByRef && (TypeGet() == TYP_BYREF)));
#else
assert(varTypeIsStruct(TypeGet()));
#endif
CORINFO_CLASS_HANDLE structHnd = m_layout->GetClassHandle();
assert(structHnd != NO_CLASS_HANDLE);
return structHnd;
}
CORINFO_FIELD_HANDLE lvFieldHnd; // field handle for promoted struct fields
private:
ClassLayout* m_layout; // layout info for structs
public:
BlockSet lvRefBlks; // Set of blocks that contain refs
Statement* lvDefStmt; // Pointer to the statement with the single definition
void lvaDisqualifyVar(); // Call to disqualify a local variable from use in optAddCopies
var_types TypeGet() const
{
return (var_types)lvType;
}
bool lvStackAligned() const
{
assert(lvIsStructField);
return ((lvFldOffset % TARGET_POINTER_SIZE) == 0);
}
bool lvNormalizeOnLoad() const
{
return varTypeIsSmall(TypeGet()) &&
// lvIsStructField is treated the same as the aliased local, see fgDoNormalizeOnStore.
(lvIsParam || m_addrExposed || lvIsStructField);
}
bool lvNormalizeOnStore() const
{
return varTypeIsSmall(TypeGet()) &&
// lvIsStructField is treated the same as the aliased local, see fgDoNormalizeOnStore.
!(lvIsParam || m_addrExposed || lvIsStructField);
}
void incRefCnts(weight_t weight, Compiler* pComp, RefCountState state = RCS_NORMAL, bool propagate = true);
var_types GetHfaType() const
{
if (GlobalJitOptions::compFeatureHfa)
{
assert(lvIsHfa());
return HfaTypeFromElemKind(GetLvHfaElemKind());
}
else
{
return TYP_UNDEF;
}
}
void SetHfaType(var_types type)
{
if (GlobalJitOptions::compFeatureHfa)
{
CorInfoHFAElemType elemKind = HfaElemKindFromType(type);
SetLvHfaElemKind(elemKind);
// Ensure we've allocated enough bits.
assert(GetLvHfaElemKind() == elemKind);
}
}
// Returns true if this variable contains GC pointers (including being a GC pointer itself).
bool HasGCPtr() const
{
return varTypeIsGC(lvType) || ((lvType == TYP_STRUCT) && m_layout->HasGCPtr());
}
// Returns the layout of a struct variable.
ClassLayout* GetLayout() const
{
assert(varTypeIsStruct(lvType));
return m_layout;
}
// Sets the layout of a struct variable.
void SetLayout(ClassLayout* layout)
{
assert(varTypeIsStruct(lvType));
assert((m_layout == nullptr) || ClassLayout::AreCompatible(m_layout, layout));
m_layout = layout;
}
SsaDefArray<LclSsaVarDsc> lvPerSsaData;
// Returns the address of the per-Ssa data for the given ssaNum (which is required
// not to be the SsaConfig::RESERVED_SSA_NUM, which indicates that the variable is
// not an SSA variable).
LclSsaVarDsc* GetPerSsaData(unsigned ssaNum)
{
return lvPerSsaData.GetSsaDef(ssaNum);
}
// Returns the SSA number for "ssaDef". Requires "ssaDef" to be a valid definition
// of this variable.
unsigned GetSsaNumForSsaDef(LclSsaVarDsc* ssaDef)
{
return lvPerSsaData.GetSsaNum(ssaDef);
}
var_types GetRegisterType(const GenTreeLclVarCommon* tree) const;
var_types GetRegisterType() const;
var_types GetActualRegisterType() const;
bool IsEnregisterableType() const
{
return GetRegisterType() != TYP_UNDEF;
}
bool IsEnregisterableLcl() const
{
if (lvDoNotEnregister)
{
return false;
}
return IsEnregisterableType();
}
//-----------------------------------------------------------------------------
// IsAlwaysAliveInMemory: Determines if this variable's value is always
// up-to-date on the stack. This is possible if this is an EH var or
// we decided to spill after single-def.
//
bool IsAlwaysAliveInMemory() const
{
return lvLiveInOutOfHndlr || lvSpillAtSingleDef;
}
bool CanBeReplacedWithItsField(Compiler* comp) const;
#ifdef DEBUG
public:
const char* lvReason;
void PrintVarReg() const
{
printf("%s", getRegName(GetRegNum()));
}
#endif // DEBUG
}; // class LclVarDsc
enum class SymbolicIntegerValue : int32_t
{
LongMin,
IntMin,
ShortMin,
ByteMin,
Zero,
One,
ByteMax,
UByteMax,
ShortMax,
UShortMax,
IntMax,
UIntMax,
LongMax,
};
inline constexpr bool operator>(SymbolicIntegerValue left, SymbolicIntegerValue right)
{
return static_cast<int32_t>(left) > static_cast<int32_t>(right);
}
inline constexpr bool operator>=(SymbolicIntegerValue left, SymbolicIntegerValue right)
{
return static_cast<int32_t>(left) >= static_cast<int32_t>(right);
}
inline constexpr bool operator<(SymbolicIntegerValue left, SymbolicIntegerValue right)
{
return static_cast<int32_t>(left) < static_cast<int32_t>(right);
}
inline constexpr bool operator<=(SymbolicIntegerValue left, SymbolicIntegerValue right)
{
return static_cast<int32_t>(left) <= static_cast<int32_t>(right);
}
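// A consequence of the declaration order above (noted here for clarity): the symbolic values
// compare the same way the integers they stand for do, e.g. SymbolicIntegerValue::ByteMax <
// SymbolicIntegerValue::ShortMax < SymbolicIntegerValue::IntMax.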
// Represents an integral range useful for reasoning about integral casts.
// It uses a symbolic representation for lower and upper bounds so
// that it can efficiently handle integers of all sizes on all hosts.
//
// Note that the ranges represented by this class are **always** in the
// "signed" domain. This is so that if we know the range a node produces, it
// can be trivially used to determine if a cast above the node does or does not
// overflow, which requires that the interpretation of integers be the same both
// for the "input" and "output". We choose signed interpretation here because it
// produces nice continuous ranges and because IR uses sign-extension for constants.
//
// Some examples of how ranges are computed for casts:
// 1. CAST_OVF(ubyte <- uint): does not overflow for [0..UBYTE_MAX], produces the
// same range - all casts that do not change the representation, i.e. have the same
// "actual" input and output type, have the same "input" and "output" range.
// 2. CAST_OVF(ulong <- uint): never overflows => the "input" range is [INT_MIN..INT_MAX]
// (aka all possible 32 bit integers). Produces [0..UINT_MAX] (aka all possible 32
// bit integers zero-extended to 64 bits).
// 3. CAST_OVF(int <- uint): overflows for inputs larger than INT_MAX <=> less than 0
// when interpreting as signed => the "input" range is [0..INT_MAX], the same range
// being the produced one as the node does not change the width of the integer.
//
class IntegralRange
{
private:
SymbolicIntegerValue m_lowerBound;
SymbolicIntegerValue m_upperBound;
public:
IntegralRange() = default;
IntegralRange(SymbolicIntegerValue lowerBound, SymbolicIntegerValue upperBound)
: m_lowerBound(lowerBound), m_upperBound(upperBound)
{
assert(lowerBound <= upperBound);
}
bool Contains(int64_t value) const;
bool Contains(IntegralRange other) const
{
return (m_lowerBound <= other.m_lowerBound) && (other.m_upperBound <= m_upperBound);
}
bool IsPositive()
{
return m_lowerBound >= SymbolicIntegerValue::Zero;
}
bool Equals(IntegralRange other) const
{
return (m_lowerBound == other.m_lowerBound) && (m_upperBound == other.m_upperBound);
}
static int64_t SymbolicToRealValue(SymbolicIntegerValue value);
static SymbolicIntegerValue LowerBoundForType(var_types type);
static SymbolicIntegerValue UpperBoundForType(var_types type);
static IntegralRange ForType(var_types type)
{
return {LowerBoundForType(type), UpperBoundForType(type)};
}
static IntegralRange ForNode(GenTree* node, Compiler* compiler);
static IntegralRange ForCastInput(GenTreeCast* cast);
static IntegralRange ForCastOutput(GenTreeCast* cast);
#ifdef DEBUG
static void Print(IntegralRange range);
#endif // DEBUG
};
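// A minimal usage sketch (editorial; assumes a GenTreeCast* `cast` and a Compiler* `compiler`
// are in scope, and the overflow-removal action is only illustrative):
//
// IntegralRange inputRange = IntegralRange::ForCastInput(cast);
// IntegralRange valueRange = IntegralRange::ForNode(cast->CastOp(), compiler);
// if (inputRange.Contains(valueRange))
// {
// // the checked cast can never overflow for this operand
// }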
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX TempsInfo XX
XX XX
XX The temporary lclVars allocated by the compiler for code generation XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
/*****************************************************************************
*
* The following keeps track of temporaries allocated in the stack frame
* during code-generation (after register allocation). These spill-temps are
* only used if we run out of registers while evaluating a tree.
*
* These are different from the more common temps allocated by lvaGrabTemp().
*/
class TempDsc
{
public:
TempDsc* tdNext;
private:
int tdOffs;
#ifdef DEBUG
static const int BAD_TEMP_OFFSET = 0xDDDDDDDD; // used as a sentinel "bad value" for tdOffs in DEBUG
#endif // DEBUG
int tdNum;
BYTE tdSize;
var_types tdType;
public:
TempDsc(int _tdNum, unsigned _tdSize, var_types _tdType) : tdNum(_tdNum), tdSize((BYTE)_tdSize), tdType(_tdType)
{
#ifdef DEBUG
// temps must have a negative number (so they have a different number from all local variables)
assert(tdNum < 0);
tdOffs = BAD_TEMP_OFFSET;
#endif // DEBUG
if (tdNum != _tdNum)
{
IMPL_LIMITATION("too many spill temps");
}
}
#ifdef DEBUG
bool tdLegalOffset() const
{
return tdOffs != BAD_TEMP_OFFSET;
}
#endif // DEBUG
int tdTempOffs() const
{
assert(tdLegalOffset());
return tdOffs;
}
void tdSetTempOffs(int offs)
{
tdOffs = offs;
assert(tdLegalOffset());
}
void tdAdjustTempOffs(int offs)
{
tdOffs += offs;
assert(tdLegalOffset());
}
int tdTempNum() const
{
assert(tdNum < 0);
return tdNum;
}
unsigned tdTempSize() const
{
return tdSize;
}
var_types tdTempType() const
{
return tdType;
}
};
// interface to hide linearscan implementation from rest of compiler
class LinearScanInterface
{
public:
virtual void doLinearScan() = 0;
virtual void recordVarLocationsAtStartOfBB(BasicBlock* bb) = 0;
virtual bool willEnregisterLocalVars() const = 0;
#if TRACK_LSRA_STATS
virtual void dumpLsraStatsCsv(FILE* file) = 0;
virtual void dumpLsraStatsSummary(FILE* file) = 0;
#endif // TRACK_LSRA_STATS
};
LinearScanInterface* getLinearScanAllocator(Compiler* comp);
// This enumeration names the phases into which we divide compilation. The phases should completely
// partition a compilation.
enum Phases
{
#define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) enum_nm,
#include "compphases.h"
PHASE_NUMBER_OF
};
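// Illustrative note (editorial): each CompPhaseNameMacro entry in compphases.h contributes only
// its enum_nm argument here, so a hypothetical entry such as
// CompPhaseNameMacro(PHASE_EXAMPLE, "Example phase", "EXAMPLE", false, -1, false)
// would expand to the single enumerator PHASE_EXAMPLE.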
extern const char* PhaseNames[];
extern const char* PhaseEnums[];
extern const LPCWSTR PhaseShortNames[];
// Specify which checks should be run after each phase
//
enum class PhaseChecks
{
CHECK_NONE,
CHECK_ALL
};
// Specify compiler data that a phase might modify
enum class PhaseStatus : unsigned
{
MODIFIED_NOTHING,
MODIFIED_EVERYTHING
};
// The following enum provides a simple 1:1 mapping to CLR APIs
enum API_ICorJitInfo_Names
{
#define DEF_CLR_API(name) API_##name,
#include "ICorJitInfo_API_names.h"
API_COUNT
};
//---------------------------------------------------------------
// Compilation time.
//
// A "CompTimeInfo" is a structure for tracking the compilation time of one or more methods.
// We divide a compilation into a sequence of contiguous phases, and track the total (per-thread) cycles
// of the compilation, as well as the cycles for each phase. We also track the number of bytecodes.
// If there is a failure in reading a timer at any point, the "CompTimeInfo" becomes invalid, as indicated
// by "m_timerFailure" being true.
// If FEATURE_JIT_METHOD_PERF is not set, we define a minimal form of this, enough to let other code compile.
struct CompTimeInfo
{
#ifdef FEATURE_JIT_METHOD_PERF
// The string names of the phases.
static const char* PhaseNames[];
static bool PhaseHasChildren[];
static int PhaseParent[];
static bool PhaseReportsIRSize[];
unsigned m_byteCodeBytes;
unsigned __int64 m_totalCycles;
unsigned __int64 m_invokesByPhase[PHASE_NUMBER_OF];
unsigned __int64 m_cyclesByPhase[PHASE_NUMBER_OF];
#if MEASURE_CLRAPI_CALLS
unsigned __int64 m_CLRinvokesByPhase[PHASE_NUMBER_OF];
unsigned __int64 m_CLRcyclesByPhase[PHASE_NUMBER_OF];
#endif
unsigned m_nodeCountAfterPhase[PHASE_NUMBER_OF];
// For better documentation, we call EndPhase on
// non-leaf phases. We should also call EndPhase on the
// last leaf subphase; obviously, the elapsed cycles between the EndPhase
// for the last leaf subphase and the EndPhase for an ancestor should be very small.
// We add all such "redundant end phase" intervals to this variable below; we print
// it out in a report, so we can verify that it is, indeed, very small. If it ever
// isn't, this means that we're doing something significant between the end of the last
// declared subphase and the end of its parent.
unsigned __int64 m_parentPhaseEndSlop;
bool m_timerFailure;
#if MEASURE_CLRAPI_CALLS
// The following measures the time spent inside each individual CLR API call.
unsigned m_allClrAPIcalls;
unsigned m_perClrAPIcalls[API_ICorJitInfo_Names::API_COUNT];
unsigned __int64 m_allClrAPIcycles;
unsigned __int64 m_perClrAPIcycles[API_ICorJitInfo_Names::API_COUNT];
unsigned __int32 m_maxClrAPIcycles[API_ICorJitInfo_Names::API_COUNT];
#endif // MEASURE_CLRAPI_CALLS
CompTimeInfo(unsigned byteCodeBytes);
#endif
};
#ifdef FEATURE_JIT_METHOD_PERF
#if MEASURE_CLRAPI_CALLS
struct WrapICorJitInfo;
#endif
// This class summarizes the JIT time information over the course of a run: the number of methods compiled,
// and the total and maximum timings. (These are instances of the "CompTimeInfo" type described above).
// The operation of adding a single method's timing to the summary may be performed concurrently by several
// threads, so it is protected by a lock.
// This class is intended to be used as a singleton type, with only a single instance.
class CompTimeSummaryInfo
{
// This lock protects the fields of all CompTimeSummaryInfo(s) (of which we expect there to be one).
static CritSecObject s_compTimeSummaryLock;
int m_numMethods;
int m_totMethods;
CompTimeInfo m_total;
CompTimeInfo m_maximum;
int m_numFilteredMethods;
CompTimeInfo m_filtered;
// This can use whatever data you want to determine if the value to be added
// belongs in the filtered section (it's always included in the unfiltered section)
bool IncludedInFilteredData(CompTimeInfo& info);
public:
// This is the unique CompTimeSummaryInfo object for this instance of the runtime.
static CompTimeSummaryInfo s_compTimeSummary;
CompTimeSummaryInfo()
: m_numMethods(0), m_totMethods(0), m_total(0), m_maximum(0), m_numFilteredMethods(0), m_filtered(0)
{
}
// Assumes that "info" is a completed CompTimeInfo for a compilation; adds it to the summary.
// This is thread safe.
void AddInfo(CompTimeInfo& info, bool includePhases);
// Print the summary information to "f".
// This is not thread-safe; assumed to be called by only one thread.
void Print(FILE* f);
};
// A JitTimer encapsulates a CompTimeInfo for a single compilation. It also tracks the start of compilation,
// and when the current phase started. This is intended to be part of a Compilation object.
//
class JitTimer
{
unsigned __int64 m_start; // Start of the compilation.
unsigned __int64 m_curPhaseStart; // Start of the current phase.
#if MEASURE_CLRAPI_CALLS
unsigned __int64 m_CLRcallStart; // Start of the current CLR API call (if any).
unsigned __int64 m_CLRcallInvokes; // CLR API invokes under current outer so far
unsigned __int64 m_CLRcallCycles; // CLR API cycles under current outer so far.
int m_CLRcallAPInum; // The enum/index of the current CLR API call (or -1).
static double s_cyclesPerSec; // Cached for speedier measurements
#endif
#ifdef DEBUG
Phases m_lastPhase; // The last phase that was completed (or (Phases)-1 to start).
#endif
CompTimeInfo m_info; // The CompTimeInfo for this compilation.
static CritSecObject s_csvLock; // Lock to protect the time log file.
static FILE* s_csvFile; // The time log file handle.
void PrintCsvMethodStats(Compiler* comp);
private:
void* operator new(size_t);
void* operator new[](size_t);
void operator delete(void*);
void operator delete[](void*);
public:
// Initializes the timer instance
JitTimer(unsigned byteCodeSize);
static JitTimer* Create(Compiler* comp, unsigned byteCodeSize)
{
return ::new (comp, CMK_Unknown) JitTimer(byteCodeSize);
}
static void PrintCsvHeader();
// Ends the current phase (argument is for a redundant check).
void EndPhase(Compiler* compiler, Phases phase);
#if MEASURE_CLRAPI_CALLS
// Start and end a timed CLR API call.
void CLRApiCallEnter(unsigned apix);
void CLRApiCallLeave(unsigned apix);
#endif // MEASURE_CLRAPI_CALLS
// Completes the timing of the current method, which is assumed to have "byteCodeBytes" bytes of bytecode,
// and adds it to "sum".
void Terminate(Compiler* comp, CompTimeSummaryInfo& sum, bool includePhases);
// Attempts to query the cycle counter of the current thread. If successful, returns "true" and sets
// *cycles to the cycle counter value. Otherwise, returns false and sets the "m_timerFailure" flag of
// "m_info" to true.
bool GetThreadCycles(unsigned __int64* cycles)
{
bool res = CycleTimer::GetThreadCyclesS(cycles);
if (!res)
{
m_info.m_timerFailure = true;
}
return res;
}
static void Shutdown();
};
#endif // FEATURE_JIT_METHOD_PERF
//------------------- Function/Funclet info -------------------------------
enum FuncKind : BYTE
{
FUNC_ROOT, // The main/root function (always id==0)
FUNC_HANDLER, // a funclet associated with an EH handler (finally, fault, catch, filter handler)
FUNC_FILTER, // a funclet associated with an EH filter
FUNC_COUNT
};
class emitLocation;
struct FuncInfoDsc
{
FuncKind funKind;
BYTE funFlags; // Currently unused, just here for padding
unsigned short funEHIndex; // index, into the ebd table, of innermost EH clause corresponding to this
// funclet. It is only valid if the funKind field indicates this is an
// EH-related funclet: FUNC_HANDLER or FUNC_FILTER
#if defined(TARGET_AMD64)
// TODO-AMD64-Throughput: make the AMD64 info more like the ARM info to avoid having this large static array.
emitLocation* startLoc;
emitLocation* endLoc;
emitLocation* coldStartLoc; // locations for the cold section, if there is one.
emitLocation* coldEndLoc;
UNWIND_INFO unwindHeader;
// Maximum of 255 UNWIND_CODE 'nodes' and then the unwind header. If there are an odd
// number of codes, the VM or Zapper will 4-byte align the whole thing.
BYTE unwindCodes[offsetof(UNWIND_INFO, UnwindCode) + (0xFF * sizeof(UNWIND_CODE))];
unsigned unwindCodeSlot;
#elif defined(TARGET_X86)
emitLocation* startLoc;
emitLocation* endLoc;
emitLocation* coldStartLoc; // locations for the cold section, if there is one.
emitLocation* coldEndLoc;
#elif defined(TARGET_ARMARCH)
UnwindInfo uwi; // Unwind information for this function/funclet's hot section
UnwindInfo* uwiCold; // Unwind information for this function/funclet's cold section
// Note: we only have a pointer here instead of the actual object,
// to save memory in the JIT case (compared to the NGEN case),
// where we don't have any cold section.
// Note 2: we currently don't support hot/cold splitting in functions
// with EH, so uwiCold will be NULL for all funclets.
emitLocation* startLoc;
emitLocation* endLoc;
emitLocation* coldStartLoc; // locations for the cold section, if there is one.
emitLocation* coldEndLoc;
#endif // TARGET_ARMARCH
#if defined(FEATURE_CFI_SUPPORT)
jitstd::vector<CFI_CODE>* cfiCodes;
#endif // FEATURE_CFI_SUPPORT
// Eventually we may want to move rsModifiedRegsMask, lvaOutgoingArgSize, and anything else
// that isn't shared between the main function body and funclets.
};
struct fgArgTabEntry
{
GenTreeCall::Use* use; // Points to the argument's GenTreeCall::Use in gtCallArgs or gtCallThisArg.
GenTreeCall::Use* lateUse; // Points to the argument's GenTreeCall::Use in gtCallLateArgs, if any.
// Get the node that corresponds to this argument entry.
// This is the "real" node and not a placeholder or setup node.
GenTree* GetNode() const
{
return lateUse == nullptr ? use->GetNode() : lateUse->GetNode();
}
unsigned argNum; // The original argument number, also specifies the required argument evaluation order from the IL
private:
regNumberSmall regNums[MAX_ARG_REG_COUNT]; // The registers to use when passing this argument, set to REG_STK for
// arguments passed on the stack
public:
unsigned numRegs; // Count of number of registers that this argument uses.
// Note that on ARM, if we have a double hfa, this reflects the number
// of DOUBLE registers.
#if defined(UNIX_AMD64_ABI)
// Unix amd64 will split floating point types and integer types in structs
// between floating point and general purpose registers. Keep track of that
// information so we do not need to recompute it later.
unsigned structIntRegs;
unsigned structFloatRegs;
#endif // UNIX_AMD64_ABI
#if defined(DEBUG_ARG_SLOTS)
// These fields were used to calculate stack size in stack slots for arguments
// but now they are replaced by precise `m_byteOffset/m_byteSize` because of
// arm64 apple abi requirements.
// A slot is a pointer sized region in the OutArg area.
unsigned slotNum; // When an argument is passed in the OutArg area this is the slot number in the OutArg area
unsigned numSlots; // Count of number of slots that this argument uses
#endif // DEBUG_ARG_SLOTS
// Return number of stack slots that this argument is taking.
// TODO-Cleanup: this function does not align with the arm64 apple model, so
// delete it. In most cases we just want to know whether the argument uses the stack,
// but in some cases we are checking if it is a multireg arg, like:
// `numRegs + GetStackSlotsNumber() > 1`, which is harder to replace.
//
unsigned GetStackSlotsNumber() const
{
return roundUp(GetStackByteSize(), TARGET_POINTER_SIZE) / TARGET_POINTER_SIZE;
}
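// Illustrative example (editorial): an argument with GetStackByteSize() == 12 on a 64-bit
// target yields roundUp(12, 8) / 8 == 2, so GetStackSlotsNumber() returns 2.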
private:
unsigned _lateArgInx; // index into gtCallLateArgs list; UINT_MAX if this is not a late arg.
public:
unsigned tmpNum; // the LclVar number if we had to force evaluation of this arg
var_types argType; // The type used to pass this argument. This is generally the original argument type, but when a
// struct is passed as a scalar type, this is that type.
// Note that if a struct is passed by reference, this will still be the struct type.
bool needTmp : 1; // True when we force this argument's evaluation into a temp LclVar
bool needPlace : 1; // True when we must replace this argument with a placeholder node
bool isTmp : 1; // True when we setup a temp LclVar for this argument due to size issues with the struct
bool processed : 1; // True when we have decided the evaluation order for this argument in the gtCallLateArgs
bool isBackFilled : 1; // True when the argument fills a register slot skipped due to alignment requirements of
// previous arguments.
NonStandardArgKind nonStandardArgKind : 4; // The non-standard arg kind. Non-standard args are args that are forced
// to be in certain registers or on the stack, regardless of where they
// appear in the arg list.
bool isStruct : 1; // True if this is a struct arg
bool _isVararg : 1; // True if the argument is in a vararg context.
bool passedByRef : 1; // True iff the argument is passed by reference.
#if FEATURE_ARG_SPLIT
bool _isSplit : 1; // True when this argument is split between the registers and OutArg area
#endif // FEATURE_ARG_SPLIT
#ifdef FEATURE_HFA_FIELDS_PRESENT
CorInfoHFAElemType _hfaElemKind : 3; // What kind of an HFA this is (CORINFO_HFA_ELEM_NONE if it is not an HFA).
#endif
CorInfoHFAElemType GetHfaElemKind() const
{
#ifdef FEATURE_HFA_FIELDS_PRESENT
return _hfaElemKind;
#else
NOWAY_MSG("GetHfaElemKind");
return CORINFO_HFA_ELEM_NONE;
#endif
}
void SetHfaElemKind(CorInfoHFAElemType elemKind)
{
#ifdef FEATURE_HFA_FIELDS_PRESENT
_hfaElemKind = elemKind;
#else
NOWAY_MSG("SetHfaElemKind");
#endif
}
bool isNonStandard() const
{
return nonStandardArgKind != NonStandardArgKind::None;
}
// Returns true if the IR node for this non-standard arg is added by fgInitArgInfo.
// In this case, it must be removed by GenTreeCall::ResetArgInfo.
bool isNonStandardArgAddedLate() const
{
switch (static_cast<NonStandardArgKind>(nonStandardArgKind))
{
case NonStandardArgKind::None:
case NonStandardArgKind::PInvokeFrame:
case NonStandardArgKind::ShiftLow:
case NonStandardArgKind::ShiftHigh:
case NonStandardArgKind::FixedRetBuffer:
case NonStandardArgKind::ValidateIndirectCallTarget:
return false;
case NonStandardArgKind::WrapperDelegateCell:
case NonStandardArgKind::VirtualStubCell:
case NonStandardArgKind::PInvokeCookie:
case NonStandardArgKind::PInvokeTarget:
case NonStandardArgKind::R2RIndirectionCell:
return true;
default:
unreached();
}
}
bool isLateArg() const
{
bool isLate = (_lateArgInx != UINT_MAX);
return isLate;
}
unsigned GetLateArgInx() const
{
assert(isLateArg());
return _lateArgInx;
}
void SetLateArgInx(unsigned inx)
{
_lateArgInx = inx;
}
regNumber GetRegNum() const
{
return (regNumber)regNums[0];
}
regNumber GetOtherRegNum() const
{
return (regNumber)regNums[1];
}
#if defined(UNIX_AMD64_ABI)
SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc;
#endif
void setRegNum(unsigned int i, regNumber regNum)
{
assert(i < MAX_ARG_REG_COUNT);
regNums[i] = (regNumberSmall)regNum;
}
regNumber GetRegNum(unsigned int i)
{
assert(i < MAX_ARG_REG_COUNT);
return (regNumber)regNums[i];
}
bool IsSplit() const
{
#if FEATURE_ARG_SPLIT
return compFeatureArgSplit() && _isSplit;
#else // FEATURE_ARG_SPLIT
return false;
#endif
}
void SetSplit(bool value)
{
#if FEATURE_ARG_SPLIT
_isSplit = value;
#endif
}
bool IsVararg() const
{
return compFeatureVarArg() && _isVararg;
}
void SetIsVararg(bool value)
{
if (compFeatureVarArg())
{
_isVararg = value;
}
}
bool IsHfaArg() const
{
if (GlobalJitOptions::compFeatureHfa)
{
return IsHfa(GetHfaElemKind());
}
else
{
return false;
}
}
bool IsHfaRegArg() const
{
if (GlobalJitOptions::compFeatureHfa)
{
return IsHfa(GetHfaElemKind()) && isPassedInRegisters();
}
else
{
return false;
}
}
unsigned intRegCount() const
{
#if defined(UNIX_AMD64_ABI)
if (this->isStruct)
{
return this->structIntRegs;
}
#endif // defined(UNIX_AMD64_ABI)
if (!this->isPassedInFloatRegisters())
{
return this->numRegs;
}
return 0;
}
unsigned floatRegCount() const
{
#if defined(UNIX_AMD64_ABI)
if (this->isStruct)
{
return this->structFloatRegs;
}
#endif // defined(UNIX_AMD64_ABI)
if (this->isPassedInFloatRegisters())
{
return this->numRegs;
}
return 0;
}
// Get the number of bytes that this argument is occupying on the stack,
// including padding up to the target pointer size for platforms
// where a stack argument can't take less.
unsigned GetStackByteSize() const
{
if (!IsSplit() && numRegs > 0)
{
return 0;
}
assert(!IsHfaArg() || !IsSplit());
assert(GetByteSize() > TARGET_POINTER_SIZE * numRegs);
const unsigned stackByteSize = GetByteSize() - TARGET_POINTER_SIZE * numRegs;
return stackByteSize;
}
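// Illustrative examples (editorial): an argument passed purely on the stack (numRegs == 0)
// returns its full GetByteSize(); on Arm32, a 24-byte struct split across two registers
// (numRegs == 2, TARGET_POINTER_SIZE == 4) returns 24 - 2 * 4 == 16 stack bytes.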
var_types GetHfaType() const
{
if (GlobalJitOptions::compFeatureHfa)
{
return HfaTypeFromElemKind(GetHfaElemKind());
}
else
{
return TYP_UNDEF;
}
}
void SetHfaType(var_types type, unsigned hfaSlots)
{
if (GlobalJitOptions::compFeatureHfa)
{
if (type != TYP_UNDEF)
{
// We must already have set the passing mode.
assert(numRegs != 0 || GetStackByteSize() != 0);
// We originally set numRegs according to the size of the struct, but if the size of the
// hfaType is not the same as the pointer size, we need to correct it.
// Note that hfaSlots is the number of registers we will use. For ARM, that is twice
// the number of "double registers".
unsigned numHfaRegs = hfaSlots;
#ifdef TARGET_ARM
if (type == TYP_DOUBLE)
{
// Must be an even number of registers.
assert((numRegs & 1) == 0);
numHfaRegs = hfaSlots / 2;
}
#endif // TARGET_ARM
if (!IsHfaArg())
{
// We haven't previously set this; do so now.
CorInfoHFAElemType elemKind = HfaElemKindFromType(type);
SetHfaElemKind(elemKind);
// Ensure we've allocated enough bits.
assert(GetHfaElemKind() == elemKind);
if (isPassedInRegisters())
{
numRegs = numHfaRegs;
}
}
else
{
// We've already set this; ensure that it's consistent.
if (isPassedInRegisters())
{
assert(numRegs == numHfaRegs);
}
assert(type == HfaTypeFromElemKind(GetHfaElemKind()));
}
}
}
}
#ifdef TARGET_ARM
void SetIsBackFilled(bool backFilled)
{
isBackFilled = backFilled;
}
bool IsBackFilled() const
{
return isBackFilled;
}
#else // !TARGET_ARM
void SetIsBackFilled(bool backFilled)
{
}
bool IsBackFilled() const
{
return false;
}
#endif // !TARGET_ARM
bool isPassedInRegisters() const
{
return !IsSplit() && (numRegs != 0);
}
bool isPassedInFloatRegisters() const
{
#ifdef TARGET_X86
return false;
#else
return isValidFloatArgReg(GetRegNum());
#endif
}
// Can we replace the struct type of this node with a primitive type for argument passing?
bool TryPassAsPrimitive() const
{
return !IsSplit() && ((numRegs == 1) || (m_byteSize <= TARGET_POINTER_SIZE));
}
#if defined(DEBUG_ARG_SLOTS)
// Returns the number of "slots" used, where for this purpose a
// register counts as a slot.
unsigned getSlotCount() const
{
if (isBackFilled)
{
assert(isPassedInRegisters());
assert(numRegs == 1);
}
else if (GetRegNum() == REG_STK)
{
assert(!isPassedInRegisters());
assert(numRegs == 0);
}
else
{
assert(numRegs > 0);
}
return numSlots + numRegs;
}
#endif
#if defined(DEBUG_ARG_SLOTS)
// Returns the size as a multiple of pointer-size.
// For targets without HFAs, this is the same as getSlotCount().
unsigned getSize() const
{
unsigned size = getSlotCount();
if (GlobalJitOptions::compFeatureHfa)
{
if (IsHfaRegArg())
{
#ifdef TARGET_ARM
// We counted the number of regs, but if they are DOUBLE hfa regs we have to double the size.
if (GetHfaType() == TYP_DOUBLE)
{
assert(!IsSplit());
size <<= 1;
}
#elif defined(TARGET_ARM64)
// We counted the number of regs, but if they are FLOAT hfa regs we have to halve the size,
// or if they are SIMD16 vector hfa regs we have to double the size.
if (GetHfaType() == TYP_FLOAT)
{
// Round up in case of odd HFA count.
size = (size + 1) >> 1;
}
#ifdef FEATURE_SIMD
else if (GetHfaType() == TYP_SIMD16)
{
size <<= 1;
}
#endif // FEATURE_SIMD
#endif // TARGET_ARM64
}
}
return size;
}
#endif // DEBUG_ARG_SLOTS
private:
unsigned m_byteOffset;
// byte size that this argument takes including the padding after.
// For example, a 1-byte arg on x64 with 8-byte alignment
// will have `m_byteSize == 8`, while the same arg on apple arm64 will have `m_byteSize == 1`.
unsigned m_byteSize;
unsigned m_byteAlignment; // usually 4 or 8 bytes (slots/registers).
public:
void SetByteOffset(unsigned byteOffset)
{
DEBUG_ARG_SLOTS_ASSERT(byteOffset / TARGET_POINTER_SIZE == slotNum);
m_byteOffset = byteOffset;
}
unsigned GetByteOffset() const
{
DEBUG_ARG_SLOTS_ASSERT(m_byteOffset / TARGET_POINTER_SIZE == slotNum);
return m_byteOffset;
}
void SetByteSize(unsigned byteSize, bool isStruct, bool isFloatHfa)
{
unsigned roundedByteSize;
if (compMacOsArm64Abi())
{
// Only struct types need extension or rounding to pointer size, but HFA<float> does not.
if (isStruct && !isFloatHfa)
{
roundedByteSize = roundUp(byteSize, TARGET_POINTER_SIZE);
}
else
{
roundedByteSize = byteSize;
}
}
else
{
roundedByteSize = roundUp(byteSize, TARGET_POINTER_SIZE);
}
#if !defined(TARGET_ARM)
// Arm32 could have a struct with 8-byte alignment
// whose rounded size % 8 is not 0.
assert(m_byteAlignment != 0);
assert(roundedByteSize % m_byteAlignment == 0);
#endif // TARGET_ARM
#if defined(DEBUG_ARG_SLOTS)
if (!compMacOsArm64Abi() && !isStruct)
{
assert(roundedByteSize == getSlotCount() * TARGET_POINTER_SIZE);
}
#endif
m_byteSize = roundedByteSize;
}
unsigned GetByteSize() const
{
return m_byteSize;
}
void SetByteAlignment(unsigned byteAlignment)
{
m_byteAlignment = byteAlignment;
}
unsigned GetByteAlignment() const
{
return m_byteAlignment;
}
// Set the register numbers for a multireg argument.
// There's nothing to do on x64/Ux because the structDesc has already been used to set the
// register numbers.
void SetMultiRegNums()
{
#if FEATURE_MULTIREG_ARGS && !defined(UNIX_AMD64_ABI)
if (numRegs == 1)
{
return;
}
regNumber argReg = GetRegNum(0);
#ifdef TARGET_ARM
unsigned int regSize = (GetHfaType() == TYP_DOUBLE) ? 2 : 1;
#else
unsigned int regSize = 1;
#endif
if (numRegs > MAX_ARG_REG_COUNT)
NO_WAY("Multireg argument exceeds the maximum length");
for (unsigned int regIndex = 1; regIndex < numRegs; regIndex++)
{
argReg = (regNumber)(argReg + regSize);
setRegNum(regIndex, argReg);
}
#endif // FEATURE_MULTIREG_ARGS && !defined(UNIX_AMD64_ABI)
}
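// Illustrative example (editorial): a struct passed in two consecutive registers whose first
// register was recorded as, say, REG_R2 gets regNums[1] filled in as REG_R3 by the loop above;
// on ARM, a DOUBLE HFA advances by two float registers per element instead.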
#ifdef DEBUG
// Check that the value of 'isStruct' is consistent.
// A struct arg must be one of the following:
// - A node of struct type,
// - A GT_FIELD_LIST, or
// - A node of a scalar type, passed in a single register or slot
// (or two slots in the case of a struct pass on the stack as TYP_DOUBLE).
//
void checkIsStruct() const
{
GenTree* node = GetNode();
if (isStruct)
{
if (!varTypeIsStruct(node) && !node->OperIs(GT_FIELD_LIST))
{
// This is the case where we are passing a struct as a primitive type.
// On most targets, this is always a single register or slot.
// However, on ARM this could be two slots if it is TYP_DOUBLE.
bool isPassedAsPrimitiveType =
((numRegs == 1) || ((numRegs == 0) && (GetByteSize() <= TARGET_POINTER_SIZE)));
#ifdef TARGET_ARM
if (!isPassedAsPrimitiveType)
{
if (node->TypeGet() == TYP_DOUBLE && numRegs == 0 && (numSlots == 2))
{
isPassedAsPrimitiveType = true;
}
}
#endif // TARGET_ARM
assert(isPassedAsPrimitiveType);
}
}
else
{
assert(!varTypeIsStruct(node));
}
}
void Dump() const;
#endif
};
//-------------------------------------------------------------------------
//
// The class fgArgInfo is used to handle the arguments
// when morphing a GT_CALL node.
//
class fgArgInfo
{
Compiler* compiler; // Back pointer to the compiler instance so that we can allocate memory
GenTreeCall* callTree; // Back pointer to the GT_CALL node for this fgArgInfo
unsigned argCount; // Updatable arg count value
#if defined(DEBUG_ARG_SLOTS)
unsigned nextSlotNum; // Updatable slot count value
#endif
unsigned nextStackByteOffset;
unsigned stkLevel; // Stack depth when we make this call (for x86)
#if defined(UNIX_X86_ABI)
bool alignmentDone; // Updateable flag, set to 'true' after we've done any required alignment.
unsigned stkSizeBytes; // Size of stack used by this call, in bytes. Calculated during fgMorphArgs().
unsigned padStkAlign; // Stack alignment in bytes required before arguments are pushed for this call.
// Computed dynamically during codegen, based on stkSizeBytes and the current
// stack level (genStackLevel) when the first stack adjustment is made for
// this call.
#endif
#if FEATURE_FIXED_OUT_ARGS
unsigned outArgSize; // Size of the out arg area for the call, will be at least MIN_ARG_AREA_FOR_CALL
#endif
unsigned argTableSize; // size of argTable array (equal to the argCount when done with fgMorphArgs)
bool hasRegArgs; // true if we have one or more register arguments
bool hasStackArgs; // true if we have one or more stack arguments
bool argsComplete; // marker for state
bool argsSorted; // marker for state
bool needsTemps; // one or more arguments must be copied to a temp by EvalArgsToTemps
fgArgTabEntry** argTable; // variable sized array of per-argument descriptions: (i.e. argTable[argTableSize])
private:
void AddArg(fgArgTabEntry* curArgTabEntry);
public:
fgArgInfo(Compiler* comp, GenTreeCall* call, unsigned argCount);
fgArgInfo(GenTreeCall* newCall, GenTreeCall* oldCall);
fgArgTabEntry* AddRegArg(unsigned argNum,
GenTree* node,
GenTreeCall::Use* use,
regNumber regNum,
unsigned numRegs,
unsigned byteSize,
unsigned byteAlignment,
bool isStruct,
bool isFloatHfa,
bool isVararg = false);
#ifdef UNIX_AMD64_ABI
fgArgTabEntry* AddRegArg(unsigned argNum,
GenTree* node,
GenTreeCall::Use* use,
regNumber regNum,
unsigned numRegs,
unsigned byteSize,
unsigned byteAlignment,
const bool isStruct,
const bool isFloatHfa,
const bool isVararg,
const regNumber otherRegNum,
const unsigned structIntRegs,
const unsigned structFloatRegs,
const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* const structDescPtr = nullptr);
#endif // UNIX_AMD64_ABI
fgArgTabEntry* AddStkArg(unsigned argNum,
GenTree* node,
GenTreeCall::Use* use,
unsigned numSlots,
unsigned byteSize,
unsigned byteAlignment,
bool isStruct,
bool isFloatHfa,
bool isVararg = false);
void RemorphReset();
void UpdateRegArg(fgArgTabEntry* argEntry, GenTree* node, bool reMorphing);
void UpdateStkArg(fgArgTabEntry* argEntry, GenTree* node, bool reMorphing);
void SplitArg(unsigned argNum, unsigned numRegs, unsigned numSlots);
void EvalToTmp(fgArgTabEntry* curArgTabEntry, unsigned tmpNum, GenTree* newNode);
void ArgsComplete();
void SortArgs();
void EvalArgsToTemps();
unsigned ArgCount() const
{
return argCount;
}
fgArgTabEntry** ArgTable() const
{
return argTable;
}
#if defined(DEBUG_ARG_SLOTS)
unsigned GetNextSlotNum() const
{
return nextSlotNum;
}
#endif
unsigned GetNextSlotByteOffset() const
{
return nextStackByteOffset;
}
bool HasRegArgs() const
{
return hasRegArgs;
}
bool NeedsTemps() const
{
return needsTemps;
}
bool HasStackArgs() const
{
return hasStackArgs;
}
bool AreArgsComplete() const
{
return argsComplete;
}
#if FEATURE_FIXED_OUT_ARGS
unsigned GetOutArgSize() const
{
return outArgSize;
}
void SetOutArgSize(unsigned newVal)
{
outArgSize = newVal;
}
#endif // FEATURE_FIXED_OUT_ARGS
#if defined(UNIX_X86_ABI)
void ComputeStackAlignment(unsigned curStackLevelInBytes)
{
padStkAlign = AlignmentPad(curStackLevelInBytes, STACK_ALIGN);
}
unsigned GetStkAlign() const
{
return padStkAlign;
}
void SetStkSizeBytes(unsigned newStkSizeBytes)
{
stkSizeBytes = newStkSizeBytes;
}
unsigned GetStkSizeBytes() const
{
return stkSizeBytes;
}
bool IsStkAlignmentDone() const
{
return alignmentDone;
}
void SetStkAlignmentDone()
{
alignmentDone = true;
}
#endif // defined(UNIX_X86_ABI)
// Get the fgArgTabEntry for the arg at position argNum.
fgArgTabEntry* GetArgEntry(unsigned argNum, bool reMorphing = true) const
{
fgArgTabEntry* curArgTabEntry = nullptr;
if (!reMorphing)
{
// The arg table has not yet been sorted.
curArgTabEntry = argTable[argNum];
assert(curArgTabEntry->argNum == argNum);
return curArgTabEntry;
}
for (unsigned i = 0; i < argCount; i++)
{
curArgTabEntry = argTable[i];
if (curArgTabEntry->argNum == argNum)
{
return curArgTabEntry;
}
}
noway_assert(!"GetArgEntry: argNum not found");
return nullptr;
}
void SetNeedsTemps()
{
needsTemps = true;
}
// Get the node for the arg at position argIndex.
// Caller must ensure that this index is a valid arg index.
GenTree* GetArgNode(unsigned argIndex) const
{
return GetArgEntry(argIndex)->GetNode();
}
void Dump(Compiler* compiler) const;
};
#ifdef DEBUG
// XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
// We have the ability to mark source expressions with "Test Labels."
// These drive assertions within the JIT, or internal JIT testing. For example, we could label expressions
// that should be CSE defs, and other expressions that should uses of those defs, with a shared label.
enum TestLabel // This must be kept identical to System.Runtime.CompilerServices.JitTestLabel.TestLabel.
{
TL_SsaName,
TL_VN, // Defines a "VN equivalence class". (For full VN, including exceptions thrown).
TL_VNNorm, // Like above, but uses the non-exceptional value of the expression.
TL_CSE_Def, // This must be identified in the JIT as a CSE def
TL_CSE_Use, // This must be identified in the JIT as a CSE use
TL_LoopHoist, // Expression must (or must not) be hoisted out of the loop.
};
struct TestLabelAndNum
{
TestLabel m_tl;
ssize_t m_num;
TestLabelAndNum() : m_tl(TestLabel(0)), m_num(0)
{
}
};
typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, TestLabelAndNum> NodeToTestDataMap;
// XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#endif // DEBUG
//-------------------------------------------------------------------------
// LoopFlags: flags for the loop table.
//
enum LoopFlags : unsigned short
{
LPFLG_EMPTY = 0,
// LPFLG_UNUSED = 0x0001,
// LPFLG_UNUSED = 0x0002,
LPFLG_ITER = 0x0004, // loop of form: for (i = icon or expression; test_condition(); i++)
// LPFLG_UNUSED = 0x0008,
LPFLG_CONTAINS_CALL = 0x0010, // If executing the loop body *may* execute a call
// LPFLG_UNUSED = 0x0020,
LPFLG_CONST_INIT = 0x0040, // iterator is initialized with a constant (found in lpConstInit)
LPFLG_SIMD_LIMIT = 0x0080, // iterator is compared with vector element count (found in lpConstLimit)
LPFLG_VAR_LIMIT = 0x0100, // iterator is compared with a local var (var # found in lpVarLimit)
LPFLG_CONST_LIMIT = 0x0200, // iterator is compared with a constant (found in lpConstLimit)
LPFLG_ARRLEN_LIMIT = 0x0400, // iterator is compared with a.len or a[i].len (found in lpArrLenLimit)
LPFLG_HAS_PREHEAD = 0x0800, // lpHead is known to be a preHead for this loop
LPFLG_REMOVED = 0x1000, // has been removed from the loop table (unrolled or optimized away)
LPFLG_DONT_UNROLL = 0x2000, // do not unroll this loop
LPFLG_ASGVARS_YES = 0x4000, // "lpAsgVars" has been computed
LPFLG_ASGVARS_INC = 0x8000, // "lpAsgVars" is incomplete -- vars beyond those representable in an AllVarSet
// type are assigned to.
};
inline constexpr LoopFlags operator~(LoopFlags a)
{
return (LoopFlags)(~(unsigned short)a);
}
inline constexpr LoopFlags operator|(LoopFlags a, LoopFlags b)
{
return (LoopFlags)((unsigned short)a | (unsigned short)b);
}
inline constexpr LoopFlags operator&(LoopFlags a, LoopFlags b)
{
return (LoopFlags)((unsigned short)a & (unsigned short)b);
}
inline LoopFlags& operator|=(LoopFlags& a, LoopFlags b)
{
return a = (LoopFlags)((unsigned short)a | (unsigned short)b);
}
inline LoopFlags& operator&=(LoopFlags& a, LoopFlags b)
{
return a = (LoopFlags)((unsigned short)a & (unsigned short)b);
}
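// Minimal usage sketch (editorial; `loop` stands for a hypothetical loop-table entry with a
// LoopFlags field named lpFlags, used only for illustration):
//
// loop.lpFlags |= LPFLG_ITER | LPFLG_CONST_INIT;
// if ((loop.lpFlags & LPFLG_REMOVED) == LPFLG_EMPTY)
// {
// // the loop is still present in the loop table
// }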
// The following holds information about instr offsets in terms of generated code.
enum class IPmappingDscKind
{
Prolog, // The mapping represents the start of a prolog.
Epilog, // The mapping represents the start of an epilog.
NoMapping, // This does not map to any IL offset.
Normal, // The mapping maps to an IL offset.
};
struct IPmappingDsc
{
emitLocation ipmdNativeLoc; // the emitter location of the native code corresponding to the IL offset
IPmappingDscKind ipmdKind; // The kind of mapping
ILLocation ipmdLoc; // The location for normal mappings
bool ipmdIsLabel; // Can this code be a branch label?
};
struct PreciseIPMapping
{
emitLocation nativeLoc;
DebugInfo debugInfo;
};
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX The big guy. The sections are currently organized as : XX
XX XX
XX o GenTree and BasicBlock XX
XX o LclVarsInfo XX
XX o Importer XX
XX o FlowGraph XX
XX o Optimizer XX
XX o RegAlloc XX
XX o EEInterface XX
XX o TempsInfo XX
XX o RegSet XX
XX o GCInfo XX
XX o Instruction XX
XX o ScopeInfo XX
XX o PrologScopeInfo XX
XX o CodeGenerator XX
XX o UnwindInfo XX
XX o Compiler XX
XX o typeInfo XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
struct HWIntrinsicInfo;
class Compiler
{
friend class emitter;
friend class UnwindInfo;
friend class UnwindFragmentInfo;
friend class UnwindEpilogInfo;
friend class JitTimer;
friend class LinearScan;
friend class fgArgInfo;
friend class Rationalizer;
friend class Phase;
friend class Lowering;
friend class CSE_DataFlow;
friend class CSE_Heuristic;
friend class CodeGenInterface;
friend class CodeGen;
friend class LclVarDsc;
friend class TempDsc;
friend class LIR;
friend class ObjectAllocator;
friend class LocalAddressVisitor;
friend struct GenTree;
friend class MorphInitBlockHelper;
friend class MorphCopyBlockHelper;
#ifdef FEATURE_HW_INTRINSICS
friend struct HWIntrinsicInfo;
#endif // FEATURE_HW_INTRINSICS
#ifndef TARGET_64BIT
friend class DecomposeLongs;
#endif // !TARGET_64BIT
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Misc structs definitions XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
public:
hashBvGlobalData hbvGlobalData; // Used by the hashBv bitvector package.
#ifdef DEBUG
bool verbose;
bool verboseTrees;
bool shouldUseVerboseTrees();
bool asciiTrees; // If true, dump trees using only ASCII characters
bool shouldDumpASCIITrees();
bool verboseSsa; // If true, produce especially verbose dump output in SSA construction.
bool shouldUseVerboseSsa();
bool treesBeforeAfterMorph; // If true, print trees before/after morphing (paired by an intra-compilation id: morphNum)
int morphNum; // This counts the trees that have been morphed, allowing us to label each uniquely.
bool doExtraSuperPmiQueries;
void makeExtraStructQueries(CORINFO_CLASS_HANDLE structHandle, int level); // Make queries recursively 'level' deep.
const char* VarNameToStr(VarName name)
{
return name;
}
DWORD expensiveDebugCheckLevel;
#endif
#if FEATURE_MULTIREG_RET
GenTree* impAssignMultiRegTypeToVar(GenTree* op,
CORINFO_CLASS_HANDLE hClass DEBUGARG(CorInfoCallConvExtension callConv));
#endif // FEATURE_MULTIREG_RET
#ifdef TARGET_X86
bool isTrivialPointerSizedStruct(CORINFO_CLASS_HANDLE clsHnd) const;
#endif // TARGET_X86
//-------------------------------------------------------------------------
// Functions to handle homogeneous floating-point aggregates (HFAs) in ARM/ARM64.
// HFAs are one to four element structs where each element is the same
// type, either all float or all double. We handle HVAs (one to four elements of
// vector types) uniformly with HFAs. HFAs are treated specially
// in the ARM/ARM64 Procedure Call Standards, specifically, they are passed in
// floating-point registers instead of the general purpose registers.
//
bool IsHfa(CORINFO_CLASS_HANDLE hClass);
bool IsHfa(GenTree* tree);
var_types GetHfaType(GenTree* tree);
unsigned GetHfaCount(GenTree* tree);
var_types GetHfaType(CORINFO_CLASS_HANDLE hClass);
unsigned GetHfaCount(CORINFO_CLASS_HANDLE hClass);
bool IsMultiRegReturnedType(CORINFO_CLASS_HANDLE hClass, CorInfoCallConvExtension callConv);
//-------------------------------------------------------------------------
// The following is used for validating format of EH table
//
struct EHNodeDsc;
typedef struct EHNodeDsc* pEHNodeDsc;
EHNodeDsc* ehnTree; // root of the tree comprising the EHnodes.
EHNodeDsc* ehnNext; // next available EHNodeDsc in the preallocated array of EHnodes (allocation cursor).
struct EHNodeDsc
{
enum EHBlockType
{
TryNode,
FilterNode,
HandlerNode,
FinallyNode,
FaultNode
};
EHBlockType ehnBlockType; // kind of EH block
IL_OFFSET ehnStartOffset; // IL offset of start of the EH block
IL_OFFSET ehnEndOffset; // IL offset past end of the EH block. (TODO: looks like verInsertEhNode() sets this to
// the last IL offset, not "one past the last one", i.e., the range Start to End is
// inclusive).
pEHNodeDsc ehnNext; // next (non-nested) block in sequential order
pEHNodeDsc ehnChild; // leftmost nested block
union {
pEHNodeDsc ehnTryNode; // for filters and handlers, the corresponding try node
pEHNodeDsc ehnHandlerNode; // for a try node, the corresponding handler node
};
pEHNodeDsc ehnFilterNode; // if this is a try node and has a filter, otherwise 0
pEHNodeDsc ehnEquivalent; // if blockType=tryNode: an earlier try node with the same start and end offsets (a 'mutually protect' try), if any
void ehnSetTryNodeType()
{
ehnBlockType = TryNode;
}
void ehnSetFilterNodeType()
{
ehnBlockType = FilterNode;
}
void ehnSetHandlerNodeType()
{
ehnBlockType = HandlerNode;
}
void ehnSetFinallyNodeType()
{
ehnBlockType = FinallyNode;
}
void ehnSetFaultNodeType()
{
ehnBlockType = FaultNode;
}
bool ehnIsTryBlock()
{
return ehnBlockType == TryNode;
}
bool ehnIsFilterBlock()
{
return ehnBlockType == FilterNode;
}
bool ehnIsHandlerBlock()
{
return ehnBlockType == HandlerNode;
}
bool ehnIsFinallyBlock()
{
return ehnBlockType == FinallyNode;
}
bool ehnIsFaultBlock()
{
return ehnBlockType == FaultNode;
}
// returns true if there is any overlap between the two nodes
static bool ehnIsOverlap(pEHNodeDsc node1, pEHNodeDsc node2)
{
if (node1->ehnStartOffset < node2->ehnStartOffset)
{
return (node1->ehnEndOffset >= node2->ehnStartOffset);
}
else
{
return (node1->ehnStartOffset <= node2->ehnEndOffset);
}
}
// fails with BADCODE if inner is not completely nested inside outer
static bool ehnIsNested(pEHNodeDsc inner, pEHNodeDsc outer)
{
return ((inner->ehnStartOffset >= outer->ehnStartOffset) && (inner->ehnEndOffset <= outer->ehnEndOffset));
}
};
//-------------------------------------------------------------------------
// Exception handling functions
//
#if !defined(FEATURE_EH_FUNCLETS)
bool ehNeedsShadowSPslots()
{
return (info.compXcptnsCount || opts.compDbgEnC);
}
// 0 for methods with no EH
// 1 for methods with non-nested EH, or where only the try blocks are nested
// 2 for a method with a catch within a catch
// etc.
unsigned ehMaxHndNestingCount;
#endif // !FEATURE_EH_FUNCLETS
static bool jitIsBetween(unsigned value, unsigned start, unsigned end);
static bool jitIsBetweenInclusive(unsigned value, unsigned start, unsigned end);
bool bbInCatchHandlerILRange(BasicBlock* blk);
bool bbInFilterILRange(BasicBlock* blk);
bool bbInTryRegions(unsigned regionIndex, BasicBlock* blk);
bool bbInExnFlowRegions(unsigned regionIndex, BasicBlock* blk);
bool bbInHandlerRegions(unsigned regionIndex, BasicBlock* blk);
bool bbInCatchHandlerRegions(BasicBlock* tryBlk, BasicBlock* hndBlk);
unsigned short bbFindInnermostCommonTryRegion(BasicBlock* bbOne, BasicBlock* bbTwo);
unsigned short bbFindInnermostTryRegionContainingHandlerRegion(unsigned handlerIndex);
unsigned short bbFindInnermostHandlerRegionContainingTryRegion(unsigned tryIndex);
// Returns true if "block" is the start of a try region.
bool bbIsTryBeg(BasicBlock* block);
// Returns true if "block" is the start of a handler or filter region.
bool bbIsHandlerBeg(BasicBlock* block);
// Returns true iff "block" is where control flows if an exception is raised in the
// try region, and sets "*regionIndex" to the index of the try for the handler.
// Differs from "IsHandlerBeg" in the case of filters, where this is true for the first
// block of the filter, but not for the filter's handler.
bool bbIsExFlowBlock(BasicBlock* block, unsigned* regionIndex);
bool ehHasCallableHandlers();
// Return the EH descriptor for the given region index.
EHblkDsc* ehGetDsc(unsigned regionIndex);
// Return the EH index given a region descriptor.
unsigned ehGetIndex(EHblkDsc* ehDsc);
// Return the EH descriptor index of the enclosing try, for the given region index.
unsigned ehGetEnclosingTryIndex(unsigned regionIndex);
// Return the EH descriptor index of the enclosing handler, for the given region index.
unsigned ehGetEnclosingHndIndex(unsigned regionIndex);
// Return the EH descriptor for the most nested 'try' region this BasicBlock is a member of (or nullptr if this
// block is not in a 'try' region).
EHblkDsc* ehGetBlockTryDsc(BasicBlock* block);
// Return the EH descriptor for the most nested filter or handler region this BasicBlock is a member of (or nullptr
// if this block is not in a filter or handler region).
EHblkDsc* ehGetBlockHndDsc(BasicBlock* block);
// Return the EH descriptor for the most nested region that may handle exceptions raised in this BasicBlock (or
// nullptr if this block's exceptions propagate to caller).
EHblkDsc* ehGetBlockExnFlowDsc(BasicBlock* block);
EHblkDsc* ehIsBlockTryLast(BasicBlock* block);
EHblkDsc* ehIsBlockHndLast(BasicBlock* block);
bool ehIsBlockEHLast(BasicBlock* block);
bool ehBlockHasExnFlowDsc(BasicBlock* block);
// Return the region index of the most nested EH region this block is in.
unsigned ehGetMostNestedRegionIndex(BasicBlock* block, bool* inTryRegion);
// Find the true enclosing try index, ignoring 'mutual protect' try. Uses IL ranges to check.
unsigned ehTrueEnclosingTryIndexIL(unsigned regionIndex);
// Return the index of the most nested enclosing region for a particular EH region. Returns NO_ENCLOSING_INDEX
// if there is no enclosing region. If the returned index is not NO_ENCLOSING_INDEX, then '*inTryRegion'
// is set to 'true' if the enclosing region is a 'try', or 'false' if the enclosing region is a handler.
// (It can never be a filter.)
unsigned ehGetEnclosingRegionIndex(unsigned regionIndex, bool* inTryRegion);
// A block has been deleted. Update the EH table appropriately.
void ehUpdateForDeletedBlock(BasicBlock* block);
// Determine whether a block can be deleted while preserving the EH normalization rules.
bool ehCanDeleteEmptyBlock(BasicBlock* block);
// Update the 'last' pointers in the EH table to reflect new or deleted blocks in an EH region.
void ehUpdateLastBlocks(BasicBlock* oldLast, BasicBlock* newLast);
// For a finally handler, find the region index that the BBJ_CALLFINALLY lives in that calls the handler,
// or NO_ENCLOSING_INDEX if the BBJ_CALLFINALLY lives in the main function body. Normally, the index
// is the same index as the handler (and the BBJ_CALLFINALLY lives in the 'try' region), but for AMD64 the
// BBJ_CALLFINALLY lives in the enclosing try or handler region, whichever is more nested, or the main function
// body. If the returned index is not NO_ENCLOSING_INDEX, then '*inTryRegion' is set to 'true' if the
// BBJ_CALLFINALLY lives in the returned index's 'try' region, or 'false' if lives in the handler region. (It never
// lives in a filter.)
unsigned ehGetCallFinallyRegionIndex(unsigned finallyIndex, bool* inTryRegion);
// Find the range of basic blocks that contains all the BBJ_CALLFINALLY blocks targeting the 'finallyIndex' region's
// handler. Set begBlk to the first block, and endBlk to the block after the last block of the range
// (nullptr if the last block is the last block in the program).
// Precondition: 'finallyIndex' is the EH region of a try/finally clause.
void ehGetCallFinallyBlockRange(unsigned finallyIndex, BasicBlock** begBlk, BasicBlock** endBlk);
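// A minimal sketch of walking that range (illustrative only; per the comment above,
// 'endBlk' is the block *after* the last block, so it terminates the walk):
//     BasicBlock* begBlk;
//     BasicBlock* endBlk;
//     ehGetCallFinallyBlockRange(finallyIndex, &begBlk, &endBlk);
//     for (BasicBlock* blk = begBlk; blk != endBlk; blk = blk->bbNext)
//     {
//         // Only some blocks in this range are BBJ_CALLFINALLY; check bbJumpKind.
//     }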
#ifdef DEBUG
// Given a BBJ_CALLFINALLY block and the EH region index of the finally it is calling, return
// 'true' if the BBJ_CALLFINALLY is in the correct EH region.
bool ehCallFinallyInCorrectRegion(BasicBlock* blockCallFinally, unsigned finallyIndex);
#endif // DEBUG
#if defined(FEATURE_EH_FUNCLETS)
// Do we need a PSPSym in the main function? For codegen purposes, we only need one
// if there is a filter that protects a region with a nested EH clause (such as a
// try/catch nested in the 'try' body of a try/filter/filter-handler). See
// genFuncletProlog() for more details. However, the VM seems to use it for more
// purposes, maybe including debugging. Until we are sure otherwise, always create
// a PSPSym for functions with any EH.
bool ehNeedsPSPSym() const
{
#ifdef TARGET_X86
return false;
#else // TARGET_X86
return compHndBBtabCount > 0;
#endif // TARGET_X86
}
bool ehAnyFunclets(); // Are there any funclets in this function?
unsigned ehFuncletCount(); // Return the count of funclets in the function
unsigned bbThrowIndex(BasicBlock* blk); // Get the index to use as the cache key for sharing throw blocks
#else // !FEATURE_EH_FUNCLETS
bool ehAnyFunclets()
{
return false;
}
unsigned ehFuncletCount()
{
return 0;
}
unsigned bbThrowIndex(BasicBlock* blk)
{
return blk->bbTryIndex;
} // Get the index to use as the cache key for sharing throw blocks
#endif // !FEATURE_EH_FUNCLETS
// Returns a flowList representing the "EH predecessors" of "blk". These are the normal predecessors of
// "blk", plus one special case: if "blk" is the first block of a handler, considers the predecessor(s) of the first
// first block of the corresponding try region to be "EH predecessors". (If there is a single such predecessor,
// for example, we want to consider that the immediate dominator of the catch clause start block, so it's
// convenient to also consider it a predecessor.)
flowList* BlockPredsWithEH(BasicBlock* blk);
// This table is useful for memoization of the method above.
typedef JitHashTable<BasicBlock*, JitPtrKeyFuncs<BasicBlock>, flowList*> BlockToFlowListMap;
BlockToFlowListMap* m_blockToEHPreds;
BlockToFlowListMap* GetBlockToEHPreds()
{
if (m_blockToEHPreds == nullptr)
{
m_blockToEHPreds = new (getAllocator()) BlockToFlowListMap(getAllocator());
}
return m_blockToEHPreds;
}
void* ehEmitCookie(BasicBlock* block);
UNATIVE_OFFSET ehCodeOffset(BasicBlock* block);
EHblkDsc* ehInitHndRange(BasicBlock* src, IL_OFFSET* hndBeg, IL_OFFSET* hndEnd, bool* inFilter);
EHblkDsc* ehInitTryRange(BasicBlock* src, IL_OFFSET* tryBeg, IL_OFFSET* tryEnd);
EHblkDsc* ehInitHndBlockRange(BasicBlock* blk, BasicBlock** hndBeg, BasicBlock** hndLast, bool* inFilter);
EHblkDsc* ehInitTryBlockRange(BasicBlock* blk, BasicBlock** tryBeg, BasicBlock** tryLast);
void fgSetTryBeg(EHblkDsc* handlerTab, BasicBlock* newTryBeg);
void fgSetTryEnd(EHblkDsc* handlerTab, BasicBlock* newTryLast);
void fgSetHndEnd(EHblkDsc* handlerTab, BasicBlock* newHndLast);
void fgSkipRmvdBlocks(EHblkDsc* handlerTab);
void fgAllocEHTable();
void fgRemoveEHTableEntry(unsigned XTnum);
#if defined(FEATURE_EH_FUNCLETS)
EHblkDsc* fgAddEHTableEntry(unsigned XTnum);
#endif // FEATURE_EH_FUNCLETS
#if !FEATURE_EH
void fgRemoveEH();
#endif // !FEATURE_EH
void fgSortEHTable();
// Causes the EH table to obey some well-formedness conditions, by inserting
// empty BB's when necessary:
// * No block is both the first block of a handler and the first block of a try.
// * No block is the first block of multiple 'try' regions.
// * No block is the last block of multiple EH regions.
void fgNormalizeEH();
bool fgNormalizeEHCase1();
bool fgNormalizeEHCase2();
bool fgNormalizeEHCase3();
void fgCheckForLoopsInHandlers();
#ifdef DEBUG
void dispIncomingEHClause(unsigned num, const CORINFO_EH_CLAUSE& clause);
void dispOutgoingEHClause(unsigned num, const CORINFO_EH_CLAUSE& clause);
void fgVerifyHandlerTab();
void fgDispHandlerTab();
#endif // DEBUG
bool fgNeedToSortEHTable;
void verInitEHTree(unsigned numEHClauses);
void verInsertEhNode(CORINFO_EH_CLAUSE* clause, EHblkDsc* handlerTab);
void verInsertEhNodeInTree(EHNodeDsc** ppRoot, EHNodeDsc* node);
void verInsertEhNodeParent(EHNodeDsc** ppRoot, EHNodeDsc* node);
void verCheckNestingLevel(EHNodeDsc* initRoot);
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX GenTree and BasicBlock XX
XX XX
XX Functions to allocate and display the GenTrees and BasicBlocks XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
// Functions to create nodes
Statement* gtNewStmt(GenTree* expr = nullptr);
Statement* gtNewStmt(GenTree* expr, const DebugInfo& di);
// For unary opers.
GenTree* gtNewOperNode(genTreeOps oper, var_types type, GenTree* op1, bool doSimplifications = TRUE);
// For binary opers.
GenTree* gtNewOperNode(genTreeOps oper, var_types type, GenTree* op1, GenTree* op2);
GenTreeColon* gtNewColonNode(var_types type, GenTree* elseNode, GenTree* thenNode);
GenTreeQmark* gtNewQmarkNode(var_types type, GenTree* cond, GenTreeColon* colon);
GenTree* gtNewLargeOperNode(genTreeOps oper,
var_types type = TYP_I_IMPL,
GenTree* op1 = nullptr,
GenTree* op2 = nullptr);
GenTreeIntCon* gtNewIconNode(ssize_t value, var_types type = TYP_INT);
GenTreeIntCon* gtNewIconNode(unsigned fieldOffset, FieldSeqNode* fieldSeq);
GenTreeIntCon* gtNewNull();
GenTreeIntCon* gtNewTrue();
GenTreeIntCon* gtNewFalse();
GenTree* gtNewPhysRegNode(regNumber reg, var_types type);
GenTree* gtNewJmpTableNode();
GenTree* gtNewIndOfIconHandleNode(var_types indType, size_t value, GenTreeFlags iconFlags, bool isInvariant);
GenTree* gtNewIconHandleNode(size_t value, GenTreeFlags flags, FieldSeqNode* fields = nullptr);
GenTreeFlags gtTokenToIconFlags(unsigned token);
GenTree* gtNewIconEmbHndNode(void* value, void* pValue, GenTreeFlags flags, void* compileTimeHandle);
GenTree* gtNewIconEmbScpHndNode(CORINFO_MODULE_HANDLE scpHnd);
GenTree* gtNewIconEmbClsHndNode(CORINFO_CLASS_HANDLE clsHnd);
GenTree* gtNewIconEmbMethHndNode(CORINFO_METHOD_HANDLE methHnd);
GenTree* gtNewIconEmbFldHndNode(CORINFO_FIELD_HANDLE fldHnd);
GenTree* gtNewStringLiteralNode(InfoAccessType iat, void* pValue);
GenTreeIntCon* gtNewStringLiteralLength(GenTreeStrCon* node);
GenTree* gtNewLconNode(__int64 value);
GenTree* gtNewDconNode(double value, var_types type = TYP_DOUBLE);
GenTree* gtNewSconNode(int CPX, CORINFO_MODULE_HANDLE scpHandle);
GenTree* gtNewZeroConNode(var_types type);
GenTree* gtNewOneConNode(var_types type);
GenTreeLclVar* gtNewStoreLclVar(unsigned dstLclNum, GenTree* src);
#ifdef FEATURE_SIMD
GenTree* gtNewSIMDVectorZero(var_types simdType, CorInfoType simdBaseJitType, unsigned simdSize);
#endif
GenTree* gtNewBlkOpNode(GenTree* dst, GenTree* srcOrFillVal, bool isVolatile, bool isCopyBlock);
GenTree* gtNewPutArgReg(var_types type, GenTree* arg, regNumber argReg);
GenTree* gtNewBitCastNode(var_types type, GenTree* arg);
protected:
void gtBlockOpInit(GenTree* result, GenTree* dst, GenTree* srcOrFillVal, bool isVolatile);
public:
GenTreeObj* gtNewObjNode(CORINFO_CLASS_HANDLE structHnd, GenTree* addr);
void gtSetObjGcInfo(GenTreeObj* objNode);
GenTree* gtNewStructVal(CORINFO_CLASS_HANDLE structHnd, GenTree* addr);
GenTree* gtNewBlockVal(GenTree* addr, unsigned size);
GenTree* gtNewCpObjNode(GenTree* dst, GenTree* src, CORINFO_CLASS_HANDLE structHnd, bool isVolatile);
GenTreeCall::Use* gtNewCallArgs(GenTree* node);
GenTreeCall::Use* gtNewCallArgs(GenTree* node1, GenTree* node2);
GenTreeCall::Use* gtNewCallArgs(GenTree* node1, GenTree* node2, GenTree* node3);
GenTreeCall::Use* gtNewCallArgs(GenTree* node1, GenTree* node2, GenTree* node3, GenTree* node4);
GenTreeCall::Use* gtPrependNewCallArg(GenTree* node, GenTreeCall::Use* args);
GenTreeCall::Use* gtInsertNewCallArgAfter(GenTree* node, GenTreeCall::Use* after);
GenTreeCall* gtNewCallNode(gtCallTypes callType,
CORINFO_METHOD_HANDLE handle,
var_types type,
GenTreeCall::Use* args,
const DebugInfo& di = DebugInfo());
GenTreeCall* gtNewIndCallNode(GenTree* addr,
var_types type,
GenTreeCall::Use* args,
const DebugInfo& di = DebugInfo());
GenTreeCall* gtNewHelperCallNode(unsigned helper, var_types type, GenTreeCall::Use* args = nullptr);
GenTreeCall* gtNewRuntimeLookupHelperCallNode(CORINFO_RUNTIME_LOOKUP* pRuntimeLookup,
GenTree* ctxTree,
void* compileTimeHandle);
GenTreeLclVar* gtNewLclvNode(unsigned lnum, var_types type DEBUGARG(IL_OFFSET offs = BAD_IL_OFFSET));
GenTreeLclVar* gtNewLclLNode(unsigned lnum, var_types type DEBUGARG(IL_OFFSET offs = BAD_IL_OFFSET));
GenTreeLclVar* gtNewLclVarAddrNode(unsigned lclNum, var_types type = TYP_I_IMPL);
GenTreeLclFld* gtNewLclFldAddrNode(unsigned lclNum,
unsigned lclOffs,
FieldSeqNode* fieldSeq,
var_types type = TYP_I_IMPL);
#ifdef FEATURE_SIMD
GenTreeSIMD* gtNewSIMDNode(
var_types type, GenTree* op1, SIMDIntrinsicID simdIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize);
GenTreeSIMD* gtNewSIMDNode(var_types type,
GenTree* op1,
GenTree* op2,
SIMDIntrinsicID simdIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize);
void SetOpLclRelatedToSIMDIntrinsic(GenTree* op);
#endif
#ifdef FEATURE_HW_INTRINSICS
GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic = false);
GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type,
GenTree* op1,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic = false);
GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type,
GenTree* op1,
GenTree* op2,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic = false);
GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type,
GenTree* op1,
GenTree* op2,
GenTree* op3,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic = false);
GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type,
GenTree* op1,
GenTree* op2,
GenTree* op3,
GenTree* op4,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic = false);
GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type,
GenTree** operands,
size_t operandCount,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic = false);
GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type,
IntrinsicNodeBuilder&& nodeBuilder,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic = false);
GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(var_types type,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize)
{
bool isSimdAsHWIntrinsic = true;
return gtNewSimdHWIntrinsicNode(type, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(
var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize)
{
bool isSimdAsHWIntrinsic = true;
return gtNewSimdHWIntrinsicNode(type, op1, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(var_types type,
GenTree* op1,
GenTree* op2,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize)
{
bool isSimdAsHWIntrinsic = true;
return gtNewSimdHWIntrinsicNode(type, op1, op2, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(var_types type,
GenTree* op1,
GenTree* op2,
GenTree* op3,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize)
{
bool isSimdAsHWIntrinsic = true;
return gtNewSimdHWIntrinsicNode(type, op1, op2, op3, hwIntrinsicID, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
GenTree* gtNewSimdAbsNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdBinOpNode(genTreeOps op,
var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdCeilNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdCmpOpNode(genTreeOps op,
var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdCmpOpAllNode(genTreeOps op,
var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdCmpOpAnyNode(genTreeOps op,
var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdCndSelNode(var_types type,
GenTree* op1,
GenTree* op2,
GenTree* op3,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdCreateBroadcastNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdDotProdNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdFloorNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdGetElementNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdMaxNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdMinNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdNarrowNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdSqrtNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdSumNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdUnOpNode(genTreeOps op,
var_types type,
GenTree* op1,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdWidenLowerNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdWidenUpperNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdWithElementNode(var_types type,
GenTree* op1,
GenTree* op2,
GenTree* op3,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdZeroNode(var_types type,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic);
GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode(var_types type, NamedIntrinsic hwIntrinsicID);
GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode(var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID);
GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode(var_types type,
GenTree* op1,
GenTree* op2,
NamedIntrinsic hwIntrinsicID);
GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode(
var_types type, GenTree* op1, GenTree* op2, GenTree* op3, NamedIntrinsic hwIntrinsicID);
CORINFO_CLASS_HANDLE gtGetStructHandleForHWSIMD(var_types simdType, CorInfoType simdBaseJitType);
CorInfoType getBaseJitTypeFromArgIfNeeded(NamedIntrinsic intrinsic,
CORINFO_CLASS_HANDLE clsHnd,
CORINFO_SIG_INFO* sig,
CorInfoType simdBaseJitType);
#endif // FEATURE_HW_INTRINSICS
GenTree* gtNewMustThrowException(unsigned helper, var_types type, CORINFO_CLASS_HANDLE clsHnd);
GenTreeLclFld* gtNewLclFldNode(unsigned lnum, var_types type, unsigned offset);
GenTree* gtNewInlineCandidateReturnExpr(GenTree* inlineCandidate, var_types type, BasicBlockFlags bbFlags);
GenTreeField* gtNewFieldRef(var_types type, CORINFO_FIELD_HANDLE fldHnd, GenTree* obj = nullptr, DWORD offset = 0);
GenTree* gtNewIndexRef(var_types typ, GenTree* arrayOp, GenTree* indexOp);
GenTreeArrLen* gtNewArrLen(var_types typ, GenTree* arrayOp, int lenOffset, BasicBlock* block);
GenTreeIndir* gtNewIndir(var_types typ, GenTree* addr);
GenTree* gtNewNullCheck(GenTree* addr, BasicBlock* basicBlock);
var_types gtTypeForNullCheck(GenTree* tree);
void gtChangeOperToNullCheck(GenTree* tree, BasicBlock* block);
static fgArgTabEntry* gtArgEntryByArgNum(GenTreeCall* call, unsigned argNum);
static fgArgTabEntry* gtArgEntryByNode(GenTreeCall* call, GenTree* node);
fgArgTabEntry* gtArgEntryByLateArgIndex(GenTreeCall* call, unsigned lateArgInx);
static GenTree* gtArgNodeByLateArgInx(GenTreeCall* call, unsigned lateArgInx);
GenTreeOp* gtNewAssignNode(GenTree* dst, GenTree* src);
GenTree* gtNewTempAssign(unsigned tmp,
GenTree* val,
Statement** pAfterStmt = nullptr,
const DebugInfo& di = DebugInfo(),
BasicBlock* block = nullptr);
GenTree* gtNewRefCOMfield(GenTree* objPtr,
CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_ACCESS_FLAGS access,
CORINFO_FIELD_INFO* pFieldInfo,
var_types lclTyp,
CORINFO_CLASS_HANDLE structType,
GenTree* assg);
GenTree* gtNewNothingNode();
GenTree* gtNewArgPlaceHolderNode(var_types type, CORINFO_CLASS_HANDLE clsHnd);
GenTree* gtUnusedValNode(GenTree* expr);
GenTree* gtNewKeepAliveNode(GenTree* op);
GenTreeCast* gtNewCastNode(var_types typ, GenTree* op1, bool fromUnsigned, var_types castType);
GenTreeCast* gtNewCastNodeL(var_types typ, GenTree* op1, bool fromUnsigned, var_types castType);
GenTreeAllocObj* gtNewAllocObjNode(
unsigned int helper, bool helperHasSideEffects, CORINFO_CLASS_HANDLE clsHnd, var_types type, GenTree* op1);
GenTreeAllocObj* gtNewAllocObjNode(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool useParent);
GenTree* gtNewRuntimeLookup(CORINFO_GENERIC_HANDLE hnd, CorInfoGenericHandleType hndTyp, GenTree* lookupTree);
GenTreeIndir* gtNewMethodTableLookup(GenTree* obj);
//------------------------------------------------------------------------
// Other GenTree functions
GenTree* gtClone(GenTree* tree, bool complexOK = false);
// If `tree` is a lclVar with lclNum `varNum`, return an IntCns with value `varVal`; otherwise,
// create a copy of `tree`, adding specified flags, replacing uses of lclVar `deepVarNum` with
// IntCnses with value `deepVarVal`.
GenTree* gtCloneExpr(
GenTree* tree, GenTreeFlags addFlags, unsigned varNum, int varVal, unsigned deepVarNum, int deepVarVal);
// Create a copy of `tree`, optionally adding specified flags, and optionally mapping uses of local
// `varNum` to int constants with value `varVal`.
GenTree* gtCloneExpr(GenTree* tree,
GenTreeFlags addFlags = GTF_EMPTY,
unsigned varNum = BAD_VAR_NUM,
int varVal = 0)
{
return gtCloneExpr(tree, addFlags, varNum, varVal, varNum, varVal);
}
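// Illustrative use of the convenience overload above (the local number and constant
// are made up for the example): clone 'tree' while replacing every use of local V03
// with the integer constant 0:
//     GenTree* clone = gtCloneExpr(tree, GTF_EMPTY, 3, 0);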
Statement* gtCloneStmt(Statement* stmt)
{
GenTree* exprClone = gtCloneExpr(stmt->GetRootNode());
return gtNewStmt(exprClone, stmt->GetDebugInfo());
}
// Internal helper for cloning a call
GenTreeCall* gtCloneExprCallHelper(GenTreeCall* call,
GenTreeFlags addFlags = GTF_EMPTY,
unsigned deepVarNum = BAD_VAR_NUM,
int deepVarVal = 0);
// Create copy of an inline or guarded devirtualization candidate tree.
GenTreeCall* gtCloneCandidateCall(GenTreeCall* call);
void gtUpdateSideEffects(Statement* stmt, GenTree* tree);
void gtUpdateTreeAncestorsSideEffects(GenTree* tree);
void gtUpdateStmtSideEffects(Statement* stmt);
void gtUpdateNodeSideEffects(GenTree* tree);
void gtUpdateNodeOperSideEffects(GenTree* tree);
void gtUpdateNodeOperSideEffectsPost(GenTree* tree);
// Returns "true" iff the complexity (not formally defined, but first interpretation
// is #of nodes in subtree) of "tree" is greater than "limit".
// (This is somewhat redundant with the "GetCostEx()/GetCostSz()" fields, but can be used
// before they have been set.)
bool gtComplexityExceeds(GenTree** tree, unsigned limit);
GenTree* gtReverseCond(GenTree* tree);
static bool gtHasRef(GenTree* tree, ssize_t lclNum);
bool gtHasLocalsWithAddrOp(GenTree* tree);
unsigned gtSetCallArgsOrder(const GenTreeCall::UseList& args, bool lateArgs, int* callCostEx, int* callCostSz);
unsigned gtSetMultiOpOrder(GenTreeMultiOp* multiOp);
void gtWalkOp(GenTree** op1, GenTree** op2, GenTree* base, bool constOnly);
#ifdef DEBUG
unsigned gtHashValue(GenTree* tree);
GenTree* gtWalkOpEffectiveVal(GenTree* op);
#endif
void gtPrepareCost(GenTree* tree);
bool gtIsLikelyRegVar(GenTree* tree);
// Returns true iff the secondNode can be swapped with firstNode.
bool gtCanSwapOrder(GenTree* firstNode, GenTree* secondNode);
// Given an address expression, compute its costs and addressing mode opportunities,
// and mark addressing mode candidates as GTF_DONT_CSE.
// TODO-Throughput - Consider actually instantiating these early, to avoid
// having to re-run the algorithm that looks for them (might also improve CQ).
bool gtMarkAddrMode(GenTree* addr, int* costEx, int* costSz, var_types type);
unsigned gtSetEvalOrder(GenTree* tree);
void gtSetStmtInfo(Statement* stmt);
// Returns "true" iff "node" has any of the side effects in "flags".
bool gtNodeHasSideEffects(GenTree* node, GenTreeFlags flags);
// Returns "true" iff "tree" or its (transitive) children have any of the side effects in "flags".
bool gtTreeHasSideEffects(GenTree* tree, GenTreeFlags flags);
// Appends 'expr' in front of 'list'
// 'list' will typically start off as 'nullptr'
// when 'list' is non-null a GT_COMMA node is used to insert 'expr'
GenTree* gtBuildCommaList(GenTree* list, GenTree* expr);
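// Illustrative build-up of such a list (the side-effect trees are hypothetical):
//     GenTree* list = nullptr;
//     list = gtBuildCommaList(list, sideEffect1); // list is now sideEffect1
//     list = gtBuildCommaList(list, sideEffect2); // list is now COMMA(sideEffect2, sideEffect1)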
void gtExtractSideEffList(GenTree* expr,
GenTree** pList,
GenTreeFlags GenTreeFlags = GTF_SIDE_EFFECT,
bool ignoreRoot = false);
GenTree* gtGetThisArg(GenTreeCall* call);
// Static fields of struct types (and sometimes the types that those are reduced to) are represented by having the
// static field contain an object pointer to the boxed struct. This simplifies the GC implementation...but
// complicates the JIT somewhat. This predicate returns "true" iff a node with type "fieldNodeType", representing
// the given "fldHnd", is such an object pointer.
bool gtIsStaticFieldPtrToBoxedStruct(var_types fieldNodeType, CORINFO_FIELD_HANDLE fldHnd);
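// For example (illustrative), for a static field of struct type the static slot holds
// a TYP_REF to the boxed struct instance; a field access first loads that object
// pointer and then indirects into the box payload, which is why the JIT needs this
// predicate to recognize such object-pointer-typed statics.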
// Return true if call is a recursive call; return false otherwise.
// Note when inlining, this looks for calls back to the root method.
bool gtIsRecursiveCall(GenTreeCall* call)
{
return gtIsRecursiveCall(call->gtCallMethHnd);
}
bool gtIsRecursiveCall(CORINFO_METHOD_HANDLE callMethodHandle)
{
return (callMethodHandle == impInlineRoot()->info.compMethodHnd);
}
//-------------------------------------------------------------------------
GenTree* gtFoldExpr(GenTree* tree);
GenTree* gtFoldExprConst(GenTree* tree);
GenTree* gtFoldExprSpecial(GenTree* tree);
GenTree* gtFoldBoxNullable(GenTree* tree);
GenTree* gtFoldExprCompare(GenTree* tree);
GenTree* gtCreateHandleCompare(genTreeOps oper,
GenTree* op1,
GenTree* op2,
CorInfoInlineTypeCheck typeCheckInliningResult);
GenTree* gtFoldExprCall(GenTreeCall* call);
GenTree* gtFoldTypeCompare(GenTree* tree);
GenTree* gtFoldTypeEqualityCall(bool isEq, GenTree* op1, GenTree* op2);
// Options to control behavior of gtTryRemoveBoxUpstreamEffects
enum BoxRemovalOptions
{
BR_REMOVE_AND_NARROW, // remove effects, minimize remaining work, return possibly narrowed source tree
BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE, // remove effects and minimize remaining work, return type handle tree
BR_REMOVE_BUT_NOT_NARROW, // remove effects, return original source tree
BR_DONT_REMOVE, // check if removal is possible, return copy source tree
BR_DONT_REMOVE_WANT_TYPE_HANDLE, // check if removal is possible, return type handle tree
BR_MAKE_LOCAL_COPY // revise box to copy to temp local and return local's address
};
GenTree* gtTryRemoveBoxUpstreamEffects(GenTree* tree, BoxRemovalOptions options = BR_REMOVE_AND_NARROW);
GenTree* gtOptimizeEnumHasFlag(GenTree* thisOp, GenTree* flagOp);
//-------------------------------------------------------------------------
// Get the handle, if any.
CORINFO_CLASS_HANDLE gtGetStructHandleIfPresent(GenTree* tree);
// Get the handle, and assert if not found.
CORINFO_CLASS_HANDLE gtGetStructHandle(GenTree* tree);
// Get the handle for a ref type.
CORINFO_CLASS_HANDLE gtGetClassHandle(GenTree* tree, bool* pIsExact, bool* pIsNonNull);
// Get the class handle for a helper call
CORINFO_CLASS_HANDLE gtGetHelperCallClassHandle(GenTreeCall* call, bool* pIsExact, bool* pIsNonNull);
// Get the element handle for an array of ref type.
CORINFO_CLASS_HANDLE gtGetArrayElementClassHandle(GenTree* array);
// Get a class handle from a helper call argument
CORINFO_CLASS_HANDLE gtGetHelperArgClassHandle(GenTree* array);
// Get the class handle for a field
CORINFO_CLASS_HANDLE gtGetFieldClassHandle(CORINFO_FIELD_HANDLE fieldHnd, bool* pIsExact, bool* pIsNonNull);
// Check if this tree is a gc static base helper call
bool gtIsStaticGCBaseHelperCall(GenTree* tree);
//-------------------------------------------------------------------------
// Functions to display the trees
#ifdef DEBUG
void gtDispNode(GenTree* tree, IndentStack* indentStack, _In_z_ const char* msg, bool isLIR);
void gtDispConst(GenTree* tree);
void gtDispLeaf(GenTree* tree, IndentStack* indentStack);
void gtDispNodeName(GenTree* tree);
#if FEATURE_MULTIREG_RET
unsigned gtDispMultiRegCount(GenTree* tree);
#endif
void gtDispRegVal(GenTree* tree);
void gtDispZeroFieldSeq(GenTree* tree);
void gtDispVN(GenTree* tree);
void gtDispCommonEndLine(GenTree* tree);
enum IndentInfo
{
IINone,
IIArc,
IIArcTop,
IIArcBottom,
IIEmbedded,
IIError,
IndentInfoCount
};
void gtDispChild(GenTree* child,
IndentStack* indentStack,
IndentInfo arcType,
_In_opt_ const char* msg = nullptr,
bool topOnly = false);
void gtDispTree(GenTree* tree,
IndentStack* indentStack = nullptr,
_In_opt_ const char* msg = nullptr,
bool topOnly = false,
bool isLIR = false);
void gtGetLclVarNameInfo(unsigned lclNum, const char** ilKindOut, const char** ilNameOut, unsigned* ilNumOut);
int gtGetLclVarName(unsigned lclNum, char* buf, unsigned buf_remaining);
char* gtGetLclVarName(unsigned lclNum);
void gtDispLclVar(unsigned lclNum, bool padForBiggestDisp = true);
void gtDispLclVarStructType(unsigned lclNum);
void gtDispClassLayout(ClassLayout* layout, var_types type);
void gtDispILLocation(const ILLocation& loc);
void gtDispStmt(Statement* stmt, const char* msg = nullptr);
void gtDispBlockStmts(BasicBlock* block);
void gtGetArgMsg(GenTreeCall* call, GenTree* arg, unsigned argNum, char* bufp, unsigned bufLength);
void gtGetLateArgMsg(GenTreeCall* call, GenTree* arg, int argNum, char* bufp, unsigned bufLength);
void gtDispArgList(GenTreeCall* call, GenTree* lastCallOperand, IndentStack* indentStack);
void gtDispAnyFieldSeq(FieldSeqNode* fieldSeq);
void gtDispFieldSeq(FieldSeqNode* pfsn);
void gtDispRange(LIR::ReadOnlyRange const& range);
void gtDispTreeRange(LIR::Range& containingRange, GenTree* tree);
void gtDispLIRNode(GenTree* node, const char* prefixMsg = nullptr);
#endif
// For tree walks
enum fgWalkResult
{
WALK_CONTINUE,
WALK_SKIP_SUBTREES,
WALK_ABORT
};
struct fgWalkData;
typedef fgWalkResult(fgWalkPreFn)(GenTree** pTree, fgWalkData* data);
typedef fgWalkResult(fgWalkPostFn)(GenTree** pTree, fgWalkData* data);
static fgWalkPreFn gtMarkColonCond;
static fgWalkPreFn gtClearColonCond;
struct FindLinkData
{
GenTree* nodeToFind;
GenTree** result;
GenTree* parent;
};
FindLinkData gtFindLink(Statement* stmt, GenTree* node);
bool gtHasCatchArg(GenTree* tree);
typedef ArrayStack<GenTree*> GenTreeStack;
static bool gtHasCallOnStack(GenTreeStack* parentStack);
//=========================================================================
// BasicBlock functions
#ifdef DEBUG
// This is a debug flag we will use to assert when creating block during codegen
// as this interferes with procedure splitting. If you know what you're doing, set
// it to true before creating the block. (DEBUG only)
bool fgSafeBasicBlockCreation;
#endif
BasicBlock* bbNewBasicBlock(BBjumpKinds jumpKind);
void placeLoopAlignInstructions();
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX LclVarsInfo XX
XX XX
XX The variables to be used by the code generator. XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
//
// For both PROMOTION_TYPE_NONE and PROMOTION_TYPE_DEPENDENT the struct will
// be placed in the stack frame and its fields must be laid out sequentially.
//
// For PROMOTION_TYPE_INDEPENDENT each of the struct's fields is replaced by
// a local variable that can be enregistered or placed in the stack frame.
// The fields do not need to be laid out sequentially
//
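// As an illustrative example, for a local of type "struct Point { int x; int y; }",
// PROMOTION_TYPE_INDEPENDENT replaces the local with two independent int field locals
// (one for x, one for y) that can each be enregistered, while PROMOTION_TYPE_DEPENDENT
// keeps the Point on the frame and the field locals simply describe offsets within it.
//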
enum lvaPromotionType
{
PROMOTION_TYPE_NONE, // The struct local is not promoted
PROMOTION_TYPE_INDEPENDENT, // The struct local is promoted,
// and its field locals are independent of its parent struct local.
PROMOTION_TYPE_DEPENDENT // The struct local is promoted,
// but its field locals depend on its parent struct local.
};
/*****************************************************************************/
enum FrameLayoutState
{
NO_FRAME_LAYOUT,
INITIAL_FRAME_LAYOUT,
PRE_REGALLOC_FRAME_LAYOUT,
REGALLOC_FRAME_LAYOUT,
TENTATIVE_FRAME_LAYOUT,
FINAL_FRAME_LAYOUT
};
public:
RefCountState lvaRefCountState; // Current local ref count state
bool lvaLocalVarRefCounted() const
{
return lvaRefCountState == RCS_NORMAL;
}
bool lvaTrackedFixed; // true: We cannot add new 'tracked' variables
unsigned lvaCount; // total number of locals, which includes function arguments,
// special arguments, IL local variables, and JIT temporary variables
LclVarDsc* lvaTable; // variable descriptor table
unsigned lvaTableCnt; // lvaTable size (>= lvaCount)
unsigned lvaTrackedCount; // actual # of locals being tracked
unsigned lvaTrackedCountInSizeTUnits; // min # of size_t's sufficient to hold a bit for all the locals being tracked
#ifdef DEBUG
VARSET_TP lvaTrackedVars; // set of tracked variables
#endif
#ifndef TARGET_64BIT
VARSET_TP lvaLongVars; // set of long (64-bit) variables
#endif
VARSET_TP lvaFloatVars; // set of floating-point (32-bit and 64-bit) variables
unsigned lvaCurEpoch; // VarSets are relative to a specific set of tracked var indices.
// If that changes, this changes. VarSets from different epochs
// cannot be meaningfully combined.
unsigned GetCurLVEpoch()
{
return lvaCurEpoch;
}
// reverse map of tracked number to var number
unsigned lvaTrackedToVarNumSize;
unsigned* lvaTrackedToVarNum;
#if DOUBLE_ALIGN
#ifdef DEBUG
// # of procs compiled with a double-aligned stack
static unsigned s_lvaDoubleAlignedProcsCount;
#endif
#endif
// Getters and setters for address-exposed and do-not-enregister local var properties.
bool lvaVarAddrExposed(unsigned varNum) const;
void lvaSetVarAddrExposed(unsigned varNum DEBUGARG(AddressExposedReason reason));
void lvaSetHiddenBufferStructArg(unsigned varNum);
void lvaSetVarLiveInOutOfHandler(unsigned varNum);
bool lvaVarDoNotEnregister(unsigned varNum);
void lvSetMinOptsDoNotEnreg();
bool lvaEnregEHVars;
bool lvaEnregMultiRegVars;
void lvaSetVarDoNotEnregister(unsigned varNum DEBUGARG(DoNotEnregisterReason reason));
unsigned lvaVarargsHandleArg;
#ifdef TARGET_X86
unsigned lvaVarargsBaseOfStkArgs; // Pointer (computed based on incoming varargs handle) to the start of the stack
// arguments
#endif // TARGET_X86
unsigned lvaInlinedPInvokeFrameVar; // variable representing the InlinedCallFrame
unsigned lvaReversePInvokeFrameVar; // variable representing the reverse PInvoke frame
#if FEATURE_FIXED_OUT_ARGS
unsigned lvaPInvokeFrameRegSaveVar; // variable representing the RegSave for PInvoke inlining.
#endif
unsigned lvaMonAcquired; // boolean variable introduced in synchronized methods
// that tracks whether the lock has been taken
unsigned lvaArg0Var; // The lclNum of arg0. Normally this will be info.compThisArg.
// However, if there is a "ldarga 0" or "starg 0" in the IL,
// we will redirect all "ldarg(a) 0" and "starg 0" to this temp.
unsigned lvaInlineeReturnSpillTemp; // The temp to spill the non-VOID return expression
// in case there are multiple BBJ_RETURN blocks in the inlinee
// or if the inlinee has GC ref locals.
#if FEATURE_FIXED_OUT_ARGS
unsigned lvaOutgoingArgSpaceVar; // dummy TYP_LCLBLK var for fixed outgoing argument space
PhasedVar<unsigned> lvaOutgoingArgSpaceSize; // size of fixed outgoing argument space
#endif // FEATURE_FIXED_OUT_ARGS
static unsigned GetOutgoingArgByteSize(unsigned sizeWithoutPadding)
{
return roundUp(sizeWithoutPadding, TARGET_POINTER_SIZE);
}
// Variable representing the return address. The helper-based tailcall
// mechanism passes the address of the return address to a runtime helper
// where it is used to detect tail-call chains.
unsigned lvaRetAddrVar;
#if defined(DEBUG) && defined(TARGET_XARCH)
unsigned lvaReturnSpCheck; // Stores SP to confirm it is not corrupted on return.
#endif // defined(DEBUG) && defined(TARGET_XARCH)
#if defined(DEBUG) && defined(TARGET_X86)
unsigned lvaCallSpCheck; // Stores SP to confirm it is not corrupted after every call.
#endif // defined(DEBUG) && defined(TARGET_X86)
bool lvaGenericsContextInUse;
bool lvaKeepAliveAndReportThis(); // Synchronized instance method of a reference type, or
// CORINFO_GENERICS_CTXT_FROM_THIS?
bool lvaReportParamTypeArg(); // Exceptions and CORINFO_GENERICS_CTXT_FROM_PARAMTYPEARG?
//-------------------------------------------------------------------------
// All these frame offsets are inter-related and must be kept in sync
#if !defined(FEATURE_EH_FUNCLETS)
// This is used for the callable handlers
unsigned lvaShadowSPslotsVar; // TYP_BLK variable for all the shadow SP slots
#endif // !FEATURE_EH_FUNCLETS
int lvaCachedGenericContextArgOffs;
int lvaCachedGenericContextArgOffset(); // For CORINFO_CALLCONV_PARAMTYPE and if generic context is passed as
// THIS pointer
#ifdef JIT32_GCENCODER
unsigned lvaLocAllocSPvar; // variable which stores the value of ESP after the last alloca/localloc
#endif // JIT32_GCENCODER
unsigned lvaNewObjArrayArgs; // variable with arguments for new MD array helper
// TODO-Review: Prior to reg predict we reserve 24 bytes for Spill temps.
// After reg predict we will use a computed maxTmpSize,
// which is based upon the number of spill temps predicted by reg predict.
// All this is necessary because if we under-estimate the size of the spill
// temps we could fail when encoding instructions that reference stack offsets for ARM.
//
// Pre codegen max spill temp size.
static const unsigned MAX_SPILL_TEMP_SIZE = 24;
//-------------------------------------------------------------------------
unsigned lvaGetMaxSpillTempSize();
#ifdef TARGET_ARM
bool lvaIsPreSpilled(unsigned lclNum, regMaskTP preSpillMask);
#endif // TARGET_ARM
void lvaAssignFrameOffsets(FrameLayoutState curState);
void lvaFixVirtualFrameOffsets();
void lvaUpdateArgWithInitialReg(LclVarDsc* varDsc);
void lvaUpdateArgsWithInitialReg();
void lvaAssignVirtualFrameOffsetsToArgs();
#ifdef UNIX_AMD64_ABI
int lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, unsigned argSize, int argOffs, int* callerArgOffset);
#else // !UNIX_AMD64_ABI
int lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, unsigned argSize, int argOffs);
#endif // !UNIX_AMD64_ABI
void lvaAssignVirtualFrameOffsetsToLocals();
int lvaAllocLocalAndSetVirtualOffset(unsigned lclNum, unsigned size, int stkOffs);
#ifdef TARGET_AMD64
// Returns true if compCalleeRegsPushed (including RBP if used as frame pointer) is even.
bool lvaIsCalleeSavedIntRegCountEven();
#endif
void lvaAlignFrame();
void lvaAssignFrameOffsetsToPromotedStructs();
int lvaAllocateTemps(int stkOffs, bool mustDoubleAlign);
#ifdef DEBUG
void lvaDumpRegLocation(unsigned lclNum);
void lvaDumpFrameLocation(unsigned lclNum);
void lvaDumpEntry(unsigned lclNum, FrameLayoutState curState, size_t refCntWtdWidth = 6);
void lvaTableDump(FrameLayoutState curState = NO_FRAME_LAYOUT); // NO_FRAME_LAYOUT means use the current frame
// layout state defined by lvaDoneFrameLayout
#endif
// Limit frame size to 1GB. The maximum is 2GB in theory - make it intentionally smaller
// to avoid bugs from borderline cases.
#define MAX_FrameSize 0x3FFFFFFF
void lvaIncrementFrameSize(unsigned size);
unsigned lvaFrameSize(FrameLayoutState curState);
// Converts the given SP-relative or FP-relative offset (per isFpBased) into a caller-SP-relative offset.
int lvaToCallerSPRelativeOffset(int offs, bool isFpBased, bool forRootFrame = true) const;
// Returns the caller-SP-relative offset for the local variable "varNum."
int lvaGetCallerSPRelativeOffset(unsigned varNum);
// Returns the SP-relative offset for the local variable "varNum". Illegal to ask this for functions with localloc.
int lvaGetSPRelativeOffset(unsigned varNum);
int lvaToInitialSPRelativeOffset(unsigned offset, bool isFpBased);
int lvaGetInitialSPRelativeOffset(unsigned varNum);
// True if this is an OSR compilation and this local is potentially
// located on the original method stack frame.
bool lvaIsOSRLocal(unsigned varNum);
//------------------------ For splitting types ----------------------------
void lvaInitTypeRef();
void lvaInitArgs(InitVarDscInfo* varDscInfo);
void lvaInitThisPtr(InitVarDscInfo* varDscInfo);
void lvaInitRetBuffArg(InitVarDscInfo* varDscInfo, bool useFixedRetBufReg);
void lvaInitUserArgs(InitVarDscInfo* varDscInfo, unsigned skipArgs, unsigned takeArgs);
void lvaInitGenericsCtxt(InitVarDscInfo* varDscInfo);
void lvaInitVarArgsHandle(InitVarDscInfo* varDscInfo);
void lvaInitVarDsc(LclVarDsc* varDsc,
unsigned varNum,
CorInfoType corInfoType,
CORINFO_CLASS_HANDLE typeHnd,
CORINFO_ARG_LIST_HANDLE varList,
CORINFO_SIG_INFO* varSig);
static unsigned lvaTypeRefMask(var_types type);
var_types lvaGetActualType(unsigned lclNum);
var_types lvaGetRealType(unsigned lclNum);
//-------------------------------------------------------------------------
void lvaInit();
LclVarDsc* lvaGetDesc(unsigned lclNum)
{
assert(lclNum < lvaCount);
return &lvaTable[lclNum];
}
LclVarDsc* lvaGetDesc(unsigned lclNum) const
{
assert(lclNum < lvaCount);
return &lvaTable[lclNum];
}
LclVarDsc* lvaGetDesc(const GenTreeLclVarCommon* lclVar)
{
return lvaGetDesc(lclVar->GetLclNum());
}
unsigned lvaTrackedIndexToLclNum(unsigned trackedIndex)
{
assert(trackedIndex < lvaTrackedCount);
unsigned lclNum = lvaTrackedToVarNum[trackedIndex];
assert(lclNum < lvaCount);
return lclNum;
}
LclVarDsc* lvaGetDescByTrackedIndex(unsigned trackedIndex)
{
return lvaGetDesc(lvaTrackedIndexToLclNum(trackedIndex));
}
unsigned lvaGetLclNum(const LclVarDsc* varDsc)
{
assert((lvaTable <= varDsc) && (varDsc < lvaTable + lvaCount)); // varDsc must point within the table
assert(((char*)varDsc - (char*)lvaTable) % sizeof(LclVarDsc) ==
0); // varDsc better not point in the middle of a variable
unsigned varNum = (unsigned)(varDsc - lvaTable);
assert(varDsc == &lvaTable[varNum]);
return varNum;
}
unsigned lvaLclSize(unsigned varNum);
unsigned lvaLclExactSize(unsigned varNum);
bool lvaHaveManyLocals() const;
unsigned lvaGrabTemp(bool shortLifetime DEBUGARG(const char* reason));
unsigned lvaGrabTemps(unsigned cnt DEBUGARG(const char* reason));
unsigned lvaGrabTempWithImplicitUse(bool shortLifetime DEBUGARG(const char* reason));
void lvaSortByRefCount();
void lvaMarkLocalVars(); // Local variable ref-counting
void lvaComputeRefCounts(bool isRecompute, bool setSlotNumbers);
void lvaMarkLocalVars(BasicBlock* block, bool isRecompute);
void lvaAllocOutgoingArgSpaceVar(); // Set up lvaOutgoingArgSpaceVar
VARSET_VALRET_TP lvaStmtLclMask(Statement* stmt);
#ifdef DEBUG
struct lvaStressLclFldArgs
{
Compiler* m_pCompiler;
bool m_bFirstPass;
};
static fgWalkPreFn lvaStressLclFldCB;
void lvaStressLclFld();
void lvaDispVarSet(VARSET_VALARG_TP set, VARSET_VALARG_TP allVars);
void lvaDispVarSet(VARSET_VALARG_TP set);
#endif
#ifdef TARGET_ARM
int lvaFrameAddress(int varNum, bool mustBeFPBased, regNumber* pBaseReg, int addrModeOffset, bool isFloatUsage);
#else
int lvaFrameAddress(int varNum, bool* pFPbased);
#endif
bool lvaIsParameter(unsigned varNum);
bool lvaIsRegArgument(unsigned varNum);
bool lvaIsOriginalThisArg(unsigned varNum); // Is this varNum the original this argument?
bool lvaIsOriginalThisReadOnly(); // return true if there is no place in the code
// that writes to arg0
// For x64 this is 3, 5, 6, 7, >8 byte structs that are passed by reference.
// For ARM64, this is structs larger than 16 bytes that are passed by reference.
bool lvaIsImplicitByRefLocal(unsigned varNum)
{
#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
LclVarDsc* varDsc = lvaGetDesc(varNum);
if (varDsc->lvIsImplicitByRef)
{
assert(varDsc->lvIsParam);
assert(varTypeIsStruct(varDsc) || (varDsc->lvType == TYP_BYREF));
return true;
}
#endif // defined(TARGET_AMD64) || defined(TARGET_ARM64)
return false;
}
// Returns true if this local var is a multireg struct
bool lvaIsMultiregStruct(LclVarDsc* varDsc, bool isVararg);
// If the local is a TYP_STRUCT, get/set a class handle describing it
CORINFO_CLASS_HANDLE lvaGetStruct(unsigned varNum);
void lvaSetStruct(unsigned varNum, CORINFO_CLASS_HANDLE typeHnd, bool unsafeValueClsCheck, bool setTypeInfo = true);
void lvaSetStructUsedAsVarArg(unsigned varNum);
// If the local is TYP_REF, set or update the associated class information.
void lvaSetClass(unsigned varNum, CORINFO_CLASS_HANDLE clsHnd, bool isExact = false);
void lvaSetClass(unsigned varNum, GenTree* tree, CORINFO_CLASS_HANDLE stackHandle = nullptr);
void lvaUpdateClass(unsigned varNum, CORINFO_CLASS_HANDLE clsHnd, bool isExact = false);
void lvaUpdateClass(unsigned varNum, GenTree* tree, CORINFO_CLASS_HANDLE stackHandle = nullptr);
#define MAX_NumOfFieldsInPromotableStruct 4 // Maximum number of fields in promotable struct
// Info about struct type fields.
struct lvaStructFieldInfo
{
CORINFO_FIELD_HANDLE fldHnd;
unsigned char fldOffset;
unsigned char fldOrdinal;
var_types fldType;
unsigned fldSize;
CORINFO_CLASS_HANDLE fldTypeHnd;
lvaStructFieldInfo()
: fldHnd(nullptr), fldOffset(0), fldOrdinal(0), fldType(TYP_UNDEF), fldSize(0), fldTypeHnd(nullptr)
{
}
};
// Info about a struct type, instances of which may be candidates for promotion.
struct lvaStructPromotionInfo
{
CORINFO_CLASS_HANDLE typeHnd;
bool canPromote;
bool containsHoles;
bool customLayout;
bool fieldsSorted;
unsigned char fieldCnt;
lvaStructFieldInfo fields[MAX_NumOfFieldsInPromotableStruct];
lvaStructPromotionInfo(CORINFO_CLASS_HANDLE typeHnd = nullptr)
: typeHnd(typeHnd)
, canPromote(false)
, containsHoles(false)
, customLayout(false)
, fieldsSorted(false)
, fieldCnt(0)
{
}
};
struct lvaFieldOffsetCmp
{
bool operator()(const lvaStructFieldInfo& field1, const lvaStructFieldInfo& field2);
};
// This class is responsible for checking validity and profitability of struct promotion.
// If it is both legal and profitable, then TryPromoteStructVar promotes the struct and initializes
// necessary information for fgMorphStructField to use.
class StructPromotionHelper
{
public:
StructPromotionHelper(Compiler* compiler);
bool CanPromoteStructType(CORINFO_CLASS_HANDLE typeHnd);
bool TryPromoteStructVar(unsigned lclNum);
void Clear()
{
structPromotionInfo.typeHnd = NO_CLASS_HANDLE;
}
#ifdef DEBUG
void CheckRetypedAsScalar(CORINFO_FIELD_HANDLE fieldHnd, var_types requestedType);
#endif // DEBUG
private:
bool CanPromoteStructVar(unsigned lclNum);
bool ShouldPromoteStructVar(unsigned lclNum);
void PromoteStructVar(unsigned lclNum);
void SortStructFields();
lvaStructFieldInfo GetFieldInfo(CORINFO_FIELD_HANDLE fieldHnd, BYTE ordinal);
bool TryPromoteStructField(lvaStructFieldInfo& outerFieldInfo);
private:
Compiler* compiler;
lvaStructPromotionInfo structPromotionInfo;
#ifdef DEBUG
typedef JitHashTable<CORINFO_FIELD_HANDLE, JitPtrKeyFuncs<CORINFO_FIELD_STRUCT_>, var_types>
RetypedAsScalarFieldsMap;
RetypedAsScalarFieldsMap retypedFieldsMap;
#endif // DEBUG
};
StructPromotionHelper* structPromotionHelper;
unsigned lvaGetFieldLocal(const LclVarDsc* varDsc, unsigned int fldOffset);
lvaPromotionType lvaGetPromotionType(const LclVarDsc* varDsc);
lvaPromotionType lvaGetPromotionType(unsigned varNum);
lvaPromotionType lvaGetParentPromotionType(const LclVarDsc* varDsc);
lvaPromotionType lvaGetParentPromotionType(unsigned varNum);
bool lvaIsFieldOfDependentlyPromotedStruct(const LclVarDsc* varDsc);
bool lvaIsGCTracked(const LclVarDsc* varDsc);
#if defined(FEATURE_SIMD)
bool lvaMapSimd12ToSimd16(const LclVarDsc* varDsc)
{
assert(varDsc->lvType == TYP_SIMD12);
assert(varDsc->lvExactSize == 12);
#if defined(TARGET_64BIT)
assert(compMacOsArm64Abi() || varDsc->lvSize() == 16);
#endif // defined(TARGET_64BIT)
// We make local variable SIMD12 types 16 bytes instead of just 12.
// lvSize() will return 16 bytes for SIMD12, even for fields.
// However, we can't do that mapping if the var is a dependently promoted struct field.
// Such a field must remain its exact size within its parent struct unless it is the only
// field in its parent struct and that struct is exactly 16 bytes.
if (varDsc->lvSize() != 16)
{
return false;
}
if (lvaIsFieldOfDependentlyPromotedStruct(varDsc))
{
LclVarDsc* parentVarDsc = lvaGetDesc(varDsc->lvParentLcl);
return (parentVarDsc->lvFieldCnt == 1) && (parentVarDsc->lvSize() == 16);
}
return true;
}
#endif // defined(FEATURE_SIMD)
unsigned lvaGSSecurityCookie; // LclVar number
bool lvaTempsHaveLargerOffsetThanVars();
// Returns "true" iff local variable "lclNum" is in SSA form.
bool lvaInSsa(unsigned lclNum)
{
assert(lclNum < lvaCount);
return lvaTable[lclNum].lvInSsa;
}
unsigned lvaStubArgumentVar; // variable representing the secret stub argument coming in EAX
#if defined(FEATURE_EH_FUNCLETS)
unsigned lvaPSPSym; // variable representing the PSPSym
#endif
InlineInfo* impInlineInfo; // Only present for inlinees
InlineStrategy* m_inlineStrategy;
InlineContext* compInlineContext; // Always present
// The Compiler* that is the root of the inlining tree of which "this" is a member.
Compiler* impInlineRoot();
#if defined(DEBUG) || defined(INLINE_DATA)
unsigned __int64 getInlineCycleCount()
{
return m_compCycles;
}
#endif // defined(DEBUG) || defined(INLINE_DATA)
bool fgNoStructPromotion; // Set to TRUE to turn off struct promotion for this method.
bool fgNoStructParamPromotion; // Set to TRUE to turn off struct promotion for parameters of this method.
//=========================================================================
// PROTECTED
//=========================================================================
protected:
//---------------- Local variable ref-counting ----------------------------
void lvaMarkLclRefs(GenTree* tree, BasicBlock* block, Statement* stmt, bool isRecompute);
bool IsDominatedByExceptionalEntry(BasicBlock* block);
void SetVolatileHint(LclVarDsc* varDsc);
// Keeps the mapping from SSA #'s to VN's for the implicit memory variables.
SsaDefArray<SsaMemDef> lvMemoryPerSsaData;
public:
// Returns the address of the per-Ssa data for memory at the given ssaNum (which is required
// not to be the SsaConfig::RESERVED_SSA_NUM, which indicates that the variable is
// not an SSA variable).
SsaMemDef* GetMemoryPerSsaData(unsigned ssaNum)
{
return lvMemoryPerSsaData.GetSsaDef(ssaNum);
}
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Importer XX
XX XX
XX Imports the given method and converts it to semantic trees XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
private:
// For prefixFlags
enum
{
PREFIX_TAILCALL_EXPLICIT = 0x00000001, // call has "tail" IL prefix
PREFIX_TAILCALL_IMPLICIT =
0x00000010, // call is treated as having "tail" prefix even though there is no "tail" IL prefix
PREFIX_TAILCALL_STRESS =
0x00000100, // call doesn't have the "tail" IL prefix but is treated as explicit because of tail call stress
PREFIX_TAILCALL = (PREFIX_TAILCALL_EXPLICIT | PREFIX_TAILCALL_IMPLICIT | PREFIX_TAILCALL_STRESS),
PREFIX_VOLATILE = 0x00001000,
PREFIX_UNALIGNED = 0x00010000,
PREFIX_CONSTRAINED = 0x00100000,
PREFIX_READONLY = 0x01000000
};
static void impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix);
static OPCODE impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp);
static bool impOpcodeIsCallOpcode(OPCODE opcode);
public:
void impInit();
void impImport();
CORINFO_CLASS_HANDLE impGetRefAnyClass();
CORINFO_CLASS_HANDLE impGetRuntimeArgumentHandle();
CORINFO_CLASS_HANDLE impGetTypeHandleClass();
CORINFO_CLASS_HANDLE impGetStringClass();
CORINFO_CLASS_HANDLE impGetObjectClass();
// Returns underlying type of handles returned by ldtoken instruction
var_types GetRuntimeHandleUnderlyingType()
{
// RuntimeTypeHandle is backed by raw pointer on CoreRT and by object reference on other runtimes
return IsTargetAbi(CORINFO_CORERT_ABI) ? TYP_I_IMPL : TYP_REF;
}
void impDevirtualizeCall(GenTreeCall* call,
CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_METHOD_HANDLE* method,
unsigned* methodFlags,
CORINFO_CONTEXT_HANDLE* contextHandle,
CORINFO_CONTEXT_HANDLE* exactContextHandle,
bool isLateDevirtualization,
bool isExplicitTailCall,
IL_OFFSET ilOffset = BAD_IL_OFFSET);
//=========================================================================
// PROTECTED
//=========================================================================
protected:
//-------------------- Stack manipulation ---------------------------------
unsigned impStkSize; // Size of the full stack
#define SMALL_STACK_SIZE 16 // number of elements in impSmallStack
struct SavedStack // used to save/restore stack contents.
{
unsigned ssDepth; // number of values on stack
StackEntry* ssTrees; // saved tree values
};
bool impIsPrimitive(CorInfoType type);
bool impILConsumesAddr(const BYTE* codeAddr);
void impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind);
void impPushOnStack(GenTree* tree, typeInfo ti);
void impPushNullObjRefOnStack();
StackEntry impPopStack();
StackEntry& impStackTop(unsigned n = 0);
unsigned impStackHeight();
void impSaveStackState(SavedStack* savePtr, bool copy);
void impRestoreStackState(SavedStack* savePtr);
GenTree* impImportLdvirtftn(GenTree* thisPtr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo);
int impBoxPatternMatch(CORINFO_RESOLVED_TOKEN* pResolvedToken,
const BYTE* codeAddr,
const BYTE* codeEndp,
bool makeInlineObservation = false);
void impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken);
void impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo);
bool impCanPInvokeInline();
bool impCanPInvokeInlineCallSite(BasicBlock* block);
void impCheckForPInvokeCall(
GenTreeCall* call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block);
GenTreeCall* impImportIndirectCall(CORINFO_SIG_INFO* sig, const DebugInfo& di = DebugInfo());
void impPopArgsForUnmanagedCall(GenTree* call, CORINFO_SIG_INFO* sig);
void impInsertHelperCall(CORINFO_HELPER_DESC* helperCall);
void impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall);
void impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall);
var_types impImportCall(OPCODE opcode,
CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a
// type parameter?
GenTree* newobjThis,
int prefixFlags,
CORINFO_CALL_INFO* callInfo,
IL_OFFSET rawILOffset);
CORINFO_CLASS_HANDLE impGetSpecialIntrinsicExactReturnType(CORINFO_METHOD_HANDLE specialIntrinsicHandle);
bool impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo, CorInfoCallConvExtension callConv);
GenTree* impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HANDLE retClsHnd);
GenTree* impFixupStructReturnType(GenTree* op,
CORINFO_CLASS_HANDLE retClsHnd,
CorInfoCallConvExtension unmgdCallConv);
#ifdef DEBUG
var_types impImportJitTestLabelMark(int numArgs);
#endif // DEBUG
GenTree* impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken);
GenTree* impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp);
GenTree* impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_ACCESS_FLAGS access,
CORINFO_FIELD_INFO* pFieldInfo,
var_types lclTyp);
static void impBashVarAddrsToI(GenTree* tree1, GenTree* tree2 = nullptr);
GenTree* impImplicitIorI4Cast(GenTree* tree, var_types dstTyp);
GenTree* impImplicitR4orR8Cast(GenTree* tree, var_types dstTyp);
void impImportLeave(BasicBlock* block);
void impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr);
GenTree* impTypeIsAssignable(GenTree* typeTo, GenTree* typeFrom);
// Mirrors StringComparison.cs
enum StringComparison
{
Ordinal = 4,
OrdinalIgnoreCase = 5
};
enum StringComparisonJoint
{
Eq, // (d1 == cns1) && (s2 == cns2)
Xor, // (d1 ^ cns1) | (s2 ^ cns2)
};
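    // Illustrative sketch (the names below are placeholders, not members of these types): for a two-chunk
    // constant comparison, the Eq joint produces
    //     (chunk1 == cns1) && (chunk2 == cns2)
    // while the Xor joint folds both tests into a single branch-free condition of the form
    //     ((chunk1 ^ cns1) | (chunk2 ^ cns2)) == 0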
GenTree* impStringEqualsOrStartsWith(bool startsWith, CORINFO_SIG_INFO* sig, unsigned methodFlags);
GenTree* impSpanEqualsOrStartsWith(bool startsWith, CORINFO_SIG_INFO* sig, unsigned methodFlags);
GenTree* impExpandHalfConstEquals(GenTreeLclVar* data,
GenTree* lengthFld,
bool checkForNull,
bool startsWith,
WCHAR* cnsData,
int len,
int dataOffset,
StringComparison cmpMode);
GenTree* impCreateCompareInd(GenTreeLclVar* obj,
var_types type,
ssize_t offset,
ssize_t value,
StringComparison ignoreCase,
StringComparisonJoint joint = Eq);
GenTree* impExpandHalfConstEqualsSWAR(
GenTreeLclVar* data, WCHAR* cns, int len, int dataOffset, StringComparison cmpMode);
GenTree* impExpandHalfConstEqualsSIMD(
GenTreeLclVar* data, WCHAR* cns, int len, int dataOffset, StringComparison cmpMode);
GenTreeStrCon* impGetStrConFromSpan(GenTree* span);
GenTree* impIntrinsic(GenTree* newobjThis,
CORINFO_CLASS_HANDLE clsHnd,
CORINFO_METHOD_HANDLE method,
CORINFO_SIG_INFO* sig,
unsigned methodFlags,
int memberRef,
bool readonlyCall,
bool tailCall,
                          CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
CORINFO_THIS_TRANSFORM constraintCallThisTransform,
NamedIntrinsic* pIntrinsicName,
bool* isSpecialIntrinsic = nullptr);
GenTree* impMathIntrinsic(CORINFO_METHOD_HANDLE method,
CORINFO_SIG_INFO* sig,
var_types callType,
NamedIntrinsic intrinsicName,
bool tailCall);
NamedIntrinsic lookupNamedIntrinsic(CORINFO_METHOD_HANDLE method);
GenTree* impUnsupportedNamedIntrinsic(unsigned helper,
CORINFO_METHOD_HANDLE method,
CORINFO_SIG_INFO* sig,
bool mustExpand);
#ifdef FEATURE_HW_INTRINSICS
GenTree* impHWIntrinsic(NamedIntrinsic intrinsic,
CORINFO_CLASS_HANDLE clsHnd,
CORINFO_METHOD_HANDLE method,
CORINFO_SIG_INFO* sig,
bool mustExpand);
GenTree* impSimdAsHWIntrinsic(NamedIntrinsic intrinsic,
CORINFO_CLASS_HANDLE clsHnd,
CORINFO_METHOD_HANDLE method,
CORINFO_SIG_INFO* sig,
GenTree* newobjThis);
protected:
bool compSupportsHWIntrinsic(CORINFO_InstructionSet isa);
GenTree* impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic,
CORINFO_CLASS_HANDLE clsHnd,
CORINFO_SIG_INFO* sig,
var_types retType,
CorInfoType simdBaseJitType,
unsigned simdSize,
GenTree* newobjThis);
GenTree* impSpecialIntrinsic(NamedIntrinsic intrinsic,
CORINFO_CLASS_HANDLE clsHnd,
CORINFO_METHOD_HANDLE method,
CORINFO_SIG_INFO* sig,
CorInfoType simdBaseJitType,
var_types retType,
unsigned simdSize);
GenTree* getArgForHWIntrinsic(var_types argType,
CORINFO_CLASS_HANDLE argClass,
bool expectAddr = false,
GenTree* newobjThis = nullptr);
GenTree* impNonConstFallback(NamedIntrinsic intrinsic, var_types simdType, CorInfoType simdBaseJitType);
GenTree* addRangeCheckIfNeeded(
NamedIntrinsic intrinsic, GenTree* immOp, bool mustExpand, int immLowerBound, int immUpperBound);
GenTree* addRangeCheckForHWIntrinsic(GenTree* immOp, int immLowerBound, int immUpperBound);
#ifdef TARGET_XARCH
GenTree* impBaseIntrinsic(NamedIntrinsic intrinsic,
CORINFO_CLASS_HANDLE clsHnd,
CORINFO_METHOD_HANDLE method,
CORINFO_SIG_INFO* sig,
CorInfoType simdBaseJitType,
var_types retType,
unsigned simdSize);
GenTree* impSSEIntrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig);
GenTree* impSSE2Intrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig);
GenTree* impAvxOrAvx2Intrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig);
GenTree* impBMI1OrBMI2Intrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig);
#endif // TARGET_XARCH
#endif // FEATURE_HW_INTRINSICS
GenTree* impArrayAccessIntrinsic(CORINFO_CLASS_HANDLE clsHnd,
CORINFO_SIG_INFO* sig,
int memberRef,
bool readonlyCall,
NamedIntrinsic intrinsicName);
GenTree* impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig);
GenTree* impCreateSpanIntrinsic(CORINFO_SIG_INFO* sig);
GenTree* impKeepAliveIntrinsic(GenTree* objToKeepAlive);
GenTree* impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo);
GenTree* impTransformThis(GenTree* thisPtr,
CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
CORINFO_THIS_TRANSFORM transform);
//----------------- Manipulating the trees and stmts ----------------------
Statement* impStmtList; // Statements for the BB being imported.
Statement* impLastStmt; // The last statement for the current BB.
public:
enum
{
CHECK_SPILL_ALL = -1,
CHECK_SPILL_NONE = -2
};
void impBeginTreeList();
void impEndTreeList(BasicBlock* block, Statement* firstStmt, Statement* lastStmt);
void impEndTreeList(BasicBlock* block);
void impAppendStmtCheck(Statement* stmt, unsigned chkLevel);
void impAppendStmt(Statement* stmt, unsigned chkLevel, bool checkConsumedDebugInfo = true);
void impAppendStmt(Statement* stmt);
void impInsertStmtBefore(Statement* stmt, Statement* stmtBefore);
Statement* impAppendTree(GenTree* tree, unsigned chkLevel, const DebugInfo& di, bool checkConsumedDebugInfo = true);
void impInsertTreeBefore(GenTree* tree, const DebugInfo& di, Statement* stmtBefore);
void impAssignTempGen(unsigned tmp,
GenTree* val,
unsigned curLevel = (unsigned)CHECK_SPILL_NONE,
Statement** pAfterStmt = nullptr,
const DebugInfo& di = DebugInfo(),
BasicBlock* block = nullptr);
void impAssignTempGen(unsigned tmpNum,
GenTree* val,
CORINFO_CLASS_HANDLE structHnd,
unsigned curLevel,
Statement** pAfterStmt = nullptr,
const DebugInfo& di = DebugInfo(),
BasicBlock* block = nullptr);
Statement* impExtractLastStmt();
GenTree* impCloneExpr(GenTree* tree,
GenTree** clone,
CORINFO_CLASS_HANDLE structHnd,
unsigned curLevel,
Statement** pAfterStmt DEBUGARG(const char* reason));
GenTree* impAssignStruct(GenTree* dest,
GenTree* src,
CORINFO_CLASS_HANDLE structHnd,
unsigned curLevel,
Statement** pAfterStmt = nullptr,
const DebugInfo& di = DebugInfo(),
BasicBlock* block = nullptr);
GenTree* impAssignStructPtr(GenTree* dest,
GenTree* src,
CORINFO_CLASS_HANDLE structHnd,
unsigned curLevel,
Statement** pAfterStmt = nullptr,
const DebugInfo& di = DebugInfo(),
BasicBlock* block = nullptr);
GenTree* impGetStructAddr(GenTree* structVal, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, bool willDeref);
var_types impNormStructType(CORINFO_CLASS_HANDLE structHnd, CorInfoType* simdBaseJitType = nullptr);
GenTree* impNormStructVal(GenTree* structVal,
CORINFO_CLASS_HANDLE structHnd,
unsigned curLevel,
bool forceNormalization = false);
GenTree* impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken,
bool* pRuntimeLookup = nullptr,
bool mustRestoreHandle = false,
bool importParent = false);
GenTree* impParentClassTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken,
bool* pRuntimeLookup = nullptr,
bool mustRestoreHandle = false)
{
return impTokenToHandle(pResolvedToken, pRuntimeLookup, mustRestoreHandle, true);
}
GenTree* impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_LOOKUP* pLookup,
GenTreeFlags flags,
void* compileTimeHandle);
GenTree* getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind);
GenTree* impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_LOOKUP* pLookup,
void* compileTimeHandle);
GenTree* impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup, GenTreeFlags flags, void* compileTimeHandle);
GenTreeCall* impReadyToRunHelperToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
CorInfoHelpFunc helper,
var_types type,
GenTreeCall::Use* args = nullptr,
CORINFO_LOOKUP_KIND* pGenericLookupKind = nullptr);
bool impIsCastHelperEligibleForClassProbe(GenTree* tree);
bool impIsCastHelperMayHaveProfileData(GenTree* tree);
GenTree* impCastClassOrIsInstToTree(
GenTree* op1, GenTree* op2, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass, IL_OFFSET ilOffset);
GenTree* impOptimizeCastClassOrIsInst(GenTree* op1, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass);
bool VarTypeIsMultiByteAndCanEnreg(var_types type,
CORINFO_CLASS_HANDLE typeClass,
unsigned* typeSize,
bool forReturn,
bool isVarArg,
CorInfoCallConvExtension callConv);
bool IsIntrinsicImplementedByUserCall(NamedIntrinsic intrinsicName);
bool IsTargetIntrinsic(NamedIntrinsic intrinsicName);
bool IsMathIntrinsic(NamedIntrinsic intrinsicName);
bool IsMathIntrinsic(GenTree* tree);
private:
//----------------- Importing the method ----------------------------------
CORINFO_CONTEXT_HANDLE impTokenLookupContextHandle; // The context used for looking up tokens.
#ifdef DEBUG
unsigned impCurOpcOffs;
const char* impCurOpcName;
bool impNestedStackSpill;
// For displaying instrs with generated native code (-n:B)
Statement* impLastILoffsStmt; // oldest stmt added for which we did not call SetLastILOffset().
void impNoteLastILoffs();
#endif
// Debug info of current statement being imported. It gets set to contain
// no IL location (!impCurStmtDI.GetLocation().IsValid) after it has been
// set in the appended trees. Then it gets updated at IL instructions for
// which we have to report mapping info.
// It will always contain the current inline context.
DebugInfo impCurStmtDI;
DebugInfo impCreateDIWithCurrentStackInfo(IL_OFFSET offs, bool isCall);
void impCurStmtOffsSet(IL_OFFSET offs);
void impNoteBranchOffs();
unsigned impInitBlockLineInfo();
bool impIsThis(GenTree* obj);
bool impIsLDFTN_TOKEN(const BYTE* delegateCreateStart, const BYTE* newobjCodeAddr);
bool impIsDUP_LDVIRTFTN_TOKEN(const BYTE* delegateCreateStart, const BYTE* newobjCodeAddr);
bool impIsAnySTLOC(OPCODE opcode)
{
return ((opcode == CEE_STLOC) || (opcode == CEE_STLOC_S) ||
((opcode >= CEE_STLOC_0) && (opcode <= CEE_STLOC_3)));
}
GenTreeCall::Use* impPopCallArgs(unsigned count, CORINFO_SIG_INFO* sig, GenTreeCall::Use* prefixArgs = nullptr);
bool impCheckImplicitArgumentCoercion(var_types sigType, var_types nodeType) const;
GenTreeCall::Use* impPopReverseCallArgs(unsigned count, CORINFO_SIG_INFO* sig, unsigned skipReverseCount = 0);
//---------------- Spilling the importer stack ----------------------------
// The maximum number of bytes of IL processed without clean stack state.
    // It allows us to limit the maximum tree size and depth.
static const unsigned MAX_TREE_SIZE = 200;
bool impCanSpillNow(OPCODE prevOpcode);
struct PendingDsc
{
PendingDsc* pdNext;
BasicBlock* pdBB;
SavedStack pdSavedStack;
ThisInitState pdThisPtrInit;
};
PendingDsc* impPendingList; // list of BBs currently waiting to be imported.
PendingDsc* impPendingFree; // Freed up dscs that can be reused
// We keep a byte-per-block map (dynamically extended) in the top-level Compiler object of a compilation.
JitExpandArray<BYTE> impPendingBlockMembers;
    // Return the byte for "blk" (allocating/extending impPendingBlockMembers if necessary).
// Operates on the map in the top-level ancestor.
BYTE impGetPendingBlockMember(BasicBlock* blk)
{
return impInlineRoot()->impPendingBlockMembers.Get(blk->bbInd());
}
    // Set the byte for "blk" to "val" (allocating/extending impPendingBlockMembers if necessary).
// Operates on the map in the top-level ancestor.
void impSetPendingBlockMember(BasicBlock* blk, BYTE val)
{
impInlineRoot()->impPendingBlockMembers.Set(blk->bbInd(), val);
}
bool impCanReimport;
bool impSpillStackEntry(unsigned level,
unsigned varNum
#ifdef DEBUG
,
bool bAssertOnRecursion,
const char* reason
#endif
);
void impSpillStackEnsure(bool spillLeaves = false);
void impEvalSideEffects();
void impSpillSpecialSideEff();
void impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason));
void impSpillValueClasses();
void impSpillEvalStack();
static fgWalkPreFn impFindValueClasses;
void impSpillLclRefs(ssize_t lclNum);
BasicBlock* impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd, bool isSingleBlockFilter);
bool impBlockIsInALoop(BasicBlock* block);
void impImportBlockCode(BasicBlock* block);
void impReimportMarkBlock(BasicBlock* block);
void impReimportMarkSuccessors(BasicBlock* block);
void impVerifyEHBlock(BasicBlock* block, bool isTryStart);
void impImportBlockPending(BasicBlock* block);
// Similar to impImportBlockPending, but assumes that block has already been imported once and is being
// reimported for some reason. It specifically does *not* look at verCurrentState to set the EntryState
// for the block, but instead, just re-uses the block's existing EntryState.
void impReimportBlockPending(BasicBlock* block);
var_types impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTree** pOp1, GenTree** pOp2);
void impImportBlock(BasicBlock* block);
// Assumes that "block" is a basic block that completes with a non-empty stack. We will assign the values
// on the stack to local variables (the "spill temp" variables). The successor blocks will assume that
    // their incoming stack contents are in those locals. This requires "block" and its successors to agree on
// the variables that will be used -- and for all the predecessors of those successors, and the
// successors of those predecessors, etc. Call such a set of blocks closed under alternating
// successor/predecessor edges a "spill clique." A block is a "predecessor" or "successor" member of the
// clique (or, conceivably, both). Each block has a specified sequence of incoming and outgoing spill
// temps. If "block" already has its outgoing spill temps assigned (they are always a contiguous series
// of local variable numbers, so we represent them with the base local variable number), returns that.
// Otherwise, picks a set of spill temps, and propagates this choice to all blocks in the spill clique of
// which "block" is a member (asserting, in debug mode, that no block in this clique had its spill temps
// chosen already. More precisely, that the incoming or outgoing spill temps are not chosen, depending
// on which kind of member of the clique the block is).
unsigned impGetSpillTmpBase(BasicBlock* block);
// Assumes that "block" is a basic block that completes with a non-empty stack. We have previously
// assigned the values on the stack to local variables (the "spill temp" variables). The successor blocks
    // will assume that their incoming stack contents are in those locals. This requires "block" and its
// successors to agree on the variables and their types that will be used. The CLI spec allows implicit
// conversions between 'int' and 'native int' or 'float' and 'double' stack types. So one predecessor can
// push an int and another can push a native int. For 64-bit we have chosen to implement this by typing
// the "spill temp" as native int, and then importing (or re-importing as needed) so that all the
// predecessors in the "spill clique" push a native int (sign-extending if needed), and all the
// successors receive a native int. Similarly float and double are unified to double.
// This routine is called after a type-mismatch is detected, and it will walk the spill clique to mark
// blocks for re-importation as appropriate (both successors, so they get the right incoming type, and
// predecessors, so they insert an upcast if needed).
void impReimportSpillClique(BasicBlock* block);
// When we compute a "spill clique" (see above) these byte-maps are allocated to have a byte per basic
// block, and represent the predecessor and successor members of the clique currently being computed.
// *** Access to these will need to be locked in a parallel compiler.
JitExpandArray<BYTE> impSpillCliquePredMembers;
JitExpandArray<BYTE> impSpillCliqueSuccMembers;
enum SpillCliqueDir
{
SpillCliquePred,
SpillCliqueSucc
};
// Abstract class for receiving a callback while walking a spill clique
class SpillCliqueWalker
{
public:
virtual void Visit(SpillCliqueDir predOrSucc, BasicBlock* blk) = 0;
};
// This class is used for setting the bbStkTempsIn and bbStkTempsOut on the blocks within a spill clique
class SetSpillTempsBase : public SpillCliqueWalker
{
unsigned m_baseTmp;
public:
SetSpillTempsBase(unsigned baseTmp) : m_baseTmp(baseTmp)
{
}
virtual void Visit(SpillCliqueDir predOrSucc, BasicBlock* blk);
};
// This class is used for implementing impReimportSpillClique part on each block within the spill clique
class ReimportSpillClique : public SpillCliqueWalker
{
Compiler* m_pComp;
public:
ReimportSpillClique(Compiler* pComp) : m_pComp(pComp)
{
}
virtual void Visit(SpillCliqueDir predOrSucc, BasicBlock* blk);
};
// This is the heart of the algorithm for walking spill cliques. It invokes callback->Visit for each
// predecessor or successor within the spill clique
void impWalkSpillCliqueFromPred(BasicBlock* pred, SpillCliqueWalker* callback);
// For a BasicBlock that has already been imported, the EntryState has an array of GenTrees for the
    // incoming locals. This walks that list and resets the types of the GenTrees to match the types of
// the VarDscs. They get out of sync when we have int/native int issues (see impReimportSpillClique).
void impRetypeEntryStateTemps(BasicBlock* blk);
BYTE impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk);
void impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val);
void impPushVar(GenTree* op, typeInfo tiRetVal);
GenTreeLclVar* impCreateLocalNode(unsigned lclNum DEBUGARG(IL_OFFSET offset));
void impLoadVar(unsigned lclNum, IL_OFFSET offset, const typeInfo& tiRetVal);
void impLoadVar(unsigned lclNum, IL_OFFSET offset)
{
impLoadVar(lclNum, offset, lvaGetDesc(lclNum)->lvVerTypeInfo);
}
void impLoadArg(unsigned ilArgNum, IL_OFFSET offset);
void impLoadLoc(unsigned ilLclNum, IL_OFFSET offset);
bool impReturnInstruction(int prefixFlags, OPCODE& opcode);
#ifdef TARGET_ARM
void impMarkLclDstNotPromotable(unsigned tmpNum, GenTree* op, CORINFO_CLASS_HANDLE hClass);
#endif
// A free list of linked list nodes used to represent to-do stacks of basic blocks.
struct BlockListNode
{
BasicBlock* m_blk;
BlockListNode* m_next;
BlockListNode(BasicBlock* blk, BlockListNode* next = nullptr) : m_blk(blk), m_next(next)
{
}
void* operator new(size_t sz, Compiler* comp);
};
BlockListNode* impBlockListNodeFreeList;
void FreeBlockListNode(BlockListNode* node);
bool impIsValueType(typeInfo* pTypeInfo);
var_types mangleVarArgsType(var_types type);
regNumber getCallArgIntRegister(regNumber floatReg);
regNumber getCallArgFloatRegister(regNumber intReg);
#if defined(DEBUG)
static unsigned jitTotalMethodCompiled;
#endif
#ifdef DEBUG
static LONG jitNestingLevel;
#endif // DEBUG
static bool impIsAddressInLocal(const GenTree* tree, GenTree** lclVarTreeOut = nullptr);
void impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult);
// STATIC inlining decision based on the IL code.
void impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle,
CORINFO_METHOD_INFO* methInfo,
bool forceInline,
InlineResult* inlineResult);
void impCheckCanInline(GenTreeCall* call,
CORINFO_METHOD_HANDLE fncHandle,
unsigned methAttr,
CORINFO_CONTEXT_HANDLE exactContextHnd,
InlineCandidateInfo** ppInlineCandidateInfo,
InlineResult* inlineResult);
void impInlineRecordArgInfo(InlineInfo* pInlineInfo,
GenTree* curArgVal,
unsigned argNum,
InlineResult* inlineResult);
void impInlineInitVars(InlineInfo* pInlineInfo);
unsigned impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason));
GenTree* impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclTypeInfo);
bool impInlineIsThis(GenTree* tree, InlArgInfo* inlArgInfo);
bool impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTree* additionalTree,
GenTreeCall::Use* additionalCallArgs,
GenTree* dereferencedAddress,
InlArgInfo* inlArgInfo);
void impMarkInlineCandidate(GenTree* call,
CORINFO_CONTEXT_HANDLE exactContextHnd,
bool exactContextNeedsRuntimeLookup,
CORINFO_CALL_INFO* callInfo);
void impMarkInlineCandidateHelper(GenTreeCall* call,
CORINFO_CONTEXT_HANDLE exactContextHnd,
bool exactContextNeedsRuntimeLookup,
CORINFO_CALL_INFO* callInfo);
bool impTailCallRetTypeCompatible(bool allowWidening,
var_types callerRetType,
CORINFO_CLASS_HANDLE callerRetTypeClass,
CorInfoCallConvExtension callerCallConv,
var_types calleeRetType,
CORINFO_CLASS_HANDLE calleeRetTypeClass,
CorInfoCallConvExtension calleeCallConv);
bool impIsTailCallILPattern(
bool tailPrefixed, OPCODE curOpcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, bool isRecursive);
bool impIsImplicitTailCallCandidate(
OPCODE curOpcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive);
bool impIsClassExact(CORINFO_CLASS_HANDLE classHnd);
bool impCanSkipCovariantStoreCheck(GenTree* value, GenTree* array);
methodPointerInfo* impAllocateMethodPointerInfo(const CORINFO_RESOLVED_TOKEN& token, mdToken tokenConstrained);
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX FlowGraph XX
XX XX
XX Info about the basic-blocks, their contents and the flow analysis XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
public:
BasicBlock* fgFirstBB; // Beginning of the basic block list
BasicBlock* fgLastBB; // End of the basic block list
BasicBlock* fgFirstColdBlock; // First block to be placed in the cold section
BasicBlock* fgEntryBB; // For OSR, the original method's entry point
BasicBlock* fgOSREntryBB; // For OSR, the logical entry point (~ patchpoint)
#if defined(FEATURE_EH_FUNCLETS)
BasicBlock* fgFirstFuncletBB; // First block of outlined funclets (to allow block insertion before the funclets)
#endif
BasicBlock* fgFirstBBScratch; // Block inserted for initialization stuff. Is nullptr if no such block has been
// created.
BasicBlockList* fgReturnBlocks; // list of BBJ_RETURN blocks
unsigned fgEdgeCount; // # of control flow edges between the BBs
unsigned fgBBcount; // # of BBs in the method
#ifdef DEBUG
unsigned fgBBcountAtCodegen; // # of BBs in the method at the start of codegen
#endif
unsigned fgBBNumMax; // The max bbNum that has been assigned to basic blocks
unsigned fgDomBBcount; // # of BBs for which we have dominator and reachability information
BasicBlock** fgBBInvPostOrder; // The flow graph stored in an array sorted in topological order, needed to compute
// dominance. Indexed by block number. Size: fgBBNumMax + 1.
// After the dominance tree is computed, we cache a DFS preorder number and DFS postorder number to compute
// dominance queries in O(1). fgDomTreePreOrder and fgDomTreePostOrder are arrays giving the block's preorder and
// postorder number, respectively. The arrays are indexed by basic block number. (Note that blocks are numbered
// starting from one. Thus, we always waste element zero. This makes debugging easier and makes the code less likely
// to suffer from bugs stemming from forgetting to add or subtract one from the block number to form an array
// index). The arrays are of size fgBBNumMax + 1.
unsigned* fgDomTreePreOrder;
unsigned* fgDomTreePostOrder;
// Dominator tree used by SSA construction and copy propagation (the two are expected to use the same tree
// in order to avoid the need for SSA reconstruction and an "out of SSA" phase).
DomTreeNode* fgSsaDomTree;
bool fgBBVarSetsInited;
// Allocate array like T* a = new T[fgBBNumMax + 1];
// Using helper so we don't keep forgetting +1.
template <typename T>
T* fgAllocateTypeForEachBlk(CompMemKind cmk = CMK_Unknown)
{
return getAllocator(cmk).allocate<T>(fgBBNumMax + 1);
}
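    // Example (illustrative): a per-block side table indexed by bbNum could be allocated as
    //     unsigned* perBlockCount = fgAllocateTypeForEachBlk<unsigned>();
    // which reserves fgBBNumMax + 1 entries, so element 0 stays unused to match the 1-based block numbering.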
// BlockSets are relative to a specific set of BasicBlock numbers. If that changes
// (if the blocks are renumbered), this changes. BlockSets from different epochs
// cannot be meaningfully combined. Note that new blocks can be created with higher
// block numbers without changing the basic block epoch. These blocks *cannot*
// participate in a block set until the blocks are all renumbered, causing the epoch
// to change. This is useful if continuing to use previous block sets is valuable.
// If the epoch is zero, then it is uninitialized, and block sets can't be used.
unsigned fgCurBBEpoch;
unsigned GetCurBasicBlockEpoch()
{
return fgCurBBEpoch;
}
// The number of basic blocks in the current epoch. When the blocks are renumbered,
// this is fgBBcount. As blocks are added, fgBBcount increases, fgCurBBEpochSize remains
// the same, until a new BasicBlock epoch is created, such as when the blocks are all renumbered.
unsigned fgCurBBEpochSize;
// The number of "size_t" elements required to hold a bitset large enough for fgCurBBEpochSize
// bits. This is precomputed to avoid doing math every time BasicBlockBitSetTraits::GetArrSize() is called.
unsigned fgBBSetCountInSizeTUnits;
void NewBasicBlockEpoch()
{
INDEBUG(unsigned oldEpochArrSize = fgBBSetCountInSizeTUnits);
// We have a new epoch. Compute and cache the size needed for new BlockSets.
fgCurBBEpoch++;
fgCurBBEpochSize = fgBBNumMax + 1;
fgBBSetCountInSizeTUnits =
roundUp(fgCurBBEpochSize, (unsigned)(sizeof(size_t) * 8)) / unsigned(sizeof(size_t) * 8);
#ifdef DEBUG
// All BlockSet objects are now invalid!
fgReachabilitySetsValid = false; // the bbReach sets are now invalid!
fgEnterBlksSetValid = false; // the fgEnterBlks set is now invalid!
if (verbose)
{
unsigned epochArrSize = BasicBlockBitSetTraits::GetArrSize(this, sizeof(size_t));
printf("\nNew BlockSet epoch %d, # of blocks (including unused BB00): %u, bitset array size: %u (%s)",
fgCurBBEpoch, fgCurBBEpochSize, epochArrSize, (epochArrSize <= 1) ? "short" : "long");
if ((fgCurBBEpoch != 1) && ((oldEpochArrSize <= 1) != (epochArrSize <= 1)))
{
// If we're not just establishing the first epoch, and the epoch array size has changed such that we're
// going to change our bitset representation from short (just a size_t bitset) to long (a pointer to an
// array of size_t bitsets), then print that out.
printf("; NOTE: BlockSet size was previously %s!", (oldEpochArrSize <= 1) ? "short" : "long");
}
printf("\n");
}
#endif // DEBUG
}
void EnsureBasicBlockEpoch()
{
if (fgCurBBEpochSize != fgBBNumMax + 1)
{
NewBasicBlockEpoch();
}
}
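    // Typical usage (illustrative): after blocks are renumbered, call EnsureBasicBlockEpoch() before
    // allocating any new BlockSet so the set is sized for the current fgBBNumMax; BlockSets created under an
    // older epoch must not be combined with ones from the new epoch.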
BasicBlock* fgNewBasicBlock(BBjumpKinds jumpKind);
void fgEnsureFirstBBisScratch();
bool fgFirstBBisScratch();
bool fgBBisScratch(BasicBlock* block);
void fgExtendEHRegionBefore(BasicBlock* block);
void fgExtendEHRegionAfter(BasicBlock* block);
BasicBlock* fgNewBBbefore(BBjumpKinds jumpKind, BasicBlock* block, bool extendRegion);
BasicBlock* fgNewBBafter(BBjumpKinds jumpKind, BasicBlock* block, bool extendRegion);
BasicBlock* fgNewBBinRegion(BBjumpKinds jumpKind,
unsigned tryIndex,
unsigned hndIndex,
BasicBlock* nearBlk,
bool putInFilter = false,
bool runRarely = false,
bool insertAtEnd = false);
BasicBlock* fgNewBBinRegion(BBjumpKinds jumpKind,
BasicBlock* srcBlk,
bool runRarely = false,
bool insertAtEnd = false);
BasicBlock* fgNewBBinRegion(BBjumpKinds jumpKind);
BasicBlock* fgNewBBinRegionWorker(BBjumpKinds jumpKind,
BasicBlock* afterBlk,
unsigned xcptnIndex,
bool putInTryRegion);
void fgInsertBBbefore(BasicBlock* insertBeforeBlk, BasicBlock* newBlk);
void fgInsertBBafter(BasicBlock* insertAfterBlk, BasicBlock* newBlk);
void fgUnlinkBlock(BasicBlock* block);
#ifdef FEATURE_JIT_METHOD_PERF
unsigned fgMeasureIR();
#endif // FEATURE_JIT_METHOD_PERF
bool fgModified; // True if the flow graph has been modified recently
bool fgComputePredsDone; // Have we computed the bbPreds list
bool fgCheapPredsValid; // Is the bbCheapPreds list valid?
bool fgDomsComputed; // Have we computed the dominator sets?
bool fgReturnBlocksComputed; // Have we computed the return blocks list?
bool fgOptimizedFinally; // Did we optimize any try-finallys?
bool fgHasSwitch; // any BBJ_SWITCH jumps?
BlockSet fgEnterBlks; // Set of blocks which have a special transfer of control; the "entry" blocks plus EH handler
// begin blocks.
#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
    BlockSet fgAlwaysBlks; // Set of blocks which are the BBJ_ALWAYS part of a BBJ_CALLFINALLY/BBJ_ALWAYS pair that should
// never be removed due to a requirement to use the BBJ_ALWAYS for generating code and
// not have "retless" blocks.
#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
#ifdef DEBUG
bool fgReachabilitySetsValid; // Are the bbReach sets valid?
bool fgEnterBlksSetValid; // Is the fgEnterBlks set valid?
#endif // DEBUG
bool fgRemoveRestOfBlock; // true if we know that we will throw
bool fgStmtRemoved; // true if we remove statements -> need new DFA
// There are two modes for ordering of the trees.
// - In FGOrderTree, the dominant ordering is the tree order, and the nodes contained in
// each tree and sub-tree are contiguous, and can be traversed (in gtNext/gtPrev order)
// by traversing the tree according to the order of the operands.
// - In FGOrderLinear, the dominant ordering is the linear order.
enum FlowGraphOrder
{
FGOrderTree,
FGOrderLinear
};
FlowGraphOrder fgOrder;
// The following are boolean flags that keep track of the state of internal data structures
bool fgStmtListThreaded; // true if the node list is now threaded
bool fgCanRelocateEHRegions; // true if we are allowed to relocate the EH regions
bool fgEdgeWeightsComputed; // true after we have called fgComputeEdgeWeights
bool fgHaveValidEdgeWeights; // true if we were successful in computing all of the edge weights
    bool fgSlopUsedInEdgeWeights;  // true if there was some slop used when computing the edge weights
bool fgRangeUsedInEdgeWeights; // true if some of the edgeWeight are expressed in Min..Max form
weight_t fgCalledCount; // count of the number of times this method was called
// This is derived from the profile data
// or is BB_UNITY_WEIGHT when we don't have profile data
#if defined(FEATURE_EH_FUNCLETS)
bool fgFuncletsCreated; // true if the funclet creation phase has been run
#endif // FEATURE_EH_FUNCLETS
    bool fgGlobalMorph; // indicates if we are in the global morphing phase,
                        // since fgMorphTree can be called from several places
bool impBoxTempInUse; // the temp below is valid and available
unsigned impBoxTemp; // a temporary that is used for boxing
#ifdef DEBUG
bool jitFallbackCompile; // Are we doing a fallback compile? That is, have we executed a NO_WAY assert,
// and we are trying to compile again in a "safer", minopts mode?
#endif
#if defined(DEBUG)
unsigned impInlinedCodeSize;
bool fgPrintInlinedMethods;
#endif
jitstd::vector<flowList*>* fgPredListSortVector;
//-------------------------------------------------------------------------
void fgInit();
PhaseStatus fgImport();
PhaseStatus fgTransformIndirectCalls();
PhaseStatus fgTransformPatchpoints();
PhaseStatus fgInline();
PhaseStatus fgRemoveEmptyTry();
PhaseStatus fgRemoveEmptyFinally();
PhaseStatus fgMergeFinallyChains();
PhaseStatus fgCloneFinally();
void fgCleanupContinuation(BasicBlock* continuation);
#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
PhaseStatus fgUpdateFinallyTargetFlags();
void fgClearAllFinallyTargetBits();
void fgAddFinallyTargetFlags();
#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
PhaseStatus fgTailMergeThrows();
void fgTailMergeThrowsFallThroughHelper(BasicBlock* predBlock,
BasicBlock* nonCanonicalBlock,
BasicBlock* canonicalBlock,
flowList* predEdge);
void fgTailMergeThrowsJumpToHelper(BasicBlock* predBlock,
BasicBlock* nonCanonicalBlock,
BasicBlock* canonicalBlock,
flowList* predEdge);
GenTree* fgCheckCallArgUpdate(GenTree* parent, GenTree* child, var_types origType);
#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
// Sometimes we need to defer updating the BBF_FINALLY_TARGET bit. fgNeedToAddFinallyTargetBits signals
// when this is necessary.
bool fgNeedToAddFinallyTargetBits;
#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
bool fgRetargetBranchesToCanonicalCallFinally(BasicBlock* block,
BasicBlock* handler,
BlockToBlockMap& continuationMap);
GenTree* fgGetCritSectOfStaticMethod();
#if defined(FEATURE_EH_FUNCLETS)
void fgAddSyncMethodEnterExit();
GenTree* fgCreateMonitorTree(unsigned lvaMonitorBool, unsigned lvaThisVar, BasicBlock* block, bool enter);
void fgConvertSyncReturnToLeave(BasicBlock* block);
#endif // FEATURE_EH_FUNCLETS
void fgAddReversePInvokeEnterExit();
bool fgMoreThanOneReturnBlock();
// The number of separate return points in the method.
unsigned fgReturnCount;
void fgAddInternal();
enum class FoldResult
{
FOLD_DID_NOTHING,
FOLD_CHANGED_CONTROL_FLOW,
FOLD_REMOVED_LAST_STMT,
FOLD_ALTERED_LAST_STMT,
};
FoldResult fgFoldConditional(BasicBlock* block);
void fgMorphStmts(BasicBlock* block);
void fgMorphBlocks();
void fgMergeBlockReturn(BasicBlock* block);
bool fgMorphBlockStmt(BasicBlock* block, Statement* stmt DEBUGARG(const char* msg));
void fgSetOptions();
#ifdef DEBUG
static fgWalkPreFn fgAssertNoQmark;
void fgPreExpandQmarkChecks(GenTree* expr);
void fgPostExpandQmarkChecks();
static void fgCheckQmarkAllowedForm(GenTree* tree);
#endif
IL_OFFSET fgFindBlockILOffset(BasicBlock* block);
void fgFixEntryFlowForOSR();
BasicBlock* fgSplitBlockAtBeginning(BasicBlock* curr);
BasicBlock* fgSplitBlockAtEnd(BasicBlock* curr);
BasicBlock* fgSplitBlockAfterStatement(BasicBlock* curr, Statement* stmt);
BasicBlock* fgSplitBlockAfterNode(BasicBlock* curr, GenTree* node); // for LIR
BasicBlock* fgSplitEdge(BasicBlock* curr, BasicBlock* succ);
Statement* fgNewStmtFromTree(GenTree* tree, BasicBlock* block, const DebugInfo& di);
Statement* fgNewStmtFromTree(GenTree* tree);
Statement* fgNewStmtFromTree(GenTree* tree, BasicBlock* block);
Statement* fgNewStmtFromTree(GenTree* tree, const DebugInfo& di);
GenTree* fgGetTopLevelQmark(GenTree* expr, GenTree** ppDst = nullptr);
void fgExpandQmarkForCastInstOf(BasicBlock* block, Statement* stmt);
void fgExpandQmarkStmt(BasicBlock* block, Statement* stmt);
void fgExpandQmarkNodes();
// Do "simple lowering." This functionality is (conceptually) part of "general"
// lowering that is distributed between fgMorph and the lowering phase of LSRA.
void fgSimpleLowering();
GenTree* fgInitThisClass();
GenTreeCall* fgGetStaticsCCtorHelper(CORINFO_CLASS_HANDLE cls, CorInfoHelpFunc helper);
GenTreeCall* fgGetSharedCCtor(CORINFO_CLASS_HANDLE cls);
bool backendRequiresLocalVarLifetimes()
{
return !opts.MinOpts() || m_pLinearScan->willEnregisterLocalVars();
}
void fgLocalVarLiveness();
void fgLocalVarLivenessInit();
void fgPerNodeLocalVarLiveness(GenTree* node);
void fgPerBlockLocalVarLiveness();
VARSET_VALRET_TP fgGetHandlerLiveVars(BasicBlock* block);
void fgLiveVarAnalysis(bool updateInternalOnly = false);
void fgComputeLifeCall(VARSET_TP& life, GenTreeCall* call);
void fgComputeLifeTrackedLocalUse(VARSET_TP& life, LclVarDsc& varDsc, GenTreeLclVarCommon* node);
bool fgComputeLifeTrackedLocalDef(VARSET_TP& life,
VARSET_VALARG_TP keepAliveVars,
LclVarDsc& varDsc,
GenTreeLclVarCommon* node);
bool fgComputeLifeUntrackedLocal(VARSET_TP& life,
VARSET_VALARG_TP keepAliveVars,
LclVarDsc& varDsc,
GenTreeLclVarCommon* lclVarNode);
bool fgComputeLifeLocal(VARSET_TP& life, VARSET_VALARG_TP keepAliveVars, GenTree* lclVarNode);
void fgComputeLife(VARSET_TP& life,
GenTree* startNode,
GenTree* endNode,
VARSET_VALARG_TP volatileVars,
bool* pStmtInfoDirty DEBUGARG(bool* treeModf));
void fgComputeLifeLIR(VARSET_TP& life, BasicBlock* block, VARSET_VALARG_TP volatileVars);
bool fgTryRemoveNonLocal(GenTree* node, LIR::Range* blockRange);
void fgRemoveDeadStoreLIR(GenTree* store, BasicBlock* block);
bool fgRemoveDeadStore(GenTree** pTree,
LclVarDsc* varDsc,
VARSET_VALARG_TP life,
bool* doAgain,
bool* pStmtInfoDirty,
bool* pStoreRemoved DEBUGARG(bool* treeModf));
void fgInterBlockLocalVarLiveness();
// Blocks: convenience methods for enabling range-based `for` iteration over the function's blocks, e.g.:
// 1. for (BasicBlock* const block : compiler->Blocks()) ...
// 2. for (BasicBlock* const block : compiler->Blocks(startBlock)) ...
// 3. for (BasicBlock* const block : compiler->Blocks(startBlock, endBlock)) ...
// In case (1), the block list can be empty. In case (2), `startBlock` can be nullptr. In case (3),
// both `startBlock` and `endBlock` must be non-null.
//
BasicBlockSimpleList Blocks() const
{
return BasicBlockSimpleList(fgFirstBB);
}
BasicBlockSimpleList Blocks(BasicBlock* startBlock) const
{
return BasicBlockSimpleList(startBlock);
}
BasicBlockRangeList Blocks(BasicBlock* startBlock, BasicBlock* endBlock) const
{
return BasicBlockRangeList(startBlock, endBlock);
}
// The presence of a partial definition presents some difficulties for SSA: this is both a use of some SSA name
// of "x", and a def of a new SSA name for "x". The tree only has one local variable for "x", so it has to choose
// whether to treat that as the use or def. It chooses the "use", and thus the old SSA name. This map allows us
// to record/recover the "def" SSA number, given the lcl var node for "x" in such a tree.
typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, unsigned> NodeToUnsignedMap;
NodeToUnsignedMap* m_opAsgnVarDefSsaNums;
NodeToUnsignedMap* GetOpAsgnVarDefSsaNums()
{
if (m_opAsgnVarDefSsaNums == nullptr)
{
m_opAsgnVarDefSsaNums = new (getAllocator()) NodeToUnsignedMap(getAllocator());
}
return m_opAsgnVarDefSsaNums;
}
// This map tracks nodes whose value numbers explicitly or implicitly depend on memory states.
// The map provides the entry block of the most closely enclosing loop that
    // defines the memory region accessed when defining the node's VN.
    //
    // This information should be consulted when considering hoisting a node out of a loop, as the VN
// for the node will only be valid within the indicated loop.
//
// It is not fine-grained enough to track memory dependence within loops, so cannot be used
// for more general code motion.
//
// If a node does not have an entry in the map we currently assume the VN is not memory dependent
// and so memory does not constrain hoisting.
//
typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, BasicBlock*> NodeToLoopMemoryBlockMap;
NodeToLoopMemoryBlockMap* m_nodeToLoopMemoryBlockMap;
NodeToLoopMemoryBlockMap* GetNodeToLoopMemoryBlockMap()
{
if (m_nodeToLoopMemoryBlockMap == nullptr)
{
m_nodeToLoopMemoryBlockMap = new (getAllocator()) NodeToLoopMemoryBlockMap(getAllocator());
}
return m_nodeToLoopMemoryBlockMap;
}
void optRecordLoopMemoryDependence(GenTree* tree, BasicBlock* block, ValueNum memoryVN);
void optCopyLoopMemoryDependence(GenTree* fromTree, GenTree* toTree);
// Requires value numbering phase to have completed. Returns the value number ("gtVN") of the
// "tree," EXCEPT in the case of GTF_VAR_USEASG, because the tree node's gtVN member is the
// "use" VN. Performs a lookup into the map of (use asg tree -> def VN.) to return the "def's"
// VN.
inline ValueNum GetUseAsgDefVNOrTreeVN(GenTree* tree);
// Requires that "lcl" has the GTF_VAR_DEF flag set. Returns the SSA number of "lcl".
// Except: assumes that lcl is a def, and if it is
// a partial def (GTF_VAR_USEASG), looks up and returns the SSA number for the "def",
// rather than the "use" SSA number recorded in the tree "lcl".
inline unsigned GetSsaNumForLocalVarDef(GenTree* lcl);
inline bool PreciseRefCountsRequired();
// Performs SSA conversion.
void fgSsaBuild();
// Reset any data structures to the state expected by "fgSsaBuild", so it can be run again.
void fgResetForSsa();
unsigned fgSsaPassesCompleted; // Number of times fgSsaBuild has been run.
// Returns "true" if this is a special variable that is never zero initialized in the prolog.
inline bool fgVarIsNeverZeroInitializedInProlog(unsigned varNum);
// Returns "true" if the variable needs explicit zero initialization.
inline bool fgVarNeedsExplicitZeroInit(unsigned varNum, bool bbInALoop, bool bbIsReturn);
// The value numbers for this compilation.
ValueNumStore* vnStore;
public:
ValueNumStore* GetValueNumStore()
{
return vnStore;
}
// Do value numbering (assign a value number to each
// tree node).
void fgValueNumber();
// Computes new GcHeap VN via the assignment H[elemTypeEq][arrVN][inx][fldSeq] = rhsVN.
// Assumes that "elemTypeEq" is the (equivalence class rep) of the array element type.
// The 'indType' is the indirection type of the lhs of the assignment and will typically
// match the element type of the array or fldSeq. When this type doesn't match
// or if the fldSeq is 'NotAField' we invalidate the array contents H[elemTypeEq][arrVN]
//
ValueNum fgValueNumberArrIndexAssign(CORINFO_CLASS_HANDLE elemTypeEq,
ValueNum arrVN,
ValueNum inxVN,
FieldSeqNode* fldSeq,
ValueNum rhsVN,
var_types indType);
// Requires that "tree" is a GT_IND marked as an array index, and that its address argument
// has been parsed to yield the other input arguments. If evaluation of the address
// can raise exceptions, those should be captured in the exception set "addrXvnp".
// Assumes that "elemTypeEq" is the (equivalence class rep) of the array element type.
// Marks "tree" with the VN for H[elemTypeEq][arrVN][inx][fldSeq] (for the liberal VN; a new unique
// VN for the conservative VN.) Also marks the tree's argument as the address of an array element.
// The type tree->TypeGet() will typically match the element type of the array or fldSeq.
// When this type doesn't match or if the fldSeq is 'NotAField' we return a new unique VN
//
ValueNum fgValueNumberArrIndexVal(GenTree* tree,
CORINFO_CLASS_HANDLE elemTypeEq,
ValueNum arrVN,
ValueNum inxVN,
ValueNumPair addrXvnp,
FieldSeqNode* fldSeq);
// Requires "funcApp" to be a VNF_PtrToArrElem, and "addrXvnp" to represent the exception set thrown
// by evaluating the array index expression "tree". Returns the value number resulting from
// dereferencing the array in the current GcHeap state. If "tree" is non-null, it must be the
// "GT_IND" that does the dereference, and it is given the returned value number.
ValueNum fgValueNumberArrIndexVal(GenTree* tree, VNFuncApp* funcApp, ValueNumPair addrXvnp);
// Compute the value number for a byref-exposed load of the given type via the given pointerVN.
ValueNum fgValueNumberByrefExposedLoad(var_types type, ValueNum pointerVN);
unsigned fgVNPassesCompleted; // Number of times fgValueNumber has been run.
// Utility functions for fgValueNumber.
// Perform value-numbering for the trees in "blk".
void fgValueNumberBlock(BasicBlock* blk);
// Requires that "entryBlock" is the entry block of loop "loopNum", and that "loopNum" is the
// innermost loop of which "entryBlock" is the entry. Returns the value number that should be
    // assumed for the memoryKind at the start of "entryBlock".
ValueNum fgMemoryVNForLoopSideEffects(MemoryKind memoryKind, BasicBlock* entryBlock, unsigned loopNum);
// Called when an operation (performed by "tree", described by "msg") may cause the GcHeap to be mutated.
// As GcHeap is a subset of ByrefExposed, this will also annotate the ByrefExposed mutation.
void fgMutateGcHeap(GenTree* tree DEBUGARG(const char* msg));
// Called when an operation (performed by "tree", described by "msg") may cause an address-exposed local to be
// mutated.
void fgMutateAddressExposedLocal(GenTree* tree DEBUGARG(const char* msg));
// For a GC heap store at curTree, record the new curMemoryVN's and update curTree's MemorySsaMap.
// As GcHeap is a subset of ByrefExposed, this will also record the ByrefExposed store.
void recordGcHeapStore(GenTree* curTree, ValueNum gcHeapVN DEBUGARG(const char* msg));
// For a store to an address-exposed local at curTree, record the new curMemoryVN and update curTree's MemorySsaMap.
void recordAddressExposedLocalStore(GenTree* curTree, ValueNum memoryVN DEBUGARG(const char* msg));
void fgSetCurrentMemoryVN(MemoryKind memoryKind, ValueNum newMemoryVN);
// Tree caused an update in the current memory VN. If "tree" has an associated heap SSA #, record that
// value in that SSA #.
void fgValueNumberRecordMemorySsa(MemoryKind memoryKind, GenTree* tree);
// The input 'tree' is a leaf node that is a constant
// Assign the proper value number to the tree
void fgValueNumberTreeConst(GenTree* tree);
// If the VN store has been initialized, reassign the
// proper value number to the constant tree.
void fgUpdateConstTreeValueNumber(GenTree* tree);
// Assumes that all inputs to "tree" have had value numbers assigned; assigns a VN to tree.
// (With some exceptions: the VN of the lhs of an assignment is assigned as part of the
// assignment.)
void fgValueNumberTree(GenTree* tree);
void fgValueNumberAssignment(GenTreeOp* tree);
// Does value-numbering for a block assignment.
void fgValueNumberBlockAssignment(GenTree* tree);
bool fgValueNumberBlockAssignmentTypeCheck(LclVarDsc* dstVarDsc, FieldSeqNode* dstFldSeq, GenTree* src);
// Does value-numbering for a cast tree.
void fgValueNumberCastTree(GenTree* tree);
// Does value-numbering for an intrinsic tree.
void fgValueNumberIntrinsic(GenTree* tree);
void fgValueNumberArrIndexAddr(GenTreeArrAddr* arrAddr);
#ifdef FEATURE_SIMD
// Does value-numbering for a GT_SIMD tree
void fgValueNumberSimd(GenTreeSIMD* tree);
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
// Does value-numbering for a GT_HWINTRINSIC tree
void fgValueNumberHWIntrinsic(GenTreeHWIntrinsic* tree);
#endif // FEATURE_HW_INTRINSICS
// Does value-numbering for a call. We interpret some helper calls.
void fgValueNumberCall(GenTreeCall* call);
// Does value-numbering for a helper representing a cast operation.
void fgValueNumberCastHelper(GenTreeCall* call);
// Does value-numbering for a helper "call" that has a VN function symbol "vnf".
void fgValueNumberHelperCallFunc(GenTreeCall* call, VNFunc vnf, ValueNumPair vnpExc);
// Requires "helpCall" to be a helper call. Assigns it a value number;
// we understand the semantics of some of the calls. Returns "true" if
// the call may modify the heap (we assume arbitrary memory side effects if so).
bool fgValueNumberHelperCall(GenTreeCall* helpCall);
// Requires that "helpFunc" is one of the pure Jit Helper methods.
// Returns the corresponding VNFunc to use for value numbering
VNFunc fgValueNumberJitHelperMethodVNFunc(CorInfoHelpFunc helpFunc);
// Adds the exception set for the current tree node which has a memory indirection operation
void fgValueNumberAddExceptionSetForIndirection(GenTree* tree, GenTree* baseAddr);
// Adds the exception sets for the current tree node which is performing a division or modulus operation
void fgValueNumberAddExceptionSetForDivision(GenTree* tree);
    // Adds the exception set for the current tree node which is performing an overflow checking operation
void fgValueNumberAddExceptionSetForOverflow(GenTree* tree);
// Adds the exception set for the current tree node which is performing a bounds check operation
void fgValueNumberAddExceptionSetForBoundsCheck(GenTree* tree);
// Adds the exception set for the current tree node which is performing a ckfinite operation
void fgValueNumberAddExceptionSetForCkFinite(GenTree* tree);
// Adds the exception sets for the current tree node
void fgValueNumberAddExceptionSet(GenTree* tree);
#ifdef DEBUG
void fgDebugCheckExceptionSets();
void fgDebugCheckValueNumberedTree(GenTree* tree);
#endif
// These are the current value number for the memory implicit variables while
// doing value numbering. These are the value numbers under the "liberal" interpretation
// of memory values; the "conservative" interpretation needs no VN, since every access of
// memory yields an unknown value.
ValueNum fgCurMemoryVN[MemoryKindCount];
// Return a "pseudo"-class handle for an array element type. If "elemType" is TYP_STRUCT,
// requires "elemStructType" to be non-null (and to have a low-order zero). Otherwise, low order bit
// is 1, and the rest is an encoding of "elemTyp".
static CORINFO_CLASS_HANDLE EncodeElemType(var_types elemTyp, CORINFO_CLASS_HANDLE elemStructType)
{
if (elemStructType != nullptr)
{
assert(varTypeIsStruct(elemTyp) || elemTyp == TYP_REF || elemTyp == TYP_BYREF ||
varTypeIsIntegral(elemTyp));
assert((size_t(elemStructType) & 0x1) == 0x0); // Make sure the encoding below is valid.
return elemStructType;
}
else
{
assert(elemTyp != TYP_STRUCT);
elemTyp = varTypeToSigned(elemTyp);
return CORINFO_CLASS_HANDLE(size_t(elemTyp) << 1 | 0x1);
}
}
// If "clsHnd" is the result of an "EncodePrim" call, returns true and sets "*pPrimType" to the
// var_types it represents. Otherwise, returns TYP_STRUCT (on the assumption that "clsHnd" is
// the struct type of the element).
static var_types DecodeElemType(CORINFO_CLASS_HANDLE clsHnd)
{
size_t clsHndVal = size_t(clsHnd);
if (clsHndVal & 0x1)
{
return var_types(clsHndVal >> 1);
}
else
{
return TYP_STRUCT;
}
}
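    // Illustrative round trip: EncodeElemType(TYP_INT, nullptr) yields the handle ((TYP_INT << 1) | 0x1),
    // which DecodeElemType maps back to TYP_INT; a genuine struct class handle (low-order bit clear)
    // decodes to TYP_STRUCT.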
// Convert a BYTE which represents the VM's CorInfoGCtype to the JIT's var_types
var_types getJitGCType(BYTE gcType);
// Returns true if the provided type should be treated as a primitive type
// for the unmanaged calling conventions.
bool isNativePrimitiveStructType(CORINFO_CLASS_HANDLE clsHnd);
enum structPassingKind
{
SPK_Unknown, // Invalid value, never returned
SPK_PrimitiveType, // The struct is passed/returned using a primitive type.
        SPK_EnclosingType, // Like SPK_PrimitiveType, but used for return types that
// require a primitive type temp that is larger than the struct size.
// Currently used for structs of size 3, 5, 6, or 7 bytes.
SPK_ByValue, // The struct is passed/returned by value (using the ABI rules)
// for ARM64 and UNIX_X64 in multiple registers. (when all of the
// parameters registers are used, then the stack will be used)
// for X86 passed on the stack, for ARM32 passed in registers
// or the stack or split between registers and the stack.
SPK_ByValueAsHfa, // The struct is passed/returned as an HFA in multiple registers.
        SPK_ByReference // The struct is passed/returned by reference to a copy/buffer.
    };
// Get the "primitive" type that is is used when we are given a struct of size 'structSize'.
// For pointer sized structs the 'clsHnd' is used to determine if the struct contains GC ref.
// A "primitive" type is one of the scalar types: byte, short, int, long, ref, float, double
// If we can't or shouldn't use a "primitive" type then TYP_UNKNOWN is returned.
//
// isVarArg is passed for use on Windows Arm64 to change the decision returned regarding
// hfa types.
//
var_types getPrimitiveTypeForStruct(unsigned structSize, CORINFO_CLASS_HANDLE clsHnd, bool isVarArg);
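    // For example (illustrative; the exact mapping follows the target ABI rules): a 4-byte struct with no GC
    // fields would typically map to TYP_INT, a pointer-sized struct whose single field is an object reference
    // would map to TYP_REF, and a struct with no suitable scalar mapping yields TYP_UNKNOWN.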
// Get the type that is used to pass values of the given struct type.
// isVarArg is passed for use on Windows Arm64 to change the decision returned regarding
// hfa types.
//
var_types getArgTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
structPassingKind* wbPassStruct,
bool isVarArg,
unsigned structSize);
// Get the type that is used to return values of the given struct type.
// If the size is unknown, pass 0 and it will be determined from 'clsHnd'.
var_types getReturnTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
CorInfoCallConvExtension callConv,
structPassingKind* wbPassStruct = nullptr,
unsigned structSize = 0);
#ifdef DEBUG
// Print a representation of "vnp" or "vn" on standard output.
// If "level" is non-zero, we also print out a partial expansion of the value.
void vnpPrint(ValueNumPair vnp, unsigned level);
void vnPrint(ValueNum vn, unsigned level);
#endif
bool fgDominate(BasicBlock* b1, BasicBlock* b2); // Return true if b1 dominates b2
// Dominator computation member functions
// Not exposed outside Compiler
protected:
bool fgReachable(BasicBlock* b1, BasicBlock* b2); // Returns true if block b1 can reach block b2
    // Compute immediate dominators, the dominator tree, and its pre/post-order traversal numbers.
void fgComputeDoms();
void fgCompDominatedByExceptionalEntryBlocks();
BlockSet_ValRet_T fgGetDominatorSet(BasicBlock* block); // Returns a set of blocks that dominate the given block.
// Note: this is relatively slow compared to calling fgDominate(),
                                                            // especially when you only need a single block-versus-block check.
void fgComputeReachabilitySets(); // Compute bbReach sets. (Also sets BBF_GC_SAFE_POINT flag on blocks.)
void fgComputeReturnBlocks(); // Initialize fgReturnBlocks to a list of BBJ_RETURN blocks.
void fgComputeEnterBlocksSet(); // Compute the set of entry blocks, 'fgEnterBlks'.
    // Remove blocks determined to be unreachable by the 'canRemoveBlock' predicate.
template <typename CanRemoveBlockBody>
bool fgRemoveUnreachableBlocks(CanRemoveBlockBody canRemoveBlock);
void fgComputeReachability(); // Perform flow graph node reachability analysis.
void fgRemoveDeadBlocks(); // Identify and remove dead blocks.
BasicBlock* fgIntersectDom(BasicBlock* a, BasicBlock* b); // Intersect two immediate dominator sets.
    void fgDfsInvPostOrder(); // In order to compute dominance using fgIntersectDom, the flow graph nodes must be
                              // processed in topological sort order; this function takes care of that.
void fgDfsInvPostOrderHelper(BasicBlock* block, BlockSet& visited, unsigned* count);
BlockSet_ValRet_T fgDomFindStartNodes(); // Computes which basic blocks don't have incoming edges in the flow graph.
// Returns this as a set.
INDEBUG(void fgDispDomTree(DomTreeNode* domTree);) // Helper that prints out the Dominator Tree in debug builds.
DomTreeNode* fgBuildDomTree(); // Once we compute all the immediate dominator sets for each node in the flow graph
                                   // (performed by fgComputeDoms), this procedure builds the dominance tree represented as
                                   // adjacency lists.
// In order to speed up queries of the form 'Does A dominate B', we can perform a DFS preorder and postorder
// traversal of the dominance tree, and the dominance query becomes: A dominates B iff preOrder(A) <= preOrder(B)
// && postOrder(A) >= postOrder(B), making the computation O(1).
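// For example (illustrative numbers only, not taken from any particular flow graph): if preOrder(A) = 2,
// postOrder(A) = 9, preOrder(B) = 4 and postOrder(B) = 5, then B's interval [4, 5] nests inside A's
// interval [2, 9], so A dominates B, and the query is just two integer comparisons.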
void fgNumberDomTree(DomTreeNode* domTree);
// When the flow graph changes, we need to update the block numbers, predecessor lists, reachability sets,
// dominators, and possibly loops.
void fgUpdateChangedFlowGraph(const bool computePreds = true,
const bool computeDoms = true,
const bool computeReturnBlocks = false,
const bool computeLoops = false);
public:
// Compute the predecessors of the blocks in the control flow graph.
void fgComputePreds();
// Remove all predecessor information.
void fgRemovePreds();
// Compute the cheap flow graph predecessors lists. This is used in some early phases
// before the full predecessors lists are computed.
void fgComputeCheapPreds();
private:
void fgAddCheapPred(BasicBlock* block, BasicBlock* blockPred);
void fgRemoveCheapPred(BasicBlock* block, BasicBlock* blockPred);
public:
enum GCPollType
{
GCPOLL_NONE,
GCPOLL_CALL,
GCPOLL_INLINE
};
// Initialize the per-block variable sets (used for liveness analysis).
void fgInitBlockVarSets();
PhaseStatus fgInsertGCPolls();
BasicBlock* fgCreateGCPoll(GCPollType pollType, BasicBlock* block);
// Requires that "block" is a block that returns from
// a finally. Returns the number of successors (jump targets of
// blocks in the covered "try" that did a "LEAVE".)
unsigned fgNSuccsOfFinallyRet(BasicBlock* block);
// Requires that "block" is a block that returns (in the sense of BBJ_EHFINALLYRET) from
// a finally. Returns its "i"th successor (jump targets of
// of blocks in the covered "try" that did a "LEAVE".)
// Requires that "i" < fgNSuccsOfFinallyRet(block).
BasicBlock* fgSuccOfFinallyRet(BasicBlock* block, unsigned i);
private:
// Factor out common portions of the impls of the methods above.
void fgSuccOfFinallyRetWork(BasicBlock* block, unsigned i, BasicBlock** bres, unsigned* nres);
public:
// For many purposes, it is desirable to be able to enumerate the *distinct* targets of a switch statement,
// skipping duplicate targets. (E.g., in flow analyses that are only interested in the set of possible targets.)
// SwitchUniqueSuccSet contains the non-duplicated switch targets.
// (Code that modifies the jump table of a switch has an obligation to call Compiler::UpdateSwitchTableTarget,
// which in turn will call the "UpdateTarget" method of this type if a SwitchUniqueSuccSet has already
// been computed for the switch block. If a switch block is deleted or is transformed into a non-switch,
// we leave the entry associated with the block, but it will no longer be accessed.)
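// For example (illustrative only), a flow analysis can visit each distinct switch target exactly once with
// something like:
//     SwitchUniqueSuccSet sd = comp->GetDescriptorForSwitch(switchBlk);
//     for (unsigned i = 0; i < sd.numDistinctSuccs; i++)
//     {
//         BasicBlock* succ = sd.nonDuplicates[i];
//         // ... process succ ...
//     }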
struct SwitchUniqueSuccSet
{
unsigned numDistinctSuccs; // Number of distinct targets of the switch.
BasicBlock** nonDuplicates; // Array of "numDistinctSuccs", containing all the distinct switch target
// successors.
// The switch block "switchBlk" just had an entry with value "from" modified to the value "to".
// Update "this" as necessary: if "from" is no longer an element of the jump table of "switchBlk",
// remove it from "this", and ensure that "to" is a member. Use "alloc" to do any required allocation.
void UpdateTarget(CompAllocator alloc, BasicBlock* switchBlk, BasicBlock* from, BasicBlock* to);
};
typedef JitHashTable<BasicBlock*, JitPtrKeyFuncs<BasicBlock>, SwitchUniqueSuccSet> BlockToSwitchDescMap;
private:
// Maps BasicBlock*'s that end in switch statements to SwitchUniqueSuccSets that allow
// iteration over only the distinct successors.
BlockToSwitchDescMap* m_switchDescMap;
public:
BlockToSwitchDescMap* GetSwitchDescMap(bool createIfNull = true)
{
if ((m_switchDescMap == nullptr) && createIfNull)
{
m_switchDescMap = new (getAllocator()) BlockToSwitchDescMap(getAllocator());
}
return m_switchDescMap;
}
// Invalidate the map of unique switch block successors. For example, since the hash key of the map
// depends on block numbers, we must invalidate the map when the blocks are renumbered, to ensure that
// we don't accidentally look up and return the wrong switch data.
void InvalidateUniqueSwitchSuccMap()
{
m_switchDescMap = nullptr;
}
// Requires "switchBlock" to be a block that ends in a switch. Returns
// the corresponding SwitchUniqueSuccSet.
SwitchUniqueSuccSet GetDescriptorForSwitch(BasicBlock* switchBlk);
// The switch block "switchBlk" just had an entry with value "from" modified to the value "to".
// Update "this" as necessary: if "from" is no longer an element of the jump table of "switchBlk",
// remove it from "this", and ensure that "to" is a member.
void UpdateSwitchTableTarget(BasicBlock* switchBlk, BasicBlock* from, BasicBlock* to);
// Remove the "SwitchUniqueSuccSet" of "switchBlk" in the BlockToSwitchDescMap.
void fgInvalidateSwitchDescMapEntry(BasicBlock* switchBlk);
BasicBlock* fgFirstBlockOfHandler(BasicBlock* block);
bool fgIsFirstBlockOfFilterOrHandler(BasicBlock* block);
flowList* fgGetPredForBlock(BasicBlock* block, BasicBlock* blockPred);
flowList* fgGetPredForBlock(BasicBlock* block, BasicBlock* blockPred, flowList*** ptrToPred);
flowList* fgRemoveRefPred(BasicBlock* block, BasicBlock* blockPred);
flowList* fgRemoveAllRefPreds(BasicBlock* block, BasicBlock* blockPred);
void fgRemoveBlockAsPred(BasicBlock* block);
void fgChangeSwitchBlock(BasicBlock* oldSwitchBlock, BasicBlock* newSwitchBlock);
void fgReplaceSwitchJumpTarget(BasicBlock* blockSwitch, BasicBlock* newTarget, BasicBlock* oldTarget);
void fgReplaceJumpTarget(BasicBlock* block, BasicBlock* newTarget, BasicBlock* oldTarget);
void fgReplacePred(BasicBlock* block, BasicBlock* oldPred, BasicBlock* newPred);
flowList* fgAddRefPred(BasicBlock* block,
BasicBlock* blockPred,
flowList* oldEdge = nullptr,
bool initializingPreds = false); // Only set to 'true' when we are computing preds in
// fgComputePreds()
void fgFindBasicBlocks();
bool fgIsBetterFallThrough(BasicBlock* bCur, BasicBlock* bAlt);
bool fgCheckEHCanInsertAfterBlock(BasicBlock* blk, unsigned regionIndex, bool putInTryRegion);
BasicBlock* fgFindInsertPoint(unsigned regionIndex,
bool putInTryRegion,
BasicBlock* startBlk,
BasicBlock* endBlk,
BasicBlock* nearBlk,
BasicBlock* jumpBlk,
bool runRarely);
unsigned fgGetNestingLevel(BasicBlock* block, unsigned* pFinallyNesting = nullptr);
void fgPostImportationCleanup();
void fgRemoveStmt(BasicBlock* block, Statement* stmt DEBUGARG(bool isUnlink = false));
void fgUnlinkStmt(BasicBlock* block, Statement* stmt);
bool fgCheckRemoveStmt(BasicBlock* block, Statement* stmt);
void fgCreateLoopPreHeader(unsigned lnum);
void fgUnreachableBlock(BasicBlock* block);
void fgRemoveConditionalJump(BasicBlock* block);
BasicBlock* fgLastBBInMainFunction();
BasicBlock* fgEndBBAfterMainFunction();
void fgUnlinkRange(BasicBlock* bBeg, BasicBlock* bEnd);
void fgRemoveBlock(BasicBlock* block, bool unreachable);
bool fgCanCompactBlocks(BasicBlock* block, BasicBlock* bNext);
void fgCompactBlocks(BasicBlock* block, BasicBlock* bNext);
void fgUpdateLoopsAfterCompacting(BasicBlock* block, BasicBlock* bNext);
BasicBlock* fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst);
bool fgRenumberBlocks();
bool fgExpandRarelyRunBlocks();
bool fgEhAllowsMoveBlock(BasicBlock* bBefore, BasicBlock* bAfter);
void fgMoveBlocksAfter(BasicBlock* bStart, BasicBlock* bEnd, BasicBlock* insertAfterBlk);
enum FG_RELOCATE_TYPE
{
FG_RELOCATE_TRY, // relocate the 'try' region
FG_RELOCATE_HANDLER // relocate the handler region (including the filter if necessary)
};
BasicBlock* fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE relocateType);
#if defined(FEATURE_EH_FUNCLETS)
#if defined(TARGET_ARM)
void fgClearFinallyTargetBit(BasicBlock* block);
#endif // defined(TARGET_ARM)
bool fgIsIntraHandlerPred(BasicBlock* predBlock, BasicBlock* block);
bool fgAnyIntraHandlerPreds(BasicBlock* block);
void fgInsertFuncletPrologBlock(BasicBlock* block);
void fgCreateFuncletPrologBlocks();
void fgCreateFunclets();
#else // !FEATURE_EH_FUNCLETS
bool fgRelocateEHRegions();
#endif // !FEATURE_EH_FUNCLETS
bool fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock* target);
bool fgBlockEndFavorsTailDuplication(BasicBlock* block, unsigned lclNum);
bool fgBlockIsGoodTailDuplicationCandidate(BasicBlock* block, unsigned* lclNum);
bool fgOptimizeEmptyBlock(BasicBlock* block);
bool fgOptimizeBranchToEmptyUnconditional(BasicBlock* block, BasicBlock* bDest);
bool fgOptimizeBranch(BasicBlock* bJump);
bool fgOptimizeSwitchBranches(BasicBlock* block);
bool fgOptimizeBranchToNext(BasicBlock* block, BasicBlock* bNext, BasicBlock* bPrev);
bool fgOptimizeSwitchJumps();
#ifdef DEBUG
void fgPrintEdgeWeights();
#endif
void fgComputeBlockAndEdgeWeights();
weight_t fgComputeMissingBlockWeights();
void fgComputeCalledCount(weight_t returnWeight);
void fgComputeEdgeWeights();
bool fgReorderBlocks();
PhaseStatus fgDetermineFirstColdBlock();
bool fgIsForwardBranch(BasicBlock* bJump, BasicBlock* bSrc = nullptr);
bool fgUpdateFlowGraph(bool doTailDup = false);
void fgFindOperOrder();
// predicate that returns whether you should split here
typedef bool(fgSplitPredicate)(GenTree* tree, GenTree* parent, fgWalkData* data);
void fgSetBlockOrder();
void fgRemoveReturnBlock(BasicBlock* block);
/* Helper code that has been factored out */
inline void fgConvertBBToThrowBB(BasicBlock* block);
bool fgCastNeeded(GenTree* tree, var_types toType);
GenTree* fgDoNormalizeOnStore(GenTree* tree);
GenTree* fgMakeTmpArgNode(fgArgTabEntry* curArgTabEntry);
// The following members check for loops that don't execute calls
bool fgLoopCallMarked;
void fgLoopCallTest(BasicBlock* srcBB, BasicBlock* dstBB);
void fgLoopCallMark();
void fgMarkLoopHead(BasicBlock* block);
unsigned fgGetCodeEstimate(BasicBlock* block);
#if DUMP_FLOWGRAPHS
enum class PhasePosition
{
PrePhase,
PostPhase
};
const char* fgProcessEscapes(const char* nameIn, escapeMapping_t* map);
static void fgDumpTree(FILE* fgxFile, GenTree* const tree);
FILE* fgOpenFlowGraphFile(bool* wbDontClose, Phases phase, PhasePosition pos, LPCWSTR type);
bool fgDumpFlowGraph(Phases phase, PhasePosition pos);
#endif // DUMP_FLOWGRAPHS
#ifdef DEBUG
void fgDispDoms();
void fgDispReach();
void fgDispBBLiveness(BasicBlock* block);
void fgDispBBLiveness();
void fgTableDispBasicBlock(BasicBlock* block, int ibcColWidth = 0);
void fgDispBasicBlocks(BasicBlock* firstBlock, BasicBlock* lastBlock, bool dumpTrees);
void fgDispBasicBlocks(bool dumpTrees = false);
void fgDumpStmtTree(Statement* stmt, unsigned bbNum);
void fgDumpBlock(BasicBlock* block);
void fgDumpTrees(BasicBlock* firstBlock, BasicBlock* lastBlock);
static fgWalkPreFn fgStress64RsltMulCB;
void fgStress64RsltMul();
void fgDebugCheckUpdate();
void fgDebugCheckBBNumIncreasing();
void fgDebugCheckBBlist(bool checkBBNum = false, bool checkBBRefs = true);
void fgDebugCheckBlockLinks();
void fgDebugCheckLinks(bool morphTrees = false);
void fgDebugCheckStmtsList(BasicBlock* block, bool morphTrees);
void fgDebugCheckNodeLinks(BasicBlock* block, Statement* stmt);
void fgDebugCheckNodesUniqueness();
void fgDebugCheckLoopTable();
void fgDebugCheckFlags(GenTree* tree);
void fgDebugCheckDispFlags(GenTree* tree, GenTreeFlags dispFlags, GenTreeDebugFlags debugFlags);
void fgDebugCheckFlagsHelper(GenTree* tree, GenTreeFlags actualFlags, GenTreeFlags expectedFlags);
void fgDebugCheckTryFinallyExits();
void fgDebugCheckProfileData();
bool fgDebugCheckIncomingProfileData(BasicBlock* block);
bool fgDebugCheckOutgoingProfileData(BasicBlock* block);
#endif // DEBUG
static bool fgProfileWeightsEqual(weight_t weight1, weight_t weight2);
static bool fgProfileWeightsConsistent(weight_t weight1, weight_t weight2);
static GenTree* fgGetFirstNode(GenTree* tree);
//--------------------- Walking the trees in the IR -----------------------
struct fgWalkData
{
Compiler* compiler;
fgWalkPreFn* wtprVisitorFn;
fgWalkPostFn* wtpoVisitorFn;
void* pCallbackData; // user-provided data
GenTree* parent; // parent of current node, provided to callback
GenTreeStack* parentStack; // stack of parent nodes, if asked for
bool wtprLclsOnly; // whether to only visit lclvar nodes
#ifdef DEBUG
bool printModified; // callback can use this
#endif
};
fgWalkResult fgWalkTreePre(GenTree** pTree,
fgWalkPreFn* visitor,
void* pCallBackData = nullptr,
bool lclVarsOnly = false,
bool computeStack = false);
fgWalkResult fgWalkTree(GenTree** pTree,
fgWalkPreFn* preVisitor,
fgWalkPostFn* postVisitor,
void* pCallBackData = nullptr);
void fgWalkAllTreesPre(fgWalkPreFn* visitor, void* pCallBackData);
//----- Postorder
fgWalkResult fgWalkTreePost(GenTree** pTree,
fgWalkPostFn* visitor,
void* pCallBackData = nullptr,
bool computeStack = false);
// An fgWalkPreFn that looks for expressions that have inline throws in
// minopts mode. Basically it looks for trees with gtOverflowEx() or
// GTF_IND_RNGCHK. It returns WALK_ABORT if one is found. It
// returns WALK_SKIP_SUBTREES if GTF_EXCEPT is not set (assumes flags
// properly propagated to parent trees). It returns WALK_CONTINUE
// otherwise.
static fgWalkResult fgChkThrowCB(GenTree** pTree, Compiler::fgWalkData* data);
static fgWalkResult fgChkLocAllocCB(GenTree** pTree, Compiler::fgWalkData* data);
static fgWalkResult fgChkQmarkCB(GenTree** pTree, Compiler::fgWalkData* data);
/**************************************************************************
* PROTECTED
*************************************************************************/
protected:
friend class SsaBuilder;
friend struct ValueNumberState;
//--------------------- Detect the basic blocks ---------------------------
BasicBlock** fgBBs; // Table of pointers to the BBs
void fgInitBBLookup();
BasicBlock* fgLookupBB(unsigned addr);
bool fgCanSwitchToOptimized();
void fgSwitchToOptimized(const char* reason);
bool fgMayExplicitTailCall();
void fgFindJumpTargets(const BYTE* codeAddr, IL_OFFSET codeSize, FixedBitVect* jumpTarget);
void fgMarkBackwardJump(BasicBlock* startBlock, BasicBlock* endBlock);
void fgLinkBasicBlocks();
unsigned fgMakeBasicBlocks(const BYTE* codeAddr, IL_OFFSET codeSize, FixedBitVect* jumpTarget);
void fgCheckBasicBlockControlFlow();
void fgControlFlowPermitted(BasicBlock* blkSrc,
BasicBlock* blkDest,
bool IsLeave = false /* is the src a leave block */);
bool fgFlowToFirstBlockOfInnerTry(BasicBlock* blkSrc, BasicBlock* blkDest, bool sibling);
void fgObserveInlineConstants(OPCODE opcode, const FgStack& stack, bool isInlining);
void fgAdjustForAddressExposedOrWrittenThis();
unsigned fgStressBBProf()
{
#ifdef DEBUG
unsigned result = JitConfig.JitStressBBProf();
if (result == 0)
{
if (compStressCompile(STRESS_BB_PROFILE, 15))
{
result = 1;
}
}
return result;
#else
return 0;
#endif
}
bool fgHaveProfileData();
bool fgGetProfileWeightForBasicBlock(IL_OFFSET offset, weight_t* weight);
Instrumentor* fgCountInstrumentor;
Instrumentor* fgClassInstrumentor;
PhaseStatus fgPrepareToInstrumentMethod();
PhaseStatus fgInstrumentMethod();
PhaseStatus fgIncorporateProfileData();
void fgIncorporateBlockCounts();
void fgIncorporateEdgeCounts();
CORINFO_CLASS_HANDLE getRandomClass(ICorJitInfo::PgoInstrumentationSchema* schema,
UINT32 countSchemaItems,
BYTE* pInstrumentationData,
int32_t ilOffset,
CLRRandom* random);
public:
const char* fgPgoFailReason;
bool fgPgoDisabled;
ICorJitInfo::PgoSource fgPgoSource;
ICorJitInfo::PgoInstrumentationSchema* fgPgoSchema;
BYTE* fgPgoData;
UINT32 fgPgoSchemaCount;
HRESULT fgPgoQueryResult;
UINT32 fgNumProfileRuns;
UINT32 fgPgoBlockCounts;
UINT32 fgPgoEdgeCounts;
UINT32 fgPgoClassProfiles;
unsigned fgPgoInlineePgo;
unsigned fgPgoInlineeNoPgo;
unsigned fgPgoInlineeNoPgoSingleBlock;
void WalkSpanningTree(SpanningTreeVisitor* visitor);
void fgSetProfileWeight(BasicBlock* block, weight_t weight);
void fgApplyProfileScale();
bool fgHaveSufficientProfileData();
bool fgHaveTrustedProfileData();
// fgIsUsingProfileWeights - returns true if we have real profile data for this method
// or if we have some fake profile data for the stress mode
bool fgIsUsingProfileWeights()
{
return (fgHaveProfileData() || fgStressBBProf());
}
// fgProfileRunsCount - returns total number of scenario runs for the profile data
// or BB_UNITY_WEIGHT_UNSIGNED when we aren't using profile data.
unsigned fgProfileRunsCount()
{
return fgIsUsingProfileWeights() ? fgNumProfileRuns : BB_UNITY_WEIGHT_UNSIGNED;
}
//-------- Insert a statement at the start or end of a basic block --------
#ifdef DEBUG
public:
static bool fgBlockContainsStatementBounded(BasicBlock* block, Statement* stmt, bool answerOnBoundExceeded = true);
#endif
public:
Statement* fgNewStmtAtBeg(BasicBlock* block, GenTree* tree, const DebugInfo& di = DebugInfo());
void fgInsertStmtAtEnd(BasicBlock* block, Statement* stmt);
Statement* fgNewStmtAtEnd(BasicBlock* block, GenTree* tree, const DebugInfo& di = DebugInfo());
Statement* fgNewStmtNearEnd(BasicBlock* block, GenTree* tree, const DebugInfo& di = DebugInfo());
private:
void fgInsertStmtNearEnd(BasicBlock* block, Statement* stmt);
void fgInsertStmtAtBeg(BasicBlock* block, Statement* stmt);
void fgInsertStmtAfter(BasicBlock* block, Statement* insertionPoint, Statement* stmt);
public:
void fgInsertStmtBefore(BasicBlock* block, Statement* insertionPoint, Statement* stmt);
private:
Statement* fgInsertStmtListAfter(BasicBlock* block, Statement* stmtAfter, Statement* stmtList);
// Create a new temporary variable to hold the result of *ppTree,
// and transform the graph accordingly.
GenTree* fgInsertCommaFormTemp(GenTree** ppTree, CORINFO_CLASS_HANDLE structType = nullptr);
GenTree* fgMakeMultiUse(GenTree** ppTree);
private:
// Recognize a bitwise rotation pattern and convert into a GT_ROL or a GT_ROR node.
GenTree* fgRecognizeAndMorphBitwiseRotation(GenTree* tree);
bool fgOperIsBitwiseRotationRoot(genTreeOps oper);
#if !defined(TARGET_64BIT)
// Recognize and morph a long multiplication with 32 bit operands.
GenTreeOp* fgRecognizeAndMorphLongMul(GenTreeOp* mul);
GenTreeOp* fgMorphLongMul(GenTreeOp* mul);
#endif
//-------- Determine the order in which the trees will be evaluated -------
unsigned fgTreeSeqNum;
GenTree* fgTreeSeqLst;
GenTree* fgTreeSeqBeg;
GenTree* fgSetTreeSeq(GenTree* tree, GenTree* prev = nullptr, bool isLIR = false);
void fgSetTreeSeqHelper(GenTree* tree, bool isLIR);
void fgSetTreeSeqFinish(GenTree* tree, bool isLIR);
void fgSetStmtSeq(Statement* stmt);
void fgSetBlockOrder(BasicBlock* block);
//------------------------- Morphing --------------------------------------
unsigned fgPtrArgCntMax;
public:
//------------------------------------------------------------------------
// fgGetPtrArgCntMax: Return the maximum number of pointer-sized stack arguments that calls inside this method
// can push on the stack. This value is calculated during morph.
//
// Return Value:
// Returns fgPtrArgCntMax, which is a private field.
//
unsigned fgGetPtrArgCntMax() const
{
return fgPtrArgCntMax;
}
//------------------------------------------------------------------------
// fgSetPtrArgCntMax: Set the maximum number of pointer-sized stack arguments that calls inside this method
// can push on the stack. This function is used during StackLevelSetter to fix incorrect morph calculations.
//
void fgSetPtrArgCntMax(unsigned argCntMax)
{
fgPtrArgCntMax = argCntMax;
}
bool compCanEncodePtrArgCntMax();
private:
hashBv* fgOutgoingArgTemps;
hashBv* fgCurrentlyInUseArgTemps;
void fgSetRngChkTarget(GenTree* tree, bool delay = true);
BasicBlock* fgSetRngChkTargetInner(SpecialCodeKind kind, bool delay);
#if REARRANGE_ADDS
void fgMoveOpsLeft(GenTree* tree);
#endif
bool fgIsCommaThrow(GenTree* tree, bool forFolding = false);
bool fgIsThrow(GenTree* tree);
bool fgInDifferentRegions(BasicBlock* blk1, BasicBlock* blk2);
bool fgIsBlockCold(BasicBlock* block);
GenTree* fgMorphCastIntoHelper(GenTree* tree, int helper, GenTree* oper);
GenTree* fgMorphIntoHelperCall(GenTree* tree, int helper, GenTreeCall::Use* args, bool morphArgs = true);
GenTree* fgMorphStackArgForVarArgs(unsigned lclNum, var_types varType, unsigned lclOffs);
// A "MorphAddrContext" carries information from the surrounding context. If we are evaluating a byref address,
// it is useful to know whether the address will be immediately dereferenced, or whether the address value will
// be used, perhaps by passing it as an argument to a called method. This affects how null checking is done:
// for sufficiently small offsets, we can rely on OS page protection to implicitly null-check addresses that we
// know will be dereferenced. To know that reliance on implicit null checking is sound, we must further know that
// all offsets between the top-level indirection and the bottom are constant, and that their sum is sufficiently
// small; hence the other fields of MorphAddrContext.
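// For example (sketch, using illustrative numbers): for an access like "obj.f1.f2" where the offsets of f1
// and f2 are small constants whose sum is well below the protected page size, a null 'obj' makes the final
// address fault in the first page, so the hardware fault serves as the null check and no explicit check is
// needed; if any intermediate offset is non-constant, or the sum of offsets is large, an explicit null check
// must be kept.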
enum MorphAddrContextKind
{
MACK_Ind,
MACK_Addr,
};
struct MorphAddrContext
{
MorphAddrContextKind m_kind;
bool m_allConstantOffsets; // Valid only for "m_kind == MACK_Ind". True iff all offsets between
// top-level indirection and here have been constants.
size_t m_totalOffset; // Valid only for "m_kind == MACK_Ind", and if "m_allConstantOffsets" is true.
// In that case, is the sum of those constant offsets.
MorphAddrContext(MorphAddrContextKind kind) : m_kind(kind), m_allConstantOffsets(true), m_totalOffset(0)
{
}
};
// A MACK_CopyBlock context is immutable, so we can just make one of these and share it.
static MorphAddrContext s_CopyBlockMAC;
#ifdef FEATURE_SIMD
GenTree* getSIMDStructFromField(GenTree* tree,
CorInfoType* simdBaseJitTypeOut,
unsigned* indexOut,
unsigned* simdSizeOut,
bool ignoreUsedInSIMDIntrinsic = false);
GenTree* fgMorphFieldAssignToSimdSetElement(GenTree* tree);
GenTree* fgMorphFieldToSimdGetElement(GenTree* tree);
bool fgMorphCombineSIMDFieldAssignments(BasicBlock* block, Statement* stmt);
void impMarkContiguousSIMDFieldAssignments(Statement* stmt);
// fgPreviousCandidateSIMDFieldAsgStmt is only used for tracking the previous SIMD field assignment
// in the function Compiler::impMarkContiguousSIMDFieldAssignments.
Statement* fgPreviousCandidateSIMDFieldAsgStmt;
#endif // FEATURE_SIMD
GenTree* fgMorphArrayIndex(GenTree* tree);
GenTree* fgMorphExpandCast(GenTreeCast* tree);
GenTreeFieldList* fgMorphLclArgToFieldlist(GenTreeLclVarCommon* lcl);
void fgInitArgInfo(GenTreeCall* call);
GenTreeCall* fgMorphArgs(GenTreeCall* call);
void fgMakeOutgoingStructArgCopy(GenTreeCall* call, GenTreeCall::Use* args, CORINFO_CLASS_HANDLE copyBlkClass);
GenTree* fgMorphLocalVar(GenTree* tree, bool forceRemorph);
public:
bool fgAddrCouldBeNull(GenTree* addr);
private:
GenTree* fgMorphField(GenTree* tree, MorphAddrContext* mac);
bool fgCanFastTailCall(GenTreeCall* call, const char** failReason);
#if FEATURE_FASTTAILCALL
bool fgCallHasMustCopyByrefParameter(GenTreeCall* callee);
#endif
bool fgCheckStmtAfterTailCall();
GenTree* fgMorphTailCallViaHelpers(GenTreeCall* call, CORINFO_TAILCALL_HELPERS& help);
bool fgCanTailCallViaJitHelper();
void fgMorphTailCallViaJitHelper(GenTreeCall* call);
GenTree* fgCreateCallDispatcherAndGetResult(GenTreeCall* origCall,
CORINFO_METHOD_HANDLE callTargetStubHnd,
CORINFO_METHOD_HANDLE dispatcherHnd);
GenTree* getLookupTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_LOOKUP* pLookup,
GenTreeFlags handleFlags,
void* compileTimeHandle);
GenTree* getRuntimeLookupTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_LOOKUP* pLookup,
void* compileTimeHandle);
GenTree* getVirtMethodPointerTree(GenTree* thisPtr,
CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_CALL_INFO* pCallInfo);
GenTree* getTokenHandleTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool parent);
GenTree* fgMorphPotentialTailCall(GenTreeCall* call);
GenTree* fgGetStubAddrArg(GenTreeCall* call);
unsigned fgGetArgTabEntryParameterLclNum(GenTreeCall* call, fgArgTabEntry* argTabEntry);
void fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCall* recursiveTailCall);
Statement* fgAssignRecursiveCallArgToCallerParam(GenTree* arg,
fgArgTabEntry* argTabEntry,
unsigned lclParamNum,
BasicBlock* block,
const DebugInfo& callDI,
Statement* tmpAssignmentInsertionPoint,
Statement* paramAssignmentInsertionPoint);
GenTree* fgMorphCall(GenTreeCall* call);
GenTree* fgExpandVirtualVtableCallTarget(GenTreeCall* call);
void fgMorphCallInline(GenTreeCall* call, InlineResult* result);
void fgMorphCallInlineHelper(GenTreeCall* call, InlineResult* result, InlineContext** createdContext);
#if DEBUG
void fgNoteNonInlineCandidate(Statement* stmt, GenTreeCall* call);
static fgWalkPreFn fgFindNonInlineCandidate;
#endif
GenTree* fgOptimizeDelegateConstructor(GenTreeCall* call,
CORINFO_CONTEXT_HANDLE* ExactContextHnd,
methodPointerInfo* ldftnToken);
GenTree* fgMorphLeaf(GenTree* tree);
void fgAssignSetVarDef(GenTree* tree);
GenTree* fgMorphOneAsgBlockOp(GenTree* tree);
GenTree* fgMorphInitBlock(GenTree* tree);
GenTree* fgMorphPromoteLocalInitBlock(GenTreeLclVar* destLclNode, GenTree* initVal, unsigned blockSize);
GenTree* fgMorphGetStructAddr(GenTree** pTree, CORINFO_CLASS_HANDLE clsHnd, bool isRValue = false);
GenTree* fgMorphBlockOperand(GenTree* tree, var_types asgType, unsigned blockWidth, bool isBlkReqd);
GenTree* fgMorphCopyBlock(GenTree* tree);
GenTree* fgMorphStoreDynBlock(GenTreeStoreDynBlk* tree);
GenTree* fgMorphForRegisterFP(GenTree* tree);
GenTree* fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac = nullptr);
GenTree* fgOptimizeCast(GenTreeCast* cast);
GenTree* fgOptimizeEqualityComparisonWithConst(GenTreeOp* cmp);
GenTree* fgOptimizeRelationalComparisonWithConst(GenTreeOp* cmp);
#ifdef FEATURE_HW_INTRINSICS
GenTree* fgOptimizeHWIntrinsic(GenTreeHWIntrinsic* node);
#endif
GenTree* fgOptimizeCommutativeArithmetic(GenTreeOp* tree);
GenTree* fgOptimizeRelationalComparisonWithCasts(GenTreeOp* cmp);
GenTree* fgOptimizeAddition(GenTreeOp* add);
GenTree* fgOptimizeMultiply(GenTreeOp* mul);
GenTree* fgOptimizeBitwiseAnd(GenTreeOp* andOp);
GenTree* fgOptimizeBitwiseXor(GenTreeOp* xorOp);
GenTree* fgPropagateCommaThrow(GenTree* parent, GenTreeOp* commaThrow, GenTreeFlags precedingSideEffects);
GenTree* fgMorphRetInd(GenTreeUnOp* tree);
GenTree* fgMorphModToSubMulDiv(GenTreeOp* tree);
GenTree* fgMorphUModToAndSub(GenTreeOp* tree);
GenTree* fgMorphSmpOpOptional(GenTreeOp* tree);
GenTree* fgMorphMultiOp(GenTreeMultiOp* multiOp);
GenTree* fgMorphConst(GenTree* tree);
bool fgMorphCanUseLclFldForCopy(unsigned lclNum1, unsigned lclNum2);
GenTreeLclVar* fgMorphTryFoldObjAsLclVar(GenTreeObj* obj, bool destroyNodes = true);
GenTreeOp* fgMorphCommutative(GenTreeOp* tree);
GenTree* fgMorphCastedBitwiseOp(GenTreeOp* tree);
GenTree* fgMorphReduceAddOps(GenTree* tree);
public:
GenTree* fgMorphTree(GenTree* tree, MorphAddrContext* mac = nullptr);
private:
void fgKillDependentAssertionsSingle(unsigned lclNum DEBUGARG(GenTree* tree));
void fgKillDependentAssertions(unsigned lclNum DEBUGARG(GenTree* tree));
void fgMorphTreeDone(GenTree* tree, GenTree* oldTree = nullptr DEBUGARG(int morphNum = 0));
Statement* fgMorphStmt;
unsigned fgGetBigOffsetMorphingTemp(var_types type); // We cache one temp per type to be
// used when morphing big offsets.
//----------------------- Liveness analysis -------------------------------
VARSET_TP fgCurUseSet; // vars used by block (before an assignment)
VARSET_TP fgCurDefSet; // vars assigned by block (before a use)
MemoryKindSet fgCurMemoryUse; // True iff the current basic block uses memory.
MemoryKindSet fgCurMemoryDef; // True iff the current basic block modifies memory.
MemoryKindSet fgCurMemoryHavoc; // True if the current basic block is known to set memory to a "havoc" value.
bool byrefStatesMatchGcHeapStates; // True iff GcHeap and ByrefExposed memory have all the same def points.
void fgMarkUseDef(GenTreeLclVarCommon* tree);
void fgBeginScopeLife(VARSET_TP* inScope, VarScopeDsc* var);
void fgEndScopeLife(VARSET_TP* inScope, VarScopeDsc* var);
void fgMarkInScope(BasicBlock* block, VARSET_VALARG_TP inScope);
void fgUnmarkInScope(BasicBlock* block, VARSET_VALARG_TP unmarkScope);
void fgExtendDbgScopes();
void fgExtendDbgLifetimes();
#ifdef DEBUG
void fgDispDebugScopes();
#endif // DEBUG
//-------------------------------------------------------------------------
//
// The following keeps track of any code we've added for things like array
// range checking or explicit calls to enable GC, and so on.
//
public:
struct AddCodeDsc
{
AddCodeDsc* acdNext;
BasicBlock* acdDstBlk; // block to which we jump
unsigned acdData;
SpecialCodeKind acdKind; // what kind of a special block is this?
#if !FEATURE_FIXED_OUT_ARGS
bool acdStkLvlInit; // has acdStkLvl value been already set?
unsigned acdStkLvl; // stack level in stack slots.
#endif // !FEATURE_FIXED_OUT_ARGS
};
private:
static unsigned acdHelper(SpecialCodeKind codeKind);
AddCodeDsc* fgAddCodeList;
bool fgAddCodeModf;
bool fgRngChkThrowAdded;
AddCodeDsc* fgExcptnTargetCache[SCK_COUNT];
BasicBlock* fgRngChkTarget(BasicBlock* block, SpecialCodeKind kind);
BasicBlock* fgAddCodeRef(BasicBlock* srcBlk, unsigned refData, SpecialCodeKind kind);
public:
AddCodeDsc* fgFindExcptnTarget(SpecialCodeKind kind, unsigned refData);
bool fgUseThrowHelperBlocks();
AddCodeDsc* fgGetAdditionalCodeDescriptors()
{
return fgAddCodeList;
}
private:
bool fgIsCodeAdded();
bool fgIsThrowHlpBlk(BasicBlock* block);
#if !FEATURE_FIXED_OUT_ARGS
unsigned fgThrowHlpBlkStkLevel(BasicBlock* block);
#endif // !FEATURE_FIXED_OUT_ARGS
unsigned fgBigOffsetMorphingTemps[TYP_COUNT];
unsigned fgCheckInlineDepthAndRecursion(InlineInfo* inlineInfo);
void fgInvokeInlineeCompiler(GenTreeCall* call, InlineResult* result, InlineContext** createdContext);
void fgInsertInlineeBlocks(InlineInfo* pInlineInfo);
Statement* fgInlinePrependStatements(InlineInfo* inlineInfo);
void fgInlineAppendStatements(InlineInfo* inlineInfo, BasicBlock* block, Statement* stmt);
#if FEATURE_MULTIREG_RET
GenTree* fgGetStructAsStructPtr(GenTree* tree);
GenTree* fgAssignStructInlineeToVar(GenTree* child, CORINFO_CLASS_HANDLE retClsHnd);
void fgAttachStructInlineeToAsg(GenTree* tree, GenTree* child, CORINFO_CLASS_HANDLE retClsHnd);
#endif // FEATURE_MULTIREG_RET
static fgWalkPreFn fgUpdateInlineReturnExpressionPlaceHolder;
static fgWalkPostFn fgLateDevirtualization;
#ifdef DEBUG
static fgWalkPreFn fgDebugCheckInlineCandidates;
void CheckNoTransformableIndirectCallsRemain();
static fgWalkPreFn fgDebugCheckForTransformableIndirectCalls;
#endif
void fgPromoteStructs();
void fgMorphStructField(GenTree* tree, GenTree* parent);
void fgMorphLocalField(GenTree* tree, GenTree* parent);
// Reset the refCount for implicit byrefs.
void fgResetImplicitByRefRefCount();
// Change implicit byrefs' types from struct to pointer, and for any that were
// promoted, create new promoted struct temps.
void fgRetypeImplicitByRefArgs();
// Rewrite appearances of implicit byrefs (manifest the implied additional level of indirection).
bool fgMorphImplicitByRefArgs(GenTree* tree);
GenTree* fgMorphImplicitByRefArgs(GenTree* tree, bool isAddr);
// Clear up annotations for any struct promotion temps created for implicit byrefs.
void fgMarkDemotedImplicitByRefArgs();
void fgMarkAddressExposedLocals();
void fgMarkAddressExposedLocals(Statement* stmt);
PhaseStatus fgForwardSub();
bool fgForwardSubBlock(BasicBlock* block);
bool fgForwardSubStatement(Statement* statement);
static fgWalkPreFn fgUpdateSideEffectsPre;
static fgWalkPostFn fgUpdateSideEffectsPost;
// The given local variable, required to be a struct variable, is being assigned via
// a "lclField", to make it masquerade as an integral type in the ABI. Make sure that
// the variable is not enregistered, and is therefore not promoted independently.
void fgLclFldAssign(unsigned lclNum);
static fgWalkPreFn gtHasLocalsWithAddrOpCB;
enum TypeProducerKind
{
TPK_Unknown = 0, // May not be a RuntimeType
TPK_Handle = 1, // RuntimeType via handle
TPK_GetType = 2, // RuntimeType via Object.get_Type()
TPK_Null = 3, // Tree value is null
TPK_Other = 4 // RuntimeType via other means
};
TypeProducerKind gtGetTypeProducerKind(GenTree* tree);
bool gtIsTypeHandleToRuntimeTypeHelper(GenTreeCall* call);
bool gtIsTypeHandleToRuntimeTypeHandleHelper(GenTreeCall* call, CorInfoHelpFunc* pHelper = nullptr);
bool gtIsActiveCSE_Candidate(GenTree* tree);
bool fgIsBigOffset(size_t offset);
bool fgNeedReturnSpillTemp();
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Optimizer XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
public:
void optInit();
GenTree* optRemoveRangeCheck(GenTreeBoundsChk* check, GenTree* comma, Statement* stmt);
GenTree* optRemoveStandaloneRangeCheck(GenTreeBoundsChk* check, Statement* stmt);
void optRemoveCommaBasedRangeCheck(GenTree* comma, Statement* stmt);
protected:
// Do hoisting for all loops.
void optHoistLoopCode();
// To represent sets of VN's that have already been hoisted in outer loops.
typedef JitHashTable<ValueNum, JitSmallPrimitiveKeyFuncs<ValueNum>, bool> VNSet;
struct LoopHoistContext
{
private:
// The set of variables hoisted in the current loop (or nullptr if there are none).
VNSet* m_pHoistedInCurLoop;
public:
// Value numbers of expressions that have been hoisted in parent loops in the loop nest.
VNSet m_hoistedInParentLoops;
// Value numbers of expressions that have been hoisted in the current (or most recent) loop in the nest.
// Previous decisions on loop-invariance of value numbers in the current loop.
VNSet m_curLoopVnInvariantCache;
VNSet* GetHoistedInCurLoop(Compiler* comp)
{
if (m_pHoistedInCurLoop == nullptr)
{
m_pHoistedInCurLoop = new (comp->getAllocatorLoopHoist()) VNSet(comp->getAllocatorLoopHoist());
}
return m_pHoistedInCurLoop;
}
VNSet* ExtractHoistedInCurLoop()
{
VNSet* res = m_pHoistedInCurLoop;
m_pHoistedInCurLoop = nullptr;
return res;
}
LoopHoistContext(Compiler* comp)
: m_pHoistedInCurLoop(nullptr)
, m_hoistedInParentLoops(comp->getAllocatorLoopHoist())
, m_curLoopVnInvariantCache(comp->getAllocatorLoopHoist())
{
}
};
// Do hoisting for loop "lnum" (an index into the optLoopTable), and all loops nested within it.
// Tracks the expressions that have been hoisted by containing loops by temporarily recording their
// value numbers in "m_hoistedInParentLoops". This set is not modified by the call.
void optHoistLoopNest(unsigned lnum, LoopHoistContext* hoistCtxt);
// Do hoisting for a particular loop ("lnum" is an index into the optLoopTable.)
// Assumes that expressions have been hoisted in containing loops if their value numbers are in
// "m_hoistedInParentLoops".
//
void optHoistThisLoop(unsigned lnum, LoopHoistContext* hoistCtxt);
// Hoist all expressions in "blocks" that are invariant in loop "loopNum" (an index into the optLoopTable)
// outside of that loop. Exempt expressions whose value number is in "m_hoistedInParentLoops"; add VN's of hoisted
// expressions to "hoistInLoop".
void optHoistLoopBlocks(unsigned loopNum, ArrayStack<BasicBlock*>* blocks, LoopHoistContext* hoistContext);
// Return true if the tree looks profitable to hoist out of loop 'lnum'.
bool optIsProfitableToHoistTree(GenTree* tree, unsigned lnum);
// Performs the hoisting of 'tree' into the pre-header of loop 'lnum'.
void optHoistCandidate(GenTree* tree, BasicBlock* treeBb, unsigned lnum, LoopHoistContext* hoistCtxt);
// Returns true iff the ValueNum "vn" represents a value that is loop-invariant in "lnum".
// Constants and init values are always loop invariant.
// VNPhi's connect VN's to the SSA definition, so we can know if the SSA def occurs in the loop.
bool optVNIsLoopInvariant(ValueNum vn, unsigned lnum, VNSet* recordedVNs);
// If "blk" is the entry block of a natural loop, returns true and sets "*pLnum" to the index of the loop
// in the loop table.
bool optBlockIsLoopEntry(BasicBlock* blk, unsigned* pLnum);
// Records the set of "side effects" of all loops: fields (object instance and static)
// written to, and SZ-array element type equivalence classes updated.
void optComputeLoopSideEffects();
#ifdef DEBUG
bool optAnyChildNotRemoved(unsigned loopNum);
#endif // DEBUG
// Mark a loop as removed.
void optMarkLoopRemoved(unsigned loopNum);
private:
// Requires "lnum" to be the index of an outermost loop in the loop table. Traverses the body of that loop,
// including all nested loops, and records the set of "side effects" of the loop: fields (object instance and
// static) written to, and SZ-array element type equivalence classes updated.
void optComputeLoopNestSideEffects(unsigned lnum);
// Given a loop number 'lnum' mark it and any nested loops as having 'memoryHavoc'
void optRecordLoopNestsMemoryHavoc(unsigned lnum, MemoryKindSet memoryHavoc);
// Add the side effects of "blk" (which is required to be within a loop) to all loops of which it is a part.
// Returns false if we encounter a block that is not marked as being inside a loop.
//
bool optComputeLoopSideEffectsOfBlock(BasicBlock* blk);
// Hoist the expression "expr" out of loop "lnum".
void optPerformHoistExpr(GenTree* expr, BasicBlock* exprBb, unsigned lnum);
public:
void optOptimizeBools();
public:
PhaseStatus optInvertLoops(); // Invert loops so they're entered at top and tested at bottom.
PhaseStatus optOptimizeLayout(); // Optimize the BasicBlock layout of the method
PhaseStatus optSetBlockWeights();
PhaseStatus optFindLoopsPhase(); // Finds loops and records them in the loop table
void optFindLoops();
PhaseStatus optCloneLoops();
void optCloneLoop(unsigned loopInd, LoopCloneContext* context);
void optEnsureUniqueHead(unsigned loopInd, weight_t ambientWeight);
PhaseStatus optUnrollLoops(); // Unrolls loops (needs to have cost info)
void optRemoveRedundantZeroInits();
protected:
// This enumeration describes what is killed by a call.
enum callInterf
{
CALLINT_NONE, // no interference (most helpers)
CALLINT_REF_INDIRS, // kills GC ref indirections (SETFIELD OBJ)
CALLINT_SCL_INDIRS, // kills non GC ref indirections (SETFIELD non-OBJ)
CALLINT_ALL_INDIRS, // kills both GC ref and non GC ref indirections (SETFIELD STRUCT)
CALLINT_ALL, // kills everything (normal method call)
};
enum class FieldKindForVN
{
SimpleStatic,
WithBaseAddr
};
public:
// A "LoopDsc" describes a ("natural") loop. We (currently) require the body of a loop to be a contiguous (in
// bbNext order) sequence of basic blocks. (At times, we may require the blocks in a loop to be "properly numbered"
// in bbNext order; we use comparisons on the bbNum to decide order.)
// The blocks that define the body are
// top <= entry <= bottom
// The "head" of the loop is a block outside the loop that has "entry" as a successor. We only support loops with a
// single 'head' block. The meanings of these blocks are given in the definitions below. Also see the picture at
// Compiler::optFindNaturalLoops().
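// A minimal sketch of the block ordering (illustrative; see the picture at Compiler::optFindNaturalLoops()
// for the authoritative diagram):
//
//     head    -- outside the loop; falls into or jumps to entry
//     top     -- target of the back edge from bottom; lexically first block of the loop
//     ...
//     entry   -- where the loop is actually entered (often the same block as top or bottom)
//     ...
//     bottom  -- has the back edge to top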
struct LoopDsc
{
BasicBlock* lpHead; // HEAD of the loop (not part of the loop itself) -- has ENTRY as a successor.
BasicBlock* lpTop; // loop TOP (the back edge from lpBottom reaches here). Lexically first block (in bbNext
// order) reachable in this loop.
BasicBlock* lpEntry; // the ENTRY in the loop (in most cases TOP or BOTTOM)
BasicBlock* lpBottom; // loop BOTTOM (from here we have a back edge to the TOP)
BasicBlock* lpExit; // if a single exit loop this is the EXIT (in most cases BOTTOM)
callInterf lpAsgCall; // "callInterf" for calls in the loop
ALLVARSET_TP lpAsgVars; // set of vars assigned within the loop (all vars, not just tracked)
varRefKinds lpAsgInds : 8; // set of inds modified within the loop
LoopFlags lpFlags;
unsigned char lpExitCnt; // number of exits from the loop
unsigned char lpParent; // The index of the most-nested loop that completely contains this one,
// or else BasicBlock::NOT_IN_LOOP if no such loop exists.
unsigned char lpChild; // The index of a nested loop, or else BasicBlock::NOT_IN_LOOP if no child exists.
// (Actually, an "immediately" nested loop --
// no other child of this loop is a parent of lpChild.)
unsigned char lpSibling; // The index of another loop that is an immediate child of lpParent,
// or else BasicBlock::NOT_IN_LOOP. One can enumerate all the children of a loop
// by following "lpChild" then "lpSibling" links.
bool lpLoopHasMemoryHavoc[MemoryKindCount]; // The loop contains an operation that we assume has arbitrary
// memory side effects. If this is set, the fields below
// may not be accurate (since they become irrelevant.)
VARSET_TP lpVarInOut; // The set of variables that are IN or OUT during the execution of this loop
VARSET_TP lpVarUseDef; // The set of variables that are USE or DEF during the execution of this loop
// The following counts are used for hoisting profitability checks.
int lpHoistedExprCount; // The register count for the non-FP expressions from inside this loop that have been
// hoisted
int lpLoopVarCount; // The register count for the non-FP LclVars that are read/written inside this loop
int lpVarInOutCount; // The register count for the non-FP LclVars that are alive inside or across this loop
int lpHoistedFPExprCount; // The register count for the FP expressions from inside this loop that have been
// hoisted
int lpLoopVarFPCount; // The register count for the FP LclVars that are read/written inside this loop
int lpVarInOutFPCount; // The register count for the FP LclVars that are alive inside or across this loop
typedef JitHashTable<CORINFO_FIELD_HANDLE, JitPtrKeyFuncs<struct CORINFO_FIELD_STRUCT_>, FieldKindForVN>
FieldHandleSet;
FieldHandleSet* lpFieldsModified; // This has entries for all static field and object instance fields modified
// in the loop.
typedef JitHashTable<CORINFO_CLASS_HANDLE, JitPtrKeyFuncs<struct CORINFO_CLASS_STRUCT_>, bool> ClassHandleSet;
ClassHandleSet* lpArrayElemTypesModified; // Bits set indicate the set of sz array element types such that
// arrays of that type are modified
// in the loop.
// Adds the variable liveness information for 'blk' to 'this' LoopDsc
void AddVariableLiveness(Compiler* comp, BasicBlock* blk);
inline void AddModifiedField(Compiler* comp, CORINFO_FIELD_HANDLE fldHnd, FieldKindForVN fieldKind);
// This doesn't *always* take a class handle -- it can also take primitive types, encoded as class handles
// (shifted left, with a low-order bit set to distinguish.)
// Use the {Encode/Decode}ElemType methods to construct/destruct these.
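// For example (a sketch of the encoding described above, not the definitive layout): a primitive element
// type T could be stored as ((size_t)T << 1) | 1, while a genuine class handle, being a pointer-aligned
// value, keeps a zero low-order bit, so the two kinds can be told apart when decoding.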
inline void AddModifiedElemType(Compiler* comp, CORINFO_CLASS_HANDLE structHnd);
/* The following values are set only for iterator loops, i.e. loops that have the LPFLG_ITER flag set */
GenTree* lpIterTree; // The "i = i <op> const" tree
unsigned lpIterVar() const; // iterator variable #
int lpIterConst() const; // the constant with which the iterator is incremented
genTreeOps lpIterOper() const; // the type of the operation on the iterator (ASG_ADD, ASG_SUB, etc.)
void VERIFY_lpIterTree() const;
var_types lpIterOperType() const; // For overflow instructions
// Set to the block where we found the initialization for LPFLG_CONST_INIT loops.
// Initially, this will be 'head', but 'head' might change if we insert a loop pre-header block.
BasicBlock* lpInitBlock;
int lpConstInit; // initial constant value of iterator : Valid if LPFLG_CONST_INIT
// The following is for LPFLG_ITER loops only (i.e. the loop condition is "i RELOP const or var")
GenTree* lpTestTree; // pointer to the node containing the loop test
genTreeOps lpTestOper() const; // the type of the comparison between the iterator and the limit (GT_LE, GT_GE,
// etc.)
void VERIFY_lpTestTree() const;
bool lpIsReversed() const; // true if the iterator node is the second operand in the loop condition
GenTree* lpIterator() const; // the iterator node in the loop test
GenTree* lpLimit() const; // the limit node in the loop test
// Limit constant value of iterator - loop condition is "i RELOP const"
// : Valid if LPFLG_CONST_LIMIT
int lpConstLimit() const;
// The lclVar # in the loop condition ( "i RELOP lclVar" )
// : Valid if LPFLG_VAR_LIMIT
unsigned lpVarLimit() const;
// The array length in the loop condition ( "i RELOP arr.len" or "i RELOP arr[i][j].len" )
// : Valid if LPFLG_ARRLEN_LIMIT
bool lpArrLenLimit(Compiler* comp, ArrIndex* index) const;
// Returns "true" iff this is a "top entry" loop.
bool lpIsTopEntry() const
{
if (lpHead->bbNext == lpEntry)
{
assert(lpHead->bbFallsThrough());
assert(lpTop == lpEntry);
return true;
}
else
{
return false;
}
}
// Returns "true" iff "*this" contains the blk.
bool lpContains(BasicBlock* blk) const
{
return lpTop->bbNum <= blk->bbNum && blk->bbNum <= lpBottom->bbNum;
}
// Returns "true" iff "*this" (properly) contains the range [top, bottom] (allowing tops
// to be equal, but requiring bottoms to be different.)
bool lpContains(BasicBlock* top, BasicBlock* bottom) const
{
return lpTop->bbNum <= top->bbNum && bottom->bbNum < lpBottom->bbNum;
}
// Returns "true" iff "*this" (properly) contains "lp2" (allowing tops to be equal, but requiring
// bottoms to be different.)
bool lpContains(const LoopDsc& lp2) const
{
return lpContains(lp2.lpTop, lp2.lpBottom);
}
// Returns "true" iff "*this" is (properly) contained by the range [top, bottom]
// (allowing tops to be equal, but requiring bottoms to be different.)
bool lpContainedBy(BasicBlock* top, BasicBlock* bottom) const
{
return top->bbNum <= lpTop->bbNum && lpBottom->bbNum < bottom->bbNum;
}
// Returns "true" iff "*this" is (properly) contained by "lp2"
// (allowing tops to be equal, but requiring bottoms to be different.)
bool lpContainedBy(const LoopDsc& lp2) const
{
return lpContainedBy(lp2.lpTop, lp2.lpBottom);
}
// Returns "true" iff "*this" is disjoint from the range [top, bottom].
bool lpDisjoint(BasicBlock* top, BasicBlock* bottom) const
{
return bottom->bbNum < lpTop->bbNum || lpBottom->bbNum < top->bbNum;
}
// Returns "true" iff "*this" is disjoint from "lp2".
bool lpDisjoint(const LoopDsc& lp2) const
{
return lpDisjoint(lp2.lpTop, lp2.lpBottom);
}
// Returns "true" iff the loop is well-formed (see code for defn).
bool lpWellFormed() const
{
return lpTop->bbNum <= lpEntry->bbNum && lpEntry->bbNum <= lpBottom->bbNum &&
(lpHead->bbNum < lpTop->bbNum || lpHead->bbNum > lpBottom->bbNum);
}
#ifdef DEBUG
void lpValidatePreHeader() const
{
// If this is called, we expect there to be a pre-header.
assert(lpFlags & LPFLG_HAS_PREHEAD);
// The pre-header must unconditionally enter the loop.
assert(lpHead->GetUniqueSucc() == lpEntry);
// The head block must be marked as a pre-header.
assert(lpHead->bbFlags & BBF_LOOP_PREHEADER);
// The loop entry must have a single non-loop predecessor, which is the pre-header.
// We can't assume here that the bbNum are properly ordered, so we can't do a simple lpContained()
// check. So, we defer this check, which will be done by `fgDebugCheckLoopTable()`.
}
#endif // DEBUG
// LoopBlocks: convenience method for enabling range-based `for` iteration over all the
// blocks in a loop, e.g.:
// for (BasicBlock* const block : loop->LoopBlocks()) ...
// Currently, the loop blocks are expected to be in linear, lexical, `bbNext` order
// from `lpTop` through `lpBottom`, inclusive. All blocks in this range are considered
// to be part of the loop.
//
BasicBlockRangeList LoopBlocks() const
{
return BasicBlockRangeList(lpTop, lpBottom);
}
};
protected:
bool fgMightHaveLoop(); // returns true if there are any back edges
bool fgHasLoops; // True if this method has any loops, set in fgComputeReachability
public:
LoopDsc* optLoopTable; // loop descriptor table
unsigned char optLoopCount; // number of tracked loops
unsigned char loopAlignCandidates; // number of loops identified for alignment
// Every time we rebuild the loop table, we increase the global "loop epoch". Any loop indices or
// loop table pointers from the previous epoch are invalid.
// TODO: validate this in some way?
unsigned optCurLoopEpoch;
void NewLoopEpoch()
{
++optCurLoopEpoch;
JITDUMP("New loop epoch %d\n", optCurLoopEpoch);
}
#ifdef DEBUG
unsigned char loopsAligned; // number of loops actually aligned
#endif // DEBUG
bool optRecordLoop(BasicBlock* head,
BasicBlock* top,
BasicBlock* entry,
BasicBlock* bottom,
BasicBlock* exit,
unsigned char exitCnt);
void optClearLoopIterInfo();
#ifdef DEBUG
void optPrintLoopInfo(unsigned lnum, bool printVerbose = false);
void optPrintLoopInfo(const LoopDsc* loop, bool printVerbose = false);
void optPrintLoopTable();
#endif
protected:
unsigned optCallCount; // number of calls made in the method
unsigned optIndirectCallCount; // number of virtual, interface and indirect calls made in the method
unsigned optNativeCallCount; // number of Pinvoke/Native calls made in the method
unsigned optLoopsCloned; // number of loops cloned in the current method.
#ifdef DEBUG
void optCheckPreds();
#endif
void optResetLoopInfo();
void optFindAndScaleGeneralLoopBlocks();
// Determine if there are any potential loops, and set BBF_LOOP_HEAD on potential loop heads.
void optMarkLoopHeads();
void optScaleLoopBlocks(BasicBlock* begBlk, BasicBlock* endBlk);
void optUnmarkLoopBlocks(BasicBlock* begBlk, BasicBlock* endBlk);
void optUpdateLoopsBeforeRemoveBlock(BasicBlock* block, bool skipUnmarkLoop = false);
bool optIsLoopTestEvalIntoTemp(Statement* testStmt, Statement** newTestStmt);
unsigned optIsLoopIncrTree(GenTree* incr);
bool optCheckIterInLoopTest(unsigned loopInd, GenTree* test, BasicBlock* from, BasicBlock* to, unsigned iterVar);
bool optComputeIterInfo(GenTree* incr, BasicBlock* from, BasicBlock* to, unsigned* pIterVar);
bool optPopulateInitInfo(unsigned loopInd, BasicBlock* initBlock, GenTree* init, unsigned iterVar);
bool optExtractInitTestIncr(
BasicBlock* head, BasicBlock* bottom, BasicBlock* exit, GenTree** ppInit, GenTree** ppTest, GenTree** ppIncr);
void optFindNaturalLoops();
void optIdentifyLoopsForAlignment();
// Ensures that all the loops in the loop nest rooted at "loopInd" (an index into the loop table) are 'canonical' --
// each loop has a unique "top." Returns "true" iff the flowgraph has been modified.
bool optCanonicalizeLoopNest(unsigned char loopInd);
// Ensures that the loop "loopInd" (an index into the loop table) is 'canonical' -- it has a unique "top,"
// unshared with any other loop. Returns "true" iff the flowgraph has been modified
bool optCanonicalizeLoop(unsigned char loopInd);
// Requires "l1" to be a valid loop table index, and not "BasicBlock::NOT_IN_LOOP".
// Requires "l2" to be a valid loop table index, or else "BasicBlock::NOT_IN_LOOP".
// Returns true iff "l2" is not NOT_IN_LOOP, and "l1" contains "l2".
// A loop contains itself.
bool optLoopContains(unsigned l1, unsigned l2) const;
// Updates the loop table by changing loop "loopInd", whose head is required
// to be "from", to be "to". Also performs this transformation for any
// loop nested in "loopInd" that shares the same head as "loopInd".
void optUpdateLoopHead(unsigned loopInd, BasicBlock* from, BasicBlock* to);
void optRedirectBlock(BasicBlock* blk, BlockToBlockMap* redirectMap, const bool updatePreds = false);
// Marks "lnum" and any parent loops as containing a call.
void AddContainsCallAllContainingLoops(unsigned lnum);
// Adds the variable liveness information from 'blk' to "lnum" and any parent loops.
void AddVariableLivenessAllContainingLoops(unsigned lnum, BasicBlock* blk);
// Adds "fldHnd" to the set of modified fields of "lnum" and any parent loops.
void AddModifiedFieldAllContainingLoops(unsigned lnum, CORINFO_FIELD_HANDLE fldHnd, FieldKindForVN fieldKind);
// Adds "elemType" to the set of modified array element types of "lnum" and any parent loops.
void AddModifiedElemTypeAllContainingLoops(unsigned lnum, CORINFO_CLASS_HANDLE elemType);
// Requires that "from" and "to" have the same "bbJumpKind" (perhaps because "to" is a clone
// of "from".) Copies the jump destination from "from" to "to".
void optCopyBlkDest(BasicBlock* from, BasicBlock* to);
// Returns true if 'block' is an entry block for any loop in 'optLoopTable'
bool optIsLoopEntry(BasicBlock* block) const;
// The depth of the loop described by "lnum" (an index into the loop table.) (0 == top level)
unsigned optLoopDepth(unsigned lnum)
{
assert(lnum < optLoopCount);
unsigned depth = 0;
while ((lnum = optLoopTable[lnum].lpParent) != BasicBlock::NOT_IN_LOOP)
{
++depth;
}
return depth;
}
// Struct used in optInvertWhileLoop to count interesting constructs to boost the profitability score.
struct OptInvertCountTreeInfoType
{
int sharedStaticHelperCount;
int arrayLengthCount;
};
static fgWalkResult optInvertCountTreeInfo(GenTree** pTree, fgWalkData* data);
bool optInvertWhileLoop(BasicBlock* block);
private:
static bool optIterSmallOverflow(int iterAtExit, var_types incrType);
static bool optIterSmallUnderflow(int iterAtExit, var_types decrType);
bool optComputeLoopRep(int constInit,
int constLimit,
int iterInc,
genTreeOps iterOper,
var_types iterType,
genTreeOps testOper,
bool unsignedTest,
bool dupCond,
unsigned* iterCount);
static fgWalkPreFn optIsVarAssgCB;
protected:
bool optIsVarAssigned(BasicBlock* beg, BasicBlock* end, GenTree* skip, unsigned var);
bool optIsVarAssgLoop(unsigned lnum, unsigned var);
int optIsSetAssgLoop(unsigned lnum, ALLVARSET_VALARG_TP vars, varRefKinds inds = VR_NONE);
bool optNarrowTree(GenTree* tree, var_types srct, var_types dstt, ValueNumPair vnpNarrow, bool doit);
protected:
// The following is the upper limit on how many expressions we'll keep track
// of for the CSE analysis.
//
static const unsigned MAX_CSE_CNT = EXPSET_SZ;
static const int MIN_CSE_COST = 2;
// BitVec trait information only used by the optCSE_canSwap() method, for the CSE_defMask and CSE_useMask.
// This BitVec uses one bit per CSE candidate
BitVecTraits* cseMaskTraits; // one bit per CSE candidate
// BitVec trait information for computing CSE availability using the CSE_DataFlow algorithm.
// Two bits are allocated per CSE candidate to compute CSE availability
// plus an extra bit to handle the initial unvisited case.
// (See CSE_DataFlow::EndMerge for an explanation of why this is necessary.)
//
// The two bits per CSE candidate have the following meanings:
// 11 - The CSE is available, and is also available when considering calls as killing availability.
// 10 - The CSE is available, but is not available when considering calls as killing availability.
// 00 - The CSE is not available
// 01 - An illegal combination
//
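// For example (illustrative): right after a def of CSE #2 its two bits are "11"; once a call is crossed,
// the call-kill-aware view is cleared and the bits become "10"; and if a joining path never defined the CSE,
// merging with that path's "00" yields "00", i.e. not available at all.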
BitVecTraits* cseLivenessTraits;
//-----------------------------------------------------------------------------------------------------------------
// getCSEnum2bit: Return the normalized index to use in the EXPSET_TP for the CSE with the given CSE index.
// Each GenTree has a `gtCSEnum` field. Zero is reserved to mean this node is not a CSE, positive values indicate
// CSE uses, and negative values indicate CSE defs. The caller must pass a non-zero positive value, as from
// GET_CSE_INDEX().
//
static unsigned genCSEnum2bit(unsigned CSEnum)
{
assert((CSEnum > 0) && (CSEnum <= MAX_CSE_CNT));
return CSEnum - 1;
}
//-----------------------------------------------------------------------------------------------------------------
// getCSEAvailBit: Return the bit used by CSE dataflow sets (bbCseGen, etc.) for the availability bit for a CSE.
//
static unsigned getCSEAvailBit(unsigned CSEnum)
{
return genCSEnum2bit(CSEnum) * 2;
}
//-----------------------------------------------------------------------------------------------------------------
// getCSEAvailCrossCallBit: Return the bit used by CSE dataflow sets (bbCseGen, etc.) for the availability bit
// for a CSE considering calls as killing availability bit (see description above).
//
static unsigned getCSEAvailCrossCallBit(unsigned CSEnum)
{
return getCSEAvailBit(CSEnum) + 1;
}
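// For example, CSE #3 maps to normalized index genCSEnum2bit(3) == 2, so its plain availability bit is
// getCSEAvailBit(3) == 4 and its call-kill-aware availability bit is getCSEAvailCrossCallBit(3) == 5.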
void optPrintCSEDataFlowSet(EXPSET_VALARG_TP cseDataFlowSet, bool includeBits = true);
EXPSET_TP cseCallKillsMask; // Computed once - A mask that is used to kill available CSEs at callsites
/* Generic list of nodes - used by the CSE logic */
struct treeLst
{
treeLst* tlNext;
GenTree* tlTree;
};
struct treeStmtLst
{
treeStmtLst* tslNext;
GenTree* tslTree; // tree node
Statement* tslStmt; // statement containing the tree
BasicBlock* tslBlock; // block containing the statement
};
// The following logic keeps track of expressions via a simple hash table.
struct CSEdsc
{
CSEdsc* csdNextInBucket; // used by the hash table
size_t csdHashKey; // the original hashkey
ssize_t csdConstDefValue; // When we CSE similar constants, this is the value that we use as the def
ValueNum csdConstDefVN; // When we CSE similar constants, this is the ValueNumber that we use for the LclVar
// assignment
unsigned csdIndex; // 1..optCSECandidateCount
bool csdIsSharedConst; // true if this CSE is a shared const
bool csdLiveAcrossCall;
unsigned short csdDefCount; // definition count
unsigned short csdUseCount; // use count (excluding the implicit uses at defs)
weight_t csdDefWtCnt; // weighted def count
weight_t csdUseWtCnt; // weighted use count (excluding the implicit uses at defs)
GenTree* csdTree; // treenode containing the 1st occurrence
Statement* csdStmt; // stmt containing the 1st occurrence
BasicBlock* csdBlock; // block containing the 1st occurrence
treeStmtLst* csdTreeList; // list of matching tree nodes: head
treeStmtLst* csdTreeLast; // list of matching tree nodes: tail
// ToDo: This can be removed when gtGetStructHandleIfPresent stops guessing
        // and GT_IND nodes always have a valid struct handle.
//
CORINFO_CLASS_HANDLE csdStructHnd; // The class handle, currently needed to create a SIMD LclVar in PerformCSE
bool csdStructHndMismatch;
ValueNum defExcSetPromise; // The exception set that is now required for all defs of this CSE.
// This will be set to NoVN if we decide to abandon this CSE
ValueNum defExcSetCurrent; // The set of exceptions we currently can use for CSE uses.
ValueNum defConservNormVN; // if all def occurrences share the same conservative normal value
// number, this will reflect it; otherwise, NoVN.
        // not used for shared const CSEs
};
static const size_t s_optCSEhashSizeInitial;
static const size_t s_optCSEhashGrowthFactor;
static const size_t s_optCSEhashBucketSize;
size_t optCSEhashSize; // The current size of hashtable
size_t optCSEhashCount; // Number of entries in hashtable
size_t optCSEhashMaxCountBeforeResize; // Number of entries before resize
CSEdsc** optCSEhash;
CSEdsc** optCSEtab;
typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, GenTree*> NodeToNodeMap;
NodeToNodeMap* optCseCheckedBoundMap; // Maps bound nodes to ancestor compares that should be
// re-numbered with the bound to improve range check elimination
// Given a compare, look for a cse candidate checked bound feeding it and add a map entry if found.
void optCseUpdateCheckedBoundMap(GenTree* compare);
void optCSEstop();
CSEdsc* optCSEfindDsc(unsigned index);
bool optUnmarkCSE(GenTree* tree);
// user defined callback data for the tree walk function optCSE_MaskHelper()
struct optCSE_MaskData
{
EXPSET_TP CSE_defMask;
EXPSET_TP CSE_useMask;
};
// Treewalk helper for optCSE_DefMask and optCSE_UseMask
static fgWalkPreFn optCSE_MaskHelper;
    // This function walks all the nodes of a given tree
    // and returns the mask of CSE definitions and uses for the tree
//
void optCSE_GetMaskData(GenTree* tree, optCSE_MaskData* pMaskData);
// Given a binary tree node return true if it is safe to swap the order of evaluation for op1 and op2.
bool optCSE_canSwap(GenTree* firstNode, GenTree* secondNode);
struct optCSEcostCmpEx
{
bool operator()(const CSEdsc* op1, const CSEdsc* op2);
};
struct optCSEcostCmpSz
{
bool operator()(const CSEdsc* op1, const CSEdsc* op2);
};
void optCleanupCSEs();
#ifdef DEBUG
void optEnsureClearCSEInfo();
#endif // DEBUG
static bool Is_Shared_Const_CSE(size_t key)
{
return ((key & TARGET_SIGN_BIT) != 0);
}
// returns the encoded key
static size_t Encode_Shared_Const_CSE_Value(size_t key)
{
return TARGET_SIGN_BIT | (key >> CSE_CONST_SHARED_LOW_BITS);
}
    // returns the original key
static size_t Decode_Shared_Const_CSE_Value(size_t enckey)
{
assert(Is_Shared_Const_CSE(enckey));
return (enckey & ~TARGET_SIGN_BIT) << CSE_CONST_SHARED_LOW_BITS;
}
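    // Illustrative round trip (a sketch, not an additional API): encoding drops the low
    // CSE_CONST_SHARED_LOW_BITS bits of the key and marks the result with TARGET_SIGN_BIT, so
    // decoding recovers the key rounded down to that granularity:
    //
    //   size_t enc = Encode_Shared_Const_CSE_Value(key);
    //   assert(Is_Shared_Const_CSE(enc));
    //   assert(Decode_Shared_Const_CSE_Value(enc) == (key & ~((size_t(1) << CSE_CONST_SHARED_LOW_BITS) - 1)));
    //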
/**************************************************************************
* Value Number based CSEs
*************************************************************************/
// String to use for formatting CSE numbers. Note that this is the positive number, e.g., from GET_CSE_INDEX().
#define FMT_CSE "CSE #%02u"
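// For example (illustrative): printf("considering " FMT_CSE "\n", dsc->csdIndex);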
public:
void optOptimizeValnumCSEs();
protected:
void optValnumCSE_Init();
unsigned optValnumCSE_Index(GenTree* tree, Statement* stmt);
bool optValnumCSE_Locate();
void optValnumCSE_InitDataFlow();
void optValnumCSE_DataFlow();
void optValnumCSE_Availablity();
void optValnumCSE_Heuristic();
bool optDoCSE; // True when we have found a duplicate CSE tree
bool optValnumCSE_phase; // True when we are executing the optOptimizeValnumCSEs() phase
    unsigned optCSECandidateCount; // Count of CSE candidates
unsigned optCSEstart; // The first local variable number that is a CSE
    unsigned optCSEcount; // The total count of CSEs introduced.
weight_t optCSEweight; // The weight of the current block when we are doing PerformCSE
bool optIsCSEcandidate(GenTree* tree);
// lclNumIsTrueCSE returns true if the LclVar was introduced by the CSE phase of the compiler
//
bool lclNumIsTrueCSE(unsigned lclNum) const
{
return ((optCSEcount > 0) && (lclNum >= optCSEstart) && (lclNum < optCSEstart + optCSEcount));
}
// lclNumIsCSE returns true if the LclVar should be treated like a CSE with regards to constant prop.
//
bool lclNumIsCSE(unsigned lclNum) const
{
return lvaGetDesc(lclNum)->lvIsCSE;
}
#ifdef DEBUG
bool optConfigDisableCSE();
bool optConfigDisableCSE2();
#endif
void optOptimizeCSEs();
struct isVarAssgDsc
{
GenTree* ivaSkip;
ALLVARSET_TP ivaMaskVal; // Set of variables assigned to. This is a set of all vars, not tracked vars.
#ifdef DEBUG
void* ivaSelf;
#endif
unsigned ivaVar; // Variable we are interested in, or -1
varRefKinds ivaMaskInd; // What kind of indirect assignments are there?
callInterf ivaMaskCall; // What kind of calls are there?
bool ivaMaskIncomplete; // Variables not representable in ivaMaskVal were assigned to.
};
static callInterf optCallInterf(GenTreeCall* call);
public:
// VN based copy propagation.
// In DEBUG builds, we'd like to know the tree that the SSA definition was pushed for.
// While for ordinary SSA defs it will be available (as an ASG) in the SSA descriptor,
// for locals which will use "definitions from uses", it will not be, so we store it
// in this class instead.
class CopyPropSsaDef
{
LclSsaVarDsc* m_ssaDef;
#ifdef DEBUG
GenTree* m_defNode;
#endif
public:
CopyPropSsaDef(LclSsaVarDsc* ssaDef, GenTree* defNode)
: m_ssaDef(ssaDef)
#ifdef DEBUG
, m_defNode(defNode)
#endif
{
}
LclSsaVarDsc* GetSsaDef() const
{
return m_ssaDef;
}
#ifdef DEBUG
GenTree* GetDefNode() const
{
return m_defNode;
}
#endif
};
typedef ArrayStack<CopyPropSsaDef> CopyPropSsaDefStack;
typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, CopyPropSsaDefStack*> LclNumToLiveDefsMap;
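    // Illustrative shape of the per-local def stacks maintained during the dominator-tree walk
    // (a sketch; the real maintenance lives in optBlockCopyProp and optCopyPropPushDef):
    //
    //   CopyPropSsaDefStack* defs;
    //   if (curSsaName->Lookup(lclNum, &defs) && (defs->Height() > 0))
    //   {
    //       LclSsaVarDsc* reachingDef = defs->Top().GetSsaDef(); // live def for "lclNum" at this point
    //   }
    //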
// Copy propagation functions.
void optCopyProp(Statement* stmt, GenTreeLclVarCommon* tree, unsigned lclNum, LclNumToLiveDefsMap* curSsaName);
void optBlockCopyPropPopStacks(BasicBlock* block, LclNumToLiveDefsMap* curSsaName);
void optBlockCopyProp(BasicBlock* block, LclNumToLiveDefsMap* curSsaName);
void optCopyPropPushDef(GenTree* defNode,
GenTreeLclVarCommon* lclNode,
unsigned lclNum,
LclNumToLiveDefsMap* curSsaName);
unsigned optIsSsaLocal(GenTreeLclVarCommon* lclNode);
int optCopyProp_LclVarScore(const LclVarDsc* lclVarDsc, const LclVarDsc* copyVarDsc, bool preferOp2);
void optVnCopyProp();
INDEBUG(void optDumpCopyPropStack(LclNumToLiveDefsMap* curSsaName));
/**************************************************************************
* Early value propagation
*************************************************************************/
struct SSAName
{
unsigned m_lvNum;
unsigned m_ssaNum;
SSAName(unsigned lvNum, unsigned ssaNum) : m_lvNum(lvNum), m_ssaNum(ssaNum)
{
}
static unsigned GetHashCode(SSAName ssaNm)
{
return (ssaNm.m_lvNum << 16) | (ssaNm.m_ssaNum);
}
static bool Equals(SSAName ssaNm1, SSAName ssaNm2)
{
return (ssaNm1.m_lvNum == ssaNm2.m_lvNum) && (ssaNm1.m_ssaNum == ssaNm2.m_ssaNum);
}
};
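    // SSAName provides the static GetHashCode/Equals pair that JitHashTable expects from its key
    // functions, so (for illustration only) it could key a table on (lclNum, ssaNum) pairs directly:
    //
    //   typedef JitHashTable<SSAName, SSAName, GenTree*> SsaNameToTreeMap; // hypothetical typedef
    //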
#define OMF_HAS_NEWARRAY 0x00000001 // Method contains 'new' of an array
#define OMF_HAS_NEWOBJ 0x00000002 // Method contains 'new' of an object type.
#define OMF_HAS_ARRAYREF 0x00000004 // Method contains array element loads or stores.
#define OMF_HAS_NULLCHECK 0x00000008 // Method contains null check.
#define OMF_HAS_FATPOINTER 0x00000010 // Method contains a call that needs fat pointer transformation.
#define OMF_HAS_OBJSTACKALLOC 0x00000020 // Method contains an object allocated on the stack.
#define OMF_HAS_GUARDEDDEVIRT 0x00000040 // Method contains guarded devirtualization candidate
#define OMF_HAS_EXPRUNTIMELOOKUP 0x00000080 // Method contains a runtime lookup to an expandable dictionary.
#define OMF_HAS_PATCHPOINT 0x00000100 // Method contains patchpoints
#define OMF_NEEDS_GCPOLLS 0x00000200 // Method needs GC polls
#define OMF_HAS_FROZEN_STRING 0x00000400 // Method has a frozen string (REF constant int), currently only on CoreRT.
#define OMF_HAS_PARTIAL_COMPILATION_PATCHPOINT 0x00000800 // Method contains partial compilation patchpoints
#define OMF_HAS_TAILCALL_SUCCESSOR 0x00001000 // Method has a potential tail call in a non-BBJ_RETURN block
bool doesMethodHaveFatPointer()
{
return (optMethodFlags & OMF_HAS_FATPOINTER) != 0;
}
void setMethodHasFatPointer()
{
optMethodFlags |= OMF_HAS_FATPOINTER;
}
void clearMethodHasFatPointer()
{
optMethodFlags &= ~OMF_HAS_FATPOINTER;
}
void addFatPointerCandidate(GenTreeCall* call);
bool doesMethodHaveFrozenString() const
{
return (optMethodFlags & OMF_HAS_FROZEN_STRING) != 0;
}
void setMethodHasFrozenString()
{
optMethodFlags |= OMF_HAS_FROZEN_STRING;
}
bool doesMethodHaveGuardedDevirtualization() const
{
return (optMethodFlags & OMF_HAS_GUARDEDDEVIRT) != 0;
}
void setMethodHasGuardedDevirtualization()
{
optMethodFlags |= OMF_HAS_GUARDEDDEVIRT;
}
void clearMethodHasGuardedDevirtualization()
{
optMethodFlags &= ~OMF_HAS_GUARDEDDEVIRT;
}
void considerGuardedDevirtualization(GenTreeCall* call,
IL_OFFSET ilOffset,
bool isInterface,
CORINFO_METHOD_HANDLE baseMethod,
CORINFO_CLASS_HANDLE baseClass,
CORINFO_CONTEXT_HANDLE* pContextHandle DEBUGARG(CORINFO_CLASS_HANDLE objClass)
DEBUGARG(const char* objClassName));
void addGuardedDevirtualizationCandidate(GenTreeCall* call,
CORINFO_METHOD_HANDLE methodHandle,
CORINFO_CLASS_HANDLE classHandle,
unsigned methodAttr,
unsigned classAttr,
unsigned likelihood);
bool doesMethodHaveExpRuntimeLookup()
{
return (optMethodFlags & OMF_HAS_EXPRUNTIMELOOKUP) != 0;
}
void setMethodHasExpRuntimeLookup()
{
optMethodFlags |= OMF_HAS_EXPRUNTIMELOOKUP;
}
void clearMethodHasExpRuntimeLookup()
{
optMethodFlags &= ~OMF_HAS_EXPRUNTIMELOOKUP;
}
void addExpRuntimeLookupCandidate(GenTreeCall* call);
bool doesMethodHavePatchpoints()
{
return (optMethodFlags & OMF_HAS_PATCHPOINT) != 0;
}
void setMethodHasPatchpoint()
{
optMethodFlags |= OMF_HAS_PATCHPOINT;
}
bool doesMethodHavePartialCompilationPatchpoints()
{
return (optMethodFlags & OMF_HAS_PARTIAL_COMPILATION_PATCHPOINT) != 0;
}
void setMethodHasPartialCompilationPatchpoint()
{
optMethodFlags |= OMF_HAS_PARTIAL_COMPILATION_PATCHPOINT;
}
unsigned optMethodFlags;
bool doesMethodHaveNoReturnCalls()
{
return optNoReturnCallCount > 0;
}
void setMethodHasNoReturnCalls()
{
optNoReturnCallCount++;
}
unsigned optNoReturnCallCount;
    // Recursion bound that controls how far back we walk when tracking an SSA value.
    // No throughput difference was found with a backward-walk bound between 3 and 8.
static const int optEarlyPropRecurBound = 5;
enum class optPropKind
{
OPK_INVALID,
OPK_ARRAYLEN,
OPK_NULLCHECK
};
typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, GenTree*> LocalNumberToNullCheckTreeMap;
GenTree* getArrayLengthFromAllocation(GenTree* tree DEBUGARG(BasicBlock* block));
GenTree* optPropGetValueRec(unsigned lclNum, unsigned ssaNum, optPropKind valueKind, int walkDepth);
GenTree* optPropGetValue(unsigned lclNum, unsigned ssaNum, optPropKind valueKind);
GenTree* optEarlyPropRewriteTree(GenTree* tree, LocalNumberToNullCheckTreeMap* nullCheckMap);
bool optDoEarlyPropForBlock(BasicBlock* block);
bool optDoEarlyPropForFunc();
void optEarlyProp();
void optFoldNullCheck(GenTree* tree, LocalNumberToNullCheckTreeMap* nullCheckMap);
GenTree* optFindNullCheckToFold(GenTree* tree, LocalNumberToNullCheckTreeMap* nullCheckMap);
bool optIsNullCheckFoldingLegal(GenTree* tree,
GenTree* nullCheckTree,
GenTree** nullCheckParent,
Statement** nullCheckStmt);
bool optCanMoveNullCheckPastTree(GenTree* tree,
unsigned nullCheckLclNum,
bool isInsideTry,
bool checkSideEffectSummary);
#if DEBUG
void optCheckFlagsAreSet(unsigned methodFlag,
const char* methodFlagStr,
unsigned bbFlag,
const char* bbFlagStr,
GenTree* tree,
BasicBlock* basicBlock);
#endif
// Redundant branch opts
//
PhaseStatus optRedundantBranches();
bool optRedundantRelop(BasicBlock* const block);
bool optRedundantBranch(BasicBlock* const block);
bool optJumpThread(BasicBlock* const block, BasicBlock* const domBlock, bool domIsSameRelop);
bool optReachable(BasicBlock* const fromBlock, BasicBlock* const toBlock, BasicBlock* const excludedBlock);
/**************************************************************************
* Value/Assertion propagation
*************************************************************************/
public:
// Data structures for assertion prop
BitVecTraits* apTraits;
ASSERT_TP apFull;
enum optAssertionKind
{
OAK_INVALID,
OAK_EQUAL,
OAK_NOT_EQUAL,
OAK_SUBRANGE,
OAK_NO_THROW,
OAK_COUNT
};
enum optOp1Kind
{
O1K_INVALID,
O1K_LCLVAR,
O1K_ARR_BND,
O1K_BOUND_OPER_BND,
O1K_BOUND_LOOP_BND,
O1K_CONSTANT_LOOP_BND,
O1K_CONSTANT_LOOP_BND_UN,
O1K_EXACT_TYPE,
O1K_SUBTYPE,
O1K_VALUE_NUMBER,
O1K_COUNT
};
enum optOp2Kind
{
O2K_INVALID,
O2K_LCLVAR_COPY,
O2K_IND_CNS_INT,
O2K_CONST_INT,
O2K_CONST_LONG,
O2K_CONST_DOUBLE,
O2K_ZEROOBJ,
O2K_SUBRANGE,
O2K_COUNT
};
struct AssertionDsc
{
optAssertionKind assertionKind;
struct SsaVar
{
unsigned lclNum; // assigned to or property of this local var number
unsigned ssaNum;
};
struct ArrBnd
{
ValueNum vnIdx;
ValueNum vnLen;
};
struct AssertionDscOp1
{
optOp1Kind kind; // a normal LclVar, or Exact-type or Subtype
ValueNum vn;
union {
SsaVar lcl;
ArrBnd bnd;
};
} op1;
struct AssertionDscOp2
{
optOp2Kind kind; // a const or copy assignment
ValueNum vn;
struct IntVal
{
ssize_t iconVal; // integer
#if !defined(HOST_64BIT)
unsigned padding; // unused; ensures iconFlags does not overlap lconVal
#endif
GenTreeFlags iconFlags; // gtFlags
};
union {
struct
{
SsaVar lcl;
FieldSeqNode* zeroOffsetFieldSeq;
};
IntVal u1;
__int64 lconVal;
double dconVal;
IntegralRange u2;
};
} op2;
bool IsCheckedBoundArithBound()
{
return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) && op1.kind == O1K_BOUND_OPER_BND);
}
bool IsCheckedBoundBound()
{
return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) && op1.kind == O1K_BOUND_LOOP_BND);
}
bool IsConstantBound()
{
return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) &&
(op1.kind == O1K_CONSTANT_LOOP_BND));
}
bool IsConstantBoundUnsigned()
{
return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) &&
(op1.kind == O1K_CONSTANT_LOOP_BND_UN));
}
bool IsBoundsCheckNoThrow()
{
return ((assertionKind == OAK_NO_THROW) && (op1.kind == O1K_ARR_BND));
}
bool IsCopyAssertion()
{
return ((assertionKind == OAK_EQUAL) && (op1.kind == O1K_LCLVAR) && (op2.kind == O2K_LCLVAR_COPY));
}
bool IsConstantInt32Assertion()
{
return ((assertionKind == OAK_EQUAL) || (assertionKind == OAK_NOT_EQUAL)) && (op2.kind == O2K_CONST_INT);
}
static bool SameKind(AssertionDsc* a1, AssertionDsc* a2)
{
return a1->assertionKind == a2->assertionKind && a1->op1.kind == a2->op1.kind &&
a1->op2.kind == a2->op2.kind;
}
static bool ComplementaryKind(optAssertionKind kind, optAssertionKind kind2)
{
if (kind == OAK_EQUAL)
{
return kind2 == OAK_NOT_EQUAL;
}
else if (kind == OAK_NOT_EQUAL)
{
return kind2 == OAK_EQUAL;
}
return false;
}
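        // For example, OAK_EQUAL and OAK_NOT_EQUAL are complementary in either order; every other
        // pairing (including OAK_SUBRANGE and OAK_NO_THROW) is not considered complementary.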
bool HasSameOp1(AssertionDsc* that, bool vnBased)
{
if (op1.kind != that->op1.kind)
{
return false;
}
else if (op1.kind == O1K_ARR_BND)
{
assert(vnBased);
return (op1.bnd.vnIdx == that->op1.bnd.vnIdx) && (op1.bnd.vnLen == that->op1.bnd.vnLen);
}
else
{
return ((vnBased && (op1.vn == that->op1.vn)) ||
(!vnBased && (op1.lcl.lclNum == that->op1.lcl.lclNum)));
}
}
bool HasSameOp2(AssertionDsc* that, bool vnBased)
{
if (op2.kind != that->op2.kind)
{
return false;
}
switch (op2.kind)
{
case O2K_IND_CNS_INT:
case O2K_CONST_INT:
return ((op2.u1.iconVal == that->op2.u1.iconVal) && (op2.u1.iconFlags == that->op2.u1.iconFlags));
case O2K_CONST_LONG:
return (op2.lconVal == that->op2.lconVal);
case O2K_CONST_DOUBLE:
                // compare bitwise so that positive and negative zero are treated as distinct.
return (memcmp(&op2.dconVal, &that->op2.dconVal, sizeof(double)) == 0);
case O2K_ZEROOBJ:
return true;
case O2K_LCLVAR_COPY:
return (op2.lcl.lclNum == that->op2.lcl.lclNum) &&
(!vnBased || op2.lcl.ssaNum == that->op2.lcl.ssaNum) &&
(op2.zeroOffsetFieldSeq == that->op2.zeroOffsetFieldSeq);
case O2K_SUBRANGE:
return op2.u2.Equals(that->op2.u2);
case O2K_INVALID:
// we will return false
break;
default:
assert(!"Unexpected value for op2.kind in AssertionDsc.");
break;
}
return false;
}
bool Complementary(AssertionDsc* that, bool vnBased)
{
return ComplementaryKind(assertionKind, that->assertionKind) && HasSameOp1(that, vnBased) &&
HasSameOp2(that, vnBased);
}
bool Equals(AssertionDsc* that, bool vnBased)
{
if (assertionKind != that->assertionKind)
{
return false;
}
else if (assertionKind == OAK_NO_THROW)
{
assert(op2.kind == O2K_INVALID);
return HasSameOp1(that, vnBased);
}
else
{
return HasSameOp1(that, vnBased) && HasSameOp2(that, vnBased);
}
}
};
protected:
static fgWalkPreFn optAddCopiesCallback;
static fgWalkPreFn optVNAssertionPropCurStmtVisitor;
unsigned optAddCopyLclNum;
GenTree* optAddCopyAsgnNode;
bool optLocalAssertionProp; // indicates that we are performing local assertion prop
bool optAssertionPropagated; // set to true if we modified the trees
bool optAssertionPropagatedCurrentStmt;
#ifdef DEBUG
GenTree* optAssertionPropCurrentTree;
#endif
AssertionIndex* optComplementaryAssertionMap;
JitExpandArray<ASSERT_TP>* optAssertionDep; // table that holds dependent assertions (assertions
// using the value of a local var) for each local var
AssertionDsc* optAssertionTabPrivate; // table that holds info about value assignments
AssertionIndex optAssertionCount; // total number of assertions in the assertion table
AssertionIndex optMaxAssertionCount;
public:
void optVnNonNullPropCurStmt(BasicBlock* block, Statement* stmt, GenTree* tree);
fgWalkResult optVNConstantPropCurStmt(BasicBlock* block, Statement* stmt, GenTree* tree);
GenTree* optVNConstantPropOnJTrue(BasicBlock* block, GenTree* test);
GenTree* optVNConstantPropOnTree(BasicBlock* block, GenTree* tree);
GenTree* optExtractSideEffListFromConst(GenTree* tree);
AssertionIndex GetAssertionCount()
{
return optAssertionCount;
}
ASSERT_TP* bbJtrueAssertionOut;
typedef JitHashTable<ValueNum, JitSmallPrimitiveKeyFuncs<ValueNum>, ASSERT_TP> ValueNumToAssertsMap;
ValueNumToAssertsMap* optValueNumToAsserts;
// Assertion prop helpers.
ASSERT_TP& GetAssertionDep(unsigned lclNum);
AssertionDsc* optGetAssertion(AssertionIndex assertIndex);
void optAssertionInit(bool isLocalProp);
void optAssertionTraitsInit(AssertionIndex assertionCount);
void optAssertionReset(AssertionIndex limit);
void optAssertionRemove(AssertionIndex index);
// Assertion prop data flow functions.
void optAssertionPropMain();
Statement* optVNAssertionPropCurStmt(BasicBlock* block, Statement* stmt);
bool optIsTreeKnownIntValue(bool vnBased, GenTree* tree, ssize_t* pConstant, GenTreeFlags* pIconFlags);
ASSERT_TP* optInitAssertionDataflowFlags();
ASSERT_TP* optComputeAssertionGen();
// Assertion Gen functions.
void optAssertionGen(GenTree* tree);
AssertionIndex optAssertionGenCast(GenTreeCast* cast);
AssertionIndex optAssertionGenPhiDefn(GenTree* tree);
AssertionInfo optCreateJTrueBoundsAssertion(GenTree* tree);
AssertionInfo optAssertionGenJtrue(GenTree* tree);
AssertionIndex optCreateJtrueAssertions(GenTree* op1,
GenTree* op2,
Compiler::optAssertionKind assertionKind,
bool helperCallArgs = false);
AssertionIndex optFindComplementary(AssertionIndex assertionIndex);
void optMapComplementary(AssertionIndex assertionIndex, AssertionIndex index);
// Assertion creation functions.
AssertionIndex optCreateAssertion(GenTree* op1,
GenTree* op2,
optAssertionKind assertionKind,
bool helperCallArgs = false);
AssertionIndex optFinalizeCreatingAssertion(AssertionDsc* assertion);
bool optTryExtractSubrangeAssertion(GenTree* source, IntegralRange* pRange);
void optCreateComplementaryAssertion(AssertionIndex assertionIndex,
GenTree* op1,
GenTree* op2,
bool helperCallArgs = false);
bool optAssertionVnInvolvesNan(AssertionDsc* assertion);
AssertionIndex optAddAssertion(AssertionDsc* assertion);
void optAddVnAssertionMapping(ValueNum vn, AssertionIndex index);
#ifdef DEBUG
void optPrintVnAssertionMapping();
#endif
ASSERT_TP optGetVnMappedAssertions(ValueNum vn);
// Used for respective assertion propagations.
AssertionIndex optAssertionIsSubrange(GenTree* tree, IntegralRange range, ASSERT_VALARG_TP assertions);
AssertionIndex optAssertionIsSubtype(GenTree* tree, GenTree* methodTableArg, ASSERT_VALARG_TP assertions);
AssertionIndex optAssertionIsNonNullInternal(GenTree* op, ASSERT_VALARG_TP assertions DEBUGARG(bool* pVnBased));
bool optAssertionIsNonNull(GenTree* op,
ASSERT_VALARG_TP assertions DEBUGARG(bool* pVnBased) DEBUGARG(AssertionIndex* pIndex));
AssertionIndex optGlobalAssertionIsEqualOrNotEqual(ASSERT_VALARG_TP assertions, GenTree* op1, GenTree* op2);
AssertionIndex optGlobalAssertionIsEqualOrNotEqualZero(ASSERT_VALARG_TP assertions, GenTree* op1);
AssertionIndex optLocalAssertionIsEqualOrNotEqual(
optOp1Kind op1Kind, unsigned lclNum, optOp2Kind op2Kind, ssize_t cnsVal, ASSERT_VALARG_TP assertions);
// Assertion prop for lcl var functions.
bool optAssertionProp_LclVarTypeCheck(GenTree* tree, LclVarDsc* lclVarDsc, LclVarDsc* copyVarDsc);
GenTree* optCopyAssertionProp(AssertionDsc* curAssertion,
GenTreeLclVarCommon* tree,
Statement* stmt DEBUGARG(AssertionIndex index));
GenTree* optConstantAssertionProp(AssertionDsc* curAssertion,
GenTreeLclVarCommon* tree,
Statement* stmt DEBUGARG(AssertionIndex index));
bool optZeroObjAssertionProp(GenTree* tree, ASSERT_VALARG_TP assertions);
// Assertion propagation functions.
GenTree* optAssertionProp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt, BasicBlock* block);
GenTree* optAssertionProp_LclVar(ASSERT_VALARG_TP assertions, GenTreeLclVarCommon* tree, Statement* stmt);
GenTree* optAssertionProp_Asg(ASSERT_VALARG_TP assertions, GenTreeOp* asg, Statement* stmt);
GenTree* optAssertionProp_Return(ASSERT_VALARG_TP assertions, GenTreeUnOp* ret, Statement* stmt);
GenTree* optAssertionProp_Ind(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt);
GenTree* optAssertionProp_Cast(ASSERT_VALARG_TP assertions, GenTreeCast* cast, Statement* stmt);
GenTree* optAssertionProp_Call(ASSERT_VALARG_TP assertions, GenTreeCall* call, Statement* stmt);
GenTree* optAssertionProp_RelOp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt);
GenTree* optAssertionProp_Comma(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt);
GenTree* optAssertionProp_BndsChk(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt);
GenTree* optAssertionPropGlobal_RelOp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt);
GenTree* optAssertionPropLocal_RelOp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt);
GenTree* optAssertionProp_Update(GenTree* newTree, GenTree* tree, Statement* stmt);
GenTree* optNonNullAssertionProp_Call(ASSERT_VALARG_TP assertions, GenTreeCall* call);
// Implied assertion functions.
void optImpliedAssertions(AssertionIndex assertionIndex, ASSERT_TP& activeAssertions);
void optImpliedByTypeOfAssertions(ASSERT_TP& activeAssertions);
void optImpliedByCopyAssertion(AssertionDsc* copyAssertion, AssertionDsc* depAssertion, ASSERT_TP& result);
void optImpliedByConstAssertion(AssertionDsc* curAssertion, ASSERT_TP& result);
#ifdef DEBUG
void optPrintAssertion(AssertionDsc* newAssertion, AssertionIndex assertionIndex = 0);
void optPrintAssertionIndex(AssertionIndex index);
void optPrintAssertionIndices(ASSERT_TP assertions);
void optDebugCheckAssertion(AssertionDsc* assertion);
void optDebugCheckAssertions(AssertionIndex AssertionIndex);
#endif
static void optDumpAssertionIndices(const char* header, ASSERT_TP assertions, const char* footer = nullptr);
static void optDumpAssertionIndices(ASSERT_TP assertions, const char* footer = nullptr);
void optAddCopies();
/**************************************************************************
* Range checks
*************************************************************************/
public:
struct LoopCloneVisitorInfo
{
LoopCloneContext* context;
unsigned loopNum;
Statement* stmt;
LoopCloneVisitorInfo(LoopCloneContext* context, unsigned loopNum, Statement* stmt)
: context(context), loopNum(loopNum), stmt(nullptr)
{
}
};
bool optIsStackLocalInvariant(unsigned loopNum, unsigned lclNum);
bool optExtractArrIndex(GenTree* tree, ArrIndex* result, unsigned lhsNum);
bool optReconstructArrIndex(GenTree* tree, ArrIndex* result, unsigned lhsNum);
bool optIdentifyLoopOptInfo(unsigned loopNum, LoopCloneContext* context);
static fgWalkPreFn optCanOptimizeByLoopCloningVisitor;
fgWalkResult optCanOptimizeByLoopCloning(GenTree* tree, LoopCloneVisitorInfo* info);
bool optObtainLoopCloningOpts(LoopCloneContext* context);
bool optIsLoopClonable(unsigned loopInd);
bool optLoopCloningEnabled();
#ifdef DEBUG
void optDebugLogLoopCloning(BasicBlock* block, Statement* insertBefore);
#endif
void optPerformStaticOptimizations(unsigned loopNum, LoopCloneContext* context DEBUGARG(bool fastPath));
bool optComputeDerefConditions(unsigned loopNum, LoopCloneContext* context);
bool optDeriveLoopCloningConditions(unsigned loopNum, LoopCloneContext* context);
BasicBlock* optInsertLoopChoiceConditions(LoopCloneContext* context,
unsigned loopNum,
BasicBlock* slowHead,
BasicBlock* insertAfter);
protected:
ssize_t optGetArrayRefScaleAndIndex(GenTree* mul, GenTree** pIndex DEBUGARG(bool bRngChk));
bool optReachWithoutCall(BasicBlock* srcBB, BasicBlock* dstBB);
protected:
bool optLoopsMarked;
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX RegAlloc XX
XX XX
XX Does the register allocation and puts the remaining lclVars on the stack XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
public:
regNumber raUpdateRegStateForArg(RegState* regState, LclVarDsc* argDsc);
void raMarkStkVars();
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
#if defined(TARGET_AMD64)
static bool varTypeNeedsPartialCalleeSave(var_types type)
{
assert(type != TYP_STRUCT);
return (type == TYP_SIMD32);
}
#elif defined(TARGET_ARM64)
static bool varTypeNeedsPartialCalleeSave(var_types type)
{
assert(type != TYP_STRUCT);
        // The ARM64 ABI requires the callee to save only the lower 8 bytes of the FP callee-saved registers.
        // For SIMD types wider than 8 bytes, the caller is responsible for saving and restoring the upper bytes.
return ((type == TYP_SIMD16) || (type == TYP_SIMD12));
}
#else // !defined(TARGET_AMD64) && !defined(TARGET_ARM64)
#error("Unknown target architecture for FEATURE_SIMD")
#endif // !defined(TARGET_AMD64) && !defined(TARGET_ARM64)
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
protected:
// Some things are used by both LSRA and regpredict allocators.
FrameType rpFrameType;
bool rpMustCreateEBPCalled; // Set to true after we have called rpMustCreateEBPFrame once
bool rpMustCreateEBPFrame(INDEBUG(const char** wbReason));
private:
    Lowering* m_pLowering; // Lowering; needed to lower IR that is added or modified after the Lowering phase.
LinearScanInterface* m_pLinearScan; // Linear Scan allocator
    /* raIsVarargsStackArg is called by raMarkStkVars and by
       lvaComputeRefCounts. It identifies the special case of a varargs
       function with a parameter passed on the stack other than the
       special varargs handle. Such parameters require special treatment,
       because they cannot be tracked by the GC (their stack offsets are
       not known at compile time).
    */
bool raIsVarargsStackArg(unsigned lclNum)
{
#ifdef TARGET_X86
LclVarDsc* varDsc = lvaGetDesc(lclNum);
assert(varDsc->lvIsParam);
return (info.compIsVarArgs && !varDsc->lvIsRegArg && (lclNum != lvaVarargsHandleArg));
#else // TARGET_X86
return false;
#endif // TARGET_X86
}
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX EEInterface XX
XX XX
XX Get to the class and method info from the Execution Engine given XX
XX tokens for the class and method XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
public:
// Get handles
void eeGetCallInfo(CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_RESOLVED_TOKEN* pConstrainedToken,
CORINFO_CALLINFO_FLAGS flags,
CORINFO_CALL_INFO* pResult);
void eeGetFieldInfo(CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_ACCESS_FLAGS flags,
CORINFO_FIELD_INFO* pResult);
// Get the flags
bool eeIsValueClass(CORINFO_CLASS_HANDLE clsHnd);
bool eeIsIntrinsic(CORINFO_METHOD_HANDLE ftn);
bool eeIsFieldStatic(CORINFO_FIELD_HANDLE fldHnd);
var_types eeGetFieldType(CORINFO_FIELD_HANDLE fldHnd, CORINFO_CLASS_HANDLE* pStructHnd = nullptr);
#if defined(DEBUG) || defined(FEATURE_JIT_METHOD_PERF) || defined(FEATURE_SIMD) || defined(TRACK_LSRA_STATS)
const char* eeGetMethodName(CORINFO_METHOD_HANDLE hnd, const char** className);
const char* eeGetMethodFullName(CORINFO_METHOD_HANDLE hnd);
unsigned compMethodHash(CORINFO_METHOD_HANDLE methodHandle);
bool eeIsNativeMethod(CORINFO_METHOD_HANDLE method);
CORINFO_METHOD_HANDLE eeGetMethodHandleForNative(CORINFO_METHOD_HANDLE method);
#endif
var_types eeGetArgType(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig);
var_types eeGetArgType(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig, bool* isPinned);
CORINFO_CLASS_HANDLE eeGetArgClass(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE list);
CORINFO_CLASS_HANDLE eeGetClassFromContext(CORINFO_CONTEXT_HANDLE context);
unsigned eeGetArgSize(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig);
static unsigned eeGetArgSizeAlignment(var_types type, bool isFloatHfa);
// VOM info, method sigs
void eeGetSig(unsigned sigTok,
CORINFO_MODULE_HANDLE scope,
CORINFO_CONTEXT_HANDLE context,
CORINFO_SIG_INFO* retSig);
void eeGetCallSiteSig(unsigned sigTok,
CORINFO_MODULE_HANDLE scope,
CORINFO_CONTEXT_HANDLE context,
CORINFO_SIG_INFO* retSig);
void eeGetMethodSig(CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* retSig, CORINFO_CLASS_HANDLE owner = nullptr);
// Method entry-points, instrs
CORINFO_METHOD_HANDLE eeMarkNativeTarget(CORINFO_METHOD_HANDLE method);
CORINFO_EE_INFO eeInfo;
bool eeInfoInitialized;
CORINFO_EE_INFO* eeGetEEInfo();
    // Gets the offset of an SDArray's first element
    static unsigned eeGetArrayDataOffset();
    // Gets the offset of an MDArray's first element
    static unsigned eeGetMDArrayDataOffset(unsigned rank);
    // Gets the offset of an MDArray's dimension length for a given dimension.
    static unsigned eeGetMDArrayLengthOffset(unsigned rank, unsigned dimension);
    // Gets the offset of an MDArray's lower bound for a given dimension.
    static unsigned eeGetMDArrayLowerBoundOffset(unsigned rank, unsigned dimension);
GenTree* eeGetPInvokeCookie(CORINFO_SIG_INFO* szMetaSig);
// Returns the page size for the target machine as reported by the EE.
target_size_t eeGetPageSize()
{
return (target_size_t)eeGetEEInfo()->osPageSize;
}
//------------------------------------------------------------------------
// VirtualStubParam: virtual stub dispatch extra parameter (slot address).
//
    // It represents the ABI- and target-specific registers used for the parameter.
//
class VirtualStubParamInfo
{
public:
VirtualStubParamInfo(bool isCoreRTABI)
{
#if defined(TARGET_X86)
reg = REG_EAX;
regMask = RBM_EAX;
#elif defined(TARGET_AMD64)
if (isCoreRTABI)
{
reg = REG_R10;
regMask = RBM_R10;
}
else
{
reg = REG_R11;
regMask = RBM_R11;
}
#elif defined(TARGET_ARM)
if (isCoreRTABI)
{
reg = REG_R12;
regMask = RBM_R12;
}
else
{
reg = REG_R4;
regMask = RBM_R4;
}
#elif defined(TARGET_ARM64)
reg = REG_R11;
regMask = RBM_R11;
#else
#error Unsupported or unset target architecture
#endif
}
regNumber GetReg() const
{
return reg;
}
_regMask_enum GetRegMask() const
{
return regMask;
}
private:
regNumber reg;
_regMask_enum regMask;
};
VirtualStubParamInfo* virtualStubParamInfo;
bool IsTargetAbi(CORINFO_RUNTIME_ABI abi)
{
return eeGetEEInfo()->targetAbi == abi;
}
bool generateCFIUnwindCodes()
{
#if defined(FEATURE_CFI_SUPPORT)
return TargetOS::IsUnix && IsTargetAbi(CORINFO_CORERT_ABI);
#else
return false;
#endif
}
// Debugging support - Line number info
void eeGetStmtOffsets();
unsigned eeBoundariesCount;
ICorDebugInfo::OffsetMapping* eeBoundaries; // Boundaries to report to the EE
void eeSetLIcount(unsigned count);
void eeSetLIinfo(unsigned which, UNATIVE_OFFSET offs, IPmappingDscKind kind, const ILLocation& loc);
void eeSetLIdone();
#ifdef DEBUG
static void eeDispILOffs(IL_OFFSET offs);
static void eeDispSourceMappingOffs(uint32_t offs);
static void eeDispLineInfo(const ICorDebugInfo::OffsetMapping* line);
void eeDispLineInfos();
#endif // DEBUG
// Debugging support - Local var info
void eeGetVars();
unsigned eeVarsCount;
struct VarResultInfo
{
UNATIVE_OFFSET startOffset;
UNATIVE_OFFSET endOffset;
DWORD varNumber;
CodeGenInterface::siVarLoc loc;
} * eeVars;
void eeSetLVcount(unsigned count);
void eeSetLVinfo(unsigned which,
UNATIVE_OFFSET startOffs,
UNATIVE_OFFSET length,
unsigned varNum,
const CodeGenInterface::siVarLoc& loc);
void eeSetLVdone();
#ifdef DEBUG
void eeDispVar(ICorDebugInfo::NativeVarInfo* var);
void eeDispVars(CORINFO_METHOD_HANDLE ftn, ULONG32 cVars, ICorDebugInfo::NativeVarInfo* vars);
#endif // DEBUG
// ICorJitInfo wrappers
void eeReserveUnwindInfo(bool isFunclet, bool isColdCode, ULONG unwindSize);
void eeAllocUnwindInfo(BYTE* pHotCode,
BYTE* pColdCode,
ULONG startOffset,
ULONG endOffset,
ULONG unwindSize,
BYTE* pUnwindBlock,
CorJitFuncKind funcKind);
void eeSetEHcount(unsigned cEH);
void eeSetEHinfo(unsigned EHnumber, const CORINFO_EH_CLAUSE* clause);
WORD eeGetRelocTypeHint(void* target);
// ICorStaticInfo wrapper functions
bool eeTryResolveToken(CORINFO_RESOLVED_TOKEN* resolvedToken);
#if defined(UNIX_AMD64_ABI)
#ifdef DEBUG
static void dumpSystemVClassificationType(SystemVClassificationType ct);
#endif // DEBUG
void eeGetSystemVAmd64PassStructInRegisterDescriptor(
/*IN*/ CORINFO_CLASS_HANDLE structHnd,
/*OUT*/ SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* structPassInRegDescPtr);
#endif // UNIX_AMD64_ABI
template <typename ParamType>
bool eeRunWithErrorTrap(void (*function)(ParamType*), ParamType* param)
{
return eeRunWithErrorTrapImp(reinterpret_cast<void (*)(void*)>(function), reinterpret_cast<void*>(param));
}
bool eeRunWithErrorTrapImp(void (*function)(void*), void* param);
template <typename ParamType>
bool eeRunWithSPMIErrorTrap(void (*function)(ParamType*), ParamType* param)
{
return eeRunWithSPMIErrorTrapImp(reinterpret_cast<void (*)(void*)>(function), reinterpret_cast<void*>(param));
}
bool eeRunWithSPMIErrorTrapImp(void (*function)(void*), void* param);
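    // Illustrative use of the error-trap wrappers (a sketch with a hypothetical parameter struct):
    //
    //   struct FilterParam
    //   {
    //       Compiler* pThis;
    //       bool      succeeded;
    //   };
    //   FilterParam param = {this, false};
    //   bool noException = eeRunWithErrorTrap<FilterParam>(
    //       [](FilterParam* p) {
    //           // ... call into the EE here ...
    //           p->succeeded = true;
    //       },
    //       &param);
    //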
// Utility functions
const char* eeGetFieldName(CORINFO_FIELD_HANDLE fieldHnd, const char** classNamePtr = nullptr);
#if defined(DEBUG)
const WCHAR* eeGetCPString(size_t stringHandle);
unsigned eeTryGetClassSize(CORINFO_CLASS_HANDLE clsHnd);
const char16_t* eeGetShortClassName(CORINFO_CLASS_HANDLE clsHnd);
#endif
const char* eeGetClassName(CORINFO_CLASS_HANDLE clsHnd);
static CORINFO_METHOD_HANDLE eeFindHelper(unsigned helper);
static CorInfoHelpFunc eeGetHelperNum(CORINFO_METHOD_HANDLE method);
static bool IsSharedStaticHelper(GenTree* tree);
static bool IsGcSafePoint(GenTreeCall* call);
static CORINFO_FIELD_HANDLE eeFindJitDataOffs(unsigned jitDataOffs);
// returns true/false if 'field' is a Jit Data offset
static bool eeIsJitDataOffs(CORINFO_FIELD_HANDLE field);
// returns a number < 0 if 'field' is not a Jit Data offset, otherwise the data offset (limited to 2GB)
static int eeGetJitDataOffs(CORINFO_FIELD_HANDLE field);
/*****************************************************************************/
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX CodeGenerator XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
public:
CodeGenInterface* codeGen;
// Record the instr offset mapping to the generated code
jitstd::list<IPmappingDsc> genIPmappings;
#ifdef DEBUG
jitstd::list<PreciseIPMapping> genPreciseIPmappings;
#endif
// Managed RetVal - A side hash table meant to record the mapping from a
// GT_CALL node to its debug info. This info is used to emit sequence points
    // that can be used by the debugger to determine the native offset at which the
// managed RetVal will be available.
//
    // We could instead store the debug info directly in the GT_CALL node. That was ruled out in
    // favor of a side table for two reasons: 1) We need debug info for only those
    // GT_CALL nodes (created during importation) that correspond to an IL call and
    // whose return type is other than TYP_VOID. 2) GT_CALL is a frequently used
    // node, and the IL offset is needed only when generating debuggable code. Therefore
    // it is desirable to avoid the memory size penalty in retail scenarios.
typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, DebugInfo> CallSiteDebugInfoTable;
CallSiteDebugInfoTable* genCallSite2DebugInfoMap;
unsigned genReturnLocal; // Local number for the return value when applicable.
BasicBlock* genReturnBB; // jumped to when not optimizing for speed.
// The following properties are part of CodeGenContext. Getters are provided here for
// convenience and backward compatibility, but the properties can only be set by invoking
// the setter on CodeGenContext directly.
emitter* GetEmitter() const
{
return codeGen->GetEmitter();
}
bool isFramePointerUsed() const
{
return codeGen->isFramePointerUsed();
}
bool GetInterruptible()
{
return codeGen->GetInterruptible();
}
void SetInterruptible(bool value)
{
codeGen->SetInterruptible(value);
}
#if DOUBLE_ALIGN
const bool genDoubleAlign()
{
return codeGen->doDoubleAlign();
}
DWORD getCanDoubleAlign();
bool shouldDoubleAlign(unsigned refCntStk,
unsigned refCntReg,
weight_t refCntWtdReg,
unsigned refCntStkParam,
weight_t refCntWtdStkDbl);
#endif // DOUBLE_ALIGN
bool IsFullPtrRegMapRequired()
{
return codeGen->IsFullPtrRegMapRequired();
}
void SetFullPtrRegMapRequired(bool value)
{
codeGen->SetFullPtrRegMapRequired(value);
}
// Things that MAY belong either in CodeGen or CodeGenContext
#if defined(FEATURE_EH_FUNCLETS)
FuncInfoDsc* compFuncInfos;
unsigned short compCurrFuncIdx;
unsigned short compFuncInfoCount;
unsigned short compFuncCount()
{
assert(fgFuncletsCreated);
return compFuncInfoCount;
}
#else // !FEATURE_EH_FUNCLETS
// This is a no-op when there are no funclets!
void genUpdateCurrentFunclet(BasicBlock* block)
{
return;
}
FuncInfoDsc compFuncInfoRoot;
static const unsigned compCurrFuncIdx = 0;
unsigned short compFuncCount()
{
return 1;
}
#endif // !FEATURE_EH_FUNCLETS
FuncInfoDsc* funCurrentFunc();
void funSetCurrentFunc(unsigned funcIdx);
FuncInfoDsc* funGetFunc(unsigned funcIdx);
unsigned int funGetFuncIdx(BasicBlock* block);
// LIVENESS
VARSET_TP compCurLife; // current live variables
GenTree* compCurLifeTree; // node after which compCurLife has been computed
    // Compares the given "newLife" with the last set of live variables and updates
    // codeGen's "gcInfo", siScopes, and "regSet" with the new variables' homes/liveness.
template <bool ForCodeGen>
void compChangeLife(VARSET_VALARG_TP newLife);
    // Updates the GC masks and register masks, and reports changes to variables' homes, given a set
    // of current live variables, if changes have happened since "compCurLife".
template <bool ForCodeGen>
inline void compUpdateLife(VARSET_VALARG_TP newLife);
    // Gets a register mask that represents the kill set for a helper call, since
// not all JIT Helper calls follow the standard ABI on the target architecture.
regMaskTP compHelperCallKillSet(CorInfoHelpFunc helper);
#ifdef TARGET_ARM
// Requires that "varDsc" be a promoted struct local variable being passed as an argument, beginning at
// "firstArgRegNum", which is assumed to have already been aligned to the register alignment restriction of the
// struct type. Adds bits to "*pArgSkippedRegMask" for any argument registers *not* used in passing "varDsc" --
// i.e., internal "holes" caused by internal alignment constraints. For example, if the struct contained an int and
    // a double, and we started at R0 (on ARM), then R1 would be skipped, and the bit for R1 would be added to the mask.
void fgAddSkippedRegsInPromotedStructArg(LclVarDsc* varDsc, unsigned firstArgRegNum, regMaskTP* pArgSkippedRegMask);
#endif // TARGET_ARM
    // If "tree" is an indirection (GT_IND or GT_OBJ) whose arg is an ADDR, whose arg is a LCL_VAR, return that LCL_VAR
// node, else NULL.
static GenTreeLclVar* fgIsIndirOfAddrOfLocal(GenTree* tree);
// This map is indexed by GT_OBJ nodes that are address of promoted struct variables, which
// have been annotated with the GTF_VAR_DEATH flag. If such a node is *not* mapped in this
// table, one may assume that all the (tracked) field vars die at this GT_OBJ. Otherwise,
// the node maps to a pointer to a VARSET_TP, containing set bits for each of the tracked field
// vars of the promoted struct local that go dead at the given node (the set bits are the bits
// for the tracked var indices of the field vars, as in a live var set).
//
// The map is allocated on demand so all map operations should use one of the following three
// wrapper methods.
NodeToVarsetPtrMap* m_promotedStructDeathVars;
NodeToVarsetPtrMap* GetPromotedStructDeathVars()
{
if (m_promotedStructDeathVars == nullptr)
{
m_promotedStructDeathVars = new (getAllocator()) NodeToVarsetPtrMap(getAllocator());
}
return m_promotedStructDeathVars;
}
void ClearPromotedStructDeathVars()
{
if (m_promotedStructDeathVars != nullptr)
{
m_promotedStructDeathVars->RemoveAll();
}
}
bool LookupPromotedStructDeathVars(GenTree* tree, VARSET_TP** bits)
{
*bits = nullptr;
bool result = false;
if (m_promotedStructDeathVars != nullptr)
{
result = m_promotedStructDeathVars->Lookup(tree, bits);
}
return result;
}
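    // Illustrative consumer pattern (a sketch, not the only call-site shape): at a node that is the
    // address of a promoted struct and carries GTF_VAR_DEATH, check whether only a subset of the
    // tracked field vars dies there:
    //
    //   VARSET_TP* deadFields = nullptr;
    //   if (LookupPromotedStructDeathVars(node, &deadFields))
    //   {
    //       // only the field vars whose tracked indices are set in *deadFields go dead at "node"
    //   }
    //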
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX UnwindInfo XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#if !defined(__GNUC__)
#pragma region Unwind information
#endif
public:
//
// Infrastructure functions: start/stop/reserve/emit.
//
void unwindBegProlog();
void unwindEndProlog();
void unwindBegEpilog();
void unwindEndEpilog();
void unwindReserve();
void unwindEmit(void* pHotCode, void* pColdCode);
//
// Specific unwind information functions: called by code generation to indicate a particular
// prolog or epilog unwindable instruction has been generated.
//
void unwindPush(regNumber reg);
void unwindAllocStack(unsigned size);
void unwindSetFrameReg(regNumber reg, unsigned offset);
void unwindSaveReg(regNumber reg, unsigned offset);
#if defined(TARGET_ARM)
void unwindPushMaskInt(regMaskTP mask);
void unwindPushMaskFloat(regMaskTP mask);
void unwindPopMaskInt(regMaskTP mask);
void unwindPopMaskFloat(regMaskTP mask);
void unwindBranch16(); // The epilog terminates with a 16-bit branch (e.g., "bx lr")
void unwindNop(unsigned codeSizeInBytes); // Generate unwind NOP code. 'codeSizeInBytes' is 2 or 4 bytes. Only
// called via unwindPadding().
void unwindPadding(); // Generate a sequence of unwind NOP codes representing instructions between the last
// instruction and the current location.
#endif // TARGET_ARM
#if defined(TARGET_ARM64)
void unwindNop();
void unwindPadding(); // Generate a sequence of unwind NOP codes representing instructions between the last
// instruction and the current location.
void unwindSaveReg(regNumber reg, int offset); // str reg, [sp, #offset]
void unwindSaveRegPreindexed(regNumber reg, int offset); // str reg, [sp, #offset]!
void unwindSaveRegPair(regNumber reg1, regNumber reg2, int offset); // stp reg1, reg2, [sp, #offset]
void unwindSaveRegPairPreindexed(regNumber reg1, regNumber reg2, int offset); // stp reg1, reg2, [sp, #offset]!
void unwindSaveNext(); // unwind code: save_next
void unwindReturn(regNumber reg); // ret lr
#endif // defined(TARGET_ARM64)
//
// Private "helper" functions for the unwind implementation.
//
private:
#if defined(FEATURE_EH_FUNCLETS)
void unwindGetFuncLocations(FuncInfoDsc* func,
bool getHotSectionData,
/* OUT */ emitLocation** ppStartLoc,
/* OUT */ emitLocation** ppEndLoc);
#endif // FEATURE_EH_FUNCLETS
void unwindReserveFunc(FuncInfoDsc* func);
void unwindEmitFunc(FuncInfoDsc* func, void* pHotCode, void* pColdCode);
#if defined(TARGET_AMD64) || (defined(TARGET_X86) && defined(FEATURE_EH_FUNCLETS))
void unwindReserveFuncHelper(FuncInfoDsc* func, bool isHotCode);
void unwindEmitFuncHelper(FuncInfoDsc* func, void* pHotCode, void* pColdCode, bool isHotCode);
#endif // TARGET_AMD64 || (TARGET_X86 && FEATURE_EH_FUNCLETS)
UNATIVE_OFFSET unwindGetCurrentOffset(FuncInfoDsc* func);
#if defined(TARGET_AMD64)
void unwindBegPrologWindows();
void unwindPushWindows(regNumber reg);
void unwindAllocStackWindows(unsigned size);
void unwindSetFrameRegWindows(regNumber reg, unsigned offset);
void unwindSaveRegWindows(regNumber reg, unsigned offset);
#ifdef UNIX_AMD64_ABI
void unwindSaveRegCFI(regNumber reg, unsigned offset);
#endif // UNIX_AMD64_ABI
#elif defined(TARGET_ARM)
void unwindPushPopMaskInt(regMaskTP mask, bool useOpsize16);
void unwindPushPopMaskFloat(regMaskTP mask);
#endif // TARGET_ARM
#if defined(FEATURE_CFI_SUPPORT)
short mapRegNumToDwarfReg(regNumber reg);
void createCfiCode(FuncInfoDsc* func, UNATIVE_OFFSET codeOffset, UCHAR opcode, short dwarfReg, INT offset = 0);
void unwindPushPopCFI(regNumber reg);
void unwindBegPrologCFI();
void unwindPushPopMaskCFI(regMaskTP regMask, bool isFloat);
void unwindAllocStackCFI(unsigned size);
void unwindSetFrameRegCFI(regNumber reg, unsigned offset);
void unwindEmitFuncCFI(FuncInfoDsc* func, void* pHotCode, void* pColdCode);
#ifdef DEBUG
void DumpCfiInfo(bool isHotCode,
UNATIVE_OFFSET startOffset,
UNATIVE_OFFSET endOffset,
DWORD cfiCodeBytes,
const CFI_CODE* const pCfiCode);
#endif
#endif // FEATURE_CFI_SUPPORT
#if !defined(__GNUC__)
#pragma endregion // Note: region is NOT under !defined(__GNUC__)
#endif
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX SIMD XX
XX XX
XX Info about SIMD types, methods and the SIMD assembly (i.e. the assembly XX
XX that contains the distinguished, well-known SIMD type definitions). XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
bool IsBaselineSimdIsaSupported()
{
#ifdef FEATURE_SIMD
#if defined(TARGET_XARCH)
CORINFO_InstructionSet minimumIsa = InstructionSet_SSE2;
#elif defined(TARGET_ARM64)
CORINFO_InstructionSet minimumIsa = InstructionSet_AdvSimd;
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
return compOpportunisticallyDependsOn(minimumIsa);
#else
return false;
#endif
}
#if defined(DEBUG)
bool IsBaselineSimdIsaSupportedDebugOnly()
{
#ifdef FEATURE_SIMD
#if defined(TARGET_XARCH)
CORINFO_InstructionSet minimumIsa = InstructionSet_SSE2;
#elif defined(TARGET_ARM64)
CORINFO_InstructionSet minimumIsa = InstructionSet_AdvSimd;
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
return compIsaSupportedDebugOnly(minimumIsa);
#else
return false;
#endif // FEATURE_SIMD
}
#endif // DEBUG
// Get highest available level for SIMD codegen
SIMDLevel getSIMDSupportLevel()
{
#if defined(TARGET_XARCH)
if (compOpportunisticallyDependsOn(InstructionSet_AVX2))
{
return SIMD_AVX2_Supported;
}
if (compOpportunisticallyDependsOn(InstructionSet_SSE42))
{
return SIMD_SSE4_Supported;
}
// min bar is SSE2
return SIMD_SSE2_Supported;
#else
assert(!"Available instruction set(s) for SIMD codegen is not defined for target arch");
unreached();
return SIMD_Not_Supported;
#endif
}
bool isIntrinsicType(CORINFO_CLASS_HANDLE clsHnd)
{
return info.compCompHnd->isIntrinsicType(clsHnd);
}
const char* getClassNameFromMetadata(CORINFO_CLASS_HANDLE cls, const char** namespaceName)
{
return info.compCompHnd->getClassNameFromMetadata(cls, namespaceName);
}
CORINFO_CLASS_HANDLE getTypeInstantiationArgument(CORINFO_CLASS_HANDLE cls, unsigned index)
{
return info.compCompHnd->getTypeInstantiationArgument(cls, index);
}
#ifdef FEATURE_SIMD
// Have we identified any SIMD types?
    // This is currently used by struct promotion to avoid querying type information for a struct
    // field (to see whether it is a SIMD type) when we haven't seen any SIMD types or operations
    // in the method.
bool _usesSIMDTypes;
bool usesSIMDTypes()
{
return _usesSIMDTypes;
}
void setUsesSIMDTypes(bool value)
{
_usesSIMDTypes = value;
}
// This is a temp lclVar allocated on the stack as TYP_SIMD. It is used to implement intrinsics
// that require indexed access to the individual fields of the vector, which is not well supported
// by the hardware. It is allocated when/if such situations are encountered during Lowering.
unsigned lvaSIMDInitTempVarNum;
struct SIMDHandlesCache
{
// SIMD Types
CORINFO_CLASS_HANDLE SIMDFloatHandle;
CORINFO_CLASS_HANDLE SIMDDoubleHandle;
CORINFO_CLASS_HANDLE SIMDIntHandle;
CORINFO_CLASS_HANDLE SIMDUShortHandle;
CORINFO_CLASS_HANDLE SIMDUByteHandle;
CORINFO_CLASS_HANDLE SIMDShortHandle;
CORINFO_CLASS_HANDLE SIMDByteHandle;
CORINFO_CLASS_HANDLE SIMDLongHandle;
CORINFO_CLASS_HANDLE SIMDUIntHandle;
CORINFO_CLASS_HANDLE SIMDULongHandle;
CORINFO_CLASS_HANDLE SIMDNIntHandle;
CORINFO_CLASS_HANDLE SIMDNUIntHandle;
CORINFO_CLASS_HANDLE SIMDVector2Handle;
CORINFO_CLASS_HANDLE SIMDVector3Handle;
CORINFO_CLASS_HANDLE SIMDVector4Handle;
CORINFO_CLASS_HANDLE SIMDVectorHandle;
#ifdef FEATURE_HW_INTRINSICS
#if defined(TARGET_ARM64)
CORINFO_CLASS_HANDLE Vector64FloatHandle;
CORINFO_CLASS_HANDLE Vector64DoubleHandle;
CORINFO_CLASS_HANDLE Vector64IntHandle;
CORINFO_CLASS_HANDLE Vector64UShortHandle;
CORINFO_CLASS_HANDLE Vector64UByteHandle;
CORINFO_CLASS_HANDLE Vector64ShortHandle;
CORINFO_CLASS_HANDLE Vector64ByteHandle;
CORINFO_CLASS_HANDLE Vector64LongHandle;
CORINFO_CLASS_HANDLE Vector64UIntHandle;
CORINFO_CLASS_HANDLE Vector64ULongHandle;
CORINFO_CLASS_HANDLE Vector64NIntHandle;
CORINFO_CLASS_HANDLE Vector64NUIntHandle;
#endif // defined(TARGET_ARM64)
CORINFO_CLASS_HANDLE Vector128FloatHandle;
CORINFO_CLASS_HANDLE Vector128DoubleHandle;
CORINFO_CLASS_HANDLE Vector128IntHandle;
CORINFO_CLASS_HANDLE Vector128UShortHandle;
CORINFO_CLASS_HANDLE Vector128UByteHandle;
CORINFO_CLASS_HANDLE Vector128ShortHandle;
CORINFO_CLASS_HANDLE Vector128ByteHandle;
CORINFO_CLASS_HANDLE Vector128LongHandle;
CORINFO_CLASS_HANDLE Vector128UIntHandle;
CORINFO_CLASS_HANDLE Vector128ULongHandle;
CORINFO_CLASS_HANDLE Vector128NIntHandle;
CORINFO_CLASS_HANDLE Vector128NUIntHandle;
#if defined(TARGET_XARCH)
CORINFO_CLASS_HANDLE Vector256FloatHandle;
CORINFO_CLASS_HANDLE Vector256DoubleHandle;
CORINFO_CLASS_HANDLE Vector256IntHandle;
CORINFO_CLASS_HANDLE Vector256UShortHandle;
CORINFO_CLASS_HANDLE Vector256UByteHandle;
CORINFO_CLASS_HANDLE Vector256ShortHandle;
CORINFO_CLASS_HANDLE Vector256ByteHandle;
CORINFO_CLASS_HANDLE Vector256LongHandle;
CORINFO_CLASS_HANDLE Vector256UIntHandle;
CORINFO_CLASS_HANDLE Vector256ULongHandle;
CORINFO_CLASS_HANDLE Vector256NIntHandle;
CORINFO_CLASS_HANDLE Vector256NUIntHandle;
#endif // defined(TARGET_XARCH)
#endif // FEATURE_HW_INTRINSICS
SIMDHandlesCache()
{
memset(this, 0, sizeof(*this));
}
};
SIMDHandlesCache* m_simdHandleCache;
// Get an appropriate "zero" for the given type and class handle.
GenTree* gtGetSIMDZero(var_types simdType, CorInfoType simdBaseJitType, CORINFO_CLASS_HANDLE simdHandle);
// Get the handle for a SIMD type.
CORINFO_CLASS_HANDLE gtGetStructHandleForSIMD(var_types simdType, CorInfoType simdBaseJitType)
{
if (m_simdHandleCache == nullptr)
{
            // This may happen if the JIT generates SIMD nodes on its own, without importing them.
// Otherwise getBaseJitTypeAndSizeOfSIMDType should have created the cache.
return NO_CLASS_HANDLE;
}
if (simdBaseJitType == CORINFO_TYPE_FLOAT)
{
switch (simdType)
{
case TYP_SIMD8:
return m_simdHandleCache->SIMDVector2Handle;
case TYP_SIMD12:
return m_simdHandleCache->SIMDVector3Handle;
case TYP_SIMD16:
if ((getSIMDVectorType() == TYP_SIMD32) ||
(m_simdHandleCache->SIMDVector4Handle != NO_CLASS_HANDLE))
{
return m_simdHandleCache->SIMDVector4Handle;
}
break;
case TYP_SIMD32:
break;
default:
unreached();
}
}
assert(emitTypeSize(simdType) <= largestEnregisterableStructSize());
switch (simdBaseJitType)
{
case CORINFO_TYPE_FLOAT:
return m_simdHandleCache->SIMDFloatHandle;
case CORINFO_TYPE_DOUBLE:
return m_simdHandleCache->SIMDDoubleHandle;
case CORINFO_TYPE_INT:
return m_simdHandleCache->SIMDIntHandle;
case CORINFO_TYPE_USHORT:
return m_simdHandleCache->SIMDUShortHandle;
case CORINFO_TYPE_UBYTE:
return m_simdHandleCache->SIMDUByteHandle;
case CORINFO_TYPE_SHORT:
return m_simdHandleCache->SIMDShortHandle;
case CORINFO_TYPE_BYTE:
return m_simdHandleCache->SIMDByteHandle;
case CORINFO_TYPE_LONG:
return m_simdHandleCache->SIMDLongHandle;
case CORINFO_TYPE_UINT:
return m_simdHandleCache->SIMDUIntHandle;
case CORINFO_TYPE_ULONG:
return m_simdHandleCache->SIMDULongHandle;
case CORINFO_TYPE_NATIVEINT:
return m_simdHandleCache->SIMDNIntHandle;
case CORINFO_TYPE_NATIVEUINT:
return m_simdHandleCache->SIMDNUIntHandle;
default:
assert(!"Didn't find a class handle for simdType");
}
return NO_CLASS_HANDLE;
}
// Returns true if this is a SIMD type that should be considered an opaque
// vector type (i.e. do not analyze or promote its fields).
// Note that all but the fixed vector types are opaque, even though they may
// actually be declared as having fields.
bool isOpaqueSIMDType(CORINFO_CLASS_HANDLE structHandle) const
{
return ((m_simdHandleCache != nullptr) && (structHandle != m_simdHandleCache->SIMDVector2Handle) &&
(structHandle != m_simdHandleCache->SIMDVector3Handle) &&
(structHandle != m_simdHandleCache->SIMDVector4Handle));
}
// Returns true if the tree corresponds to a TYP_SIMD lcl var.
    // Note that both SIMD vector args and locals are marked as lvSIMDType = true, but
    // the type of an arg node is TYP_BYREF while that of a local node is TYP_SIMD or TYP_STRUCT.
bool isSIMDTypeLocal(GenTree* tree)
{
return tree->OperIsLocal() && lvaGetDesc(tree->AsLclVarCommon())->lvSIMDType;
}
// Returns true if the lclVar is an opaque SIMD type.
bool isOpaqueSIMDLclVar(const LclVarDsc* varDsc) const
{
if (!varDsc->lvSIMDType)
{
return false;
}
return isOpaqueSIMDType(varDsc->GetStructHnd());
}
static bool isRelOpSIMDIntrinsic(SIMDIntrinsicID intrinsicId)
{
return (intrinsicId == SIMDIntrinsicEqual);
}
// Returns base JIT type of a TYP_SIMD local.
// Returns CORINFO_TYPE_UNDEF if the local is not TYP_SIMD.
CorInfoType getBaseJitTypeOfSIMDLocal(GenTree* tree)
{
if (isSIMDTypeLocal(tree))
{
return lvaGetDesc(tree->AsLclVarCommon())->GetSimdBaseJitType();
}
return CORINFO_TYPE_UNDEF;
}
bool isSIMDClass(CORINFO_CLASS_HANDLE clsHnd)
{
if (isIntrinsicType(clsHnd))
{
const char* namespaceName = nullptr;
(void)getClassNameFromMetadata(clsHnd, &namespaceName);
return strcmp(namespaceName, "System.Numerics") == 0;
}
return false;
}
bool isSIMDClass(typeInfo* pTypeInfo)
{
return pTypeInfo->IsStruct() && isSIMDClass(pTypeInfo->GetClassHandleForValueClass());
}
bool isHWSIMDClass(CORINFO_CLASS_HANDLE clsHnd)
{
#ifdef FEATURE_HW_INTRINSICS
if (isIntrinsicType(clsHnd))
{
const char* namespaceName = nullptr;
(void)getClassNameFromMetadata(clsHnd, &namespaceName);
return strcmp(namespaceName, "System.Runtime.Intrinsics") == 0;
}
#endif // FEATURE_HW_INTRINSICS
return false;
}
bool isHWSIMDClass(typeInfo* pTypeInfo)
{
#ifdef FEATURE_HW_INTRINSICS
return pTypeInfo->IsStruct() && isHWSIMDClass(pTypeInfo->GetClassHandleForValueClass());
#else
return false;
#endif
}
bool isSIMDorHWSIMDClass(CORINFO_CLASS_HANDLE clsHnd)
{
return isSIMDClass(clsHnd) || isHWSIMDClass(clsHnd);
}
bool isSIMDorHWSIMDClass(typeInfo* pTypeInfo)
{
return isSIMDClass(pTypeInfo) || isHWSIMDClass(pTypeInfo);
}
// Get the base (element) type and size in bytes for a SIMD type. Returns CORINFO_TYPE_UNDEF
// if it is not a SIMD type or is an unsupported base JIT type.
CorInfoType getBaseJitTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd, unsigned* sizeBytes = nullptr);
CorInfoType getBaseJitTypeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd)
{
return getBaseJitTypeAndSizeOfSIMDType(typeHnd, nullptr);
}
// Get SIMD Intrinsic info given the method handle.
// Also sets typeHnd, argCount, simdBaseJitType and sizeBytes out params.
const SIMDIntrinsicInfo* getSIMDIntrinsicInfo(CORINFO_CLASS_HANDLE* typeHnd,
CORINFO_METHOD_HANDLE methodHnd,
CORINFO_SIG_INFO* sig,
bool isNewObj,
unsigned* argCount,
CorInfoType* simdBaseJitType,
unsigned* sizeBytes);
// Pops and returns a GenTree node from the importer's type stack.
// Normalizes TYP_STRUCT value in case of GT_CALL, GT_RET_EXPR and arg nodes.
GenTree* impSIMDPopStack(var_types type, bool expectAddr = false, CORINFO_CLASS_HANDLE structType = nullptr);
// Transforms operands and returns the SIMD intrinsic to be applied on
// transformed operands to obtain given relop result.
SIMDIntrinsicID impSIMDRelOp(SIMDIntrinsicID relOpIntrinsicId,
CORINFO_CLASS_HANDLE typeHnd,
unsigned simdVectorSize,
CorInfoType* inOutBaseJitType,
GenTree** op1,
GenTree** op2);
#if defined(TARGET_XARCH)
// Transforms operands and returns the SIMD intrinsic to be applied on
// transformed operands to obtain == comparison result.
SIMDIntrinsicID impSIMDLongRelOpEqual(CORINFO_CLASS_HANDLE typeHnd,
unsigned simdVectorSize,
GenTree** op1,
GenTree** op2);
#endif // defined(TARGET_XARCH)
void setLclRelatedToSIMDIntrinsic(GenTree* tree);
bool areFieldsContiguous(GenTree* op1, GenTree* op2);
bool areLocalFieldsContiguous(GenTreeLclFld* first, GenTreeLclFld* second);
bool areArrayElementsContiguous(GenTree* op1, GenTree* op2);
bool areArgumentsContiguous(GenTree* op1, GenTree* op2);
GenTree* createAddressNodeForSIMDInit(GenTree* tree, unsigned simdSize);
// check methodHnd to see if it is a SIMD method that is expanded as an intrinsic in the JIT.
GenTree* impSIMDIntrinsic(OPCODE opcode,
GenTree* newobjThis,
CORINFO_CLASS_HANDLE clsHnd,
CORINFO_METHOD_HANDLE method,
CORINFO_SIG_INFO* sig,
unsigned methodFlags,
int memberRef);
GenTree* getOp1ForConstructor(OPCODE opcode, GenTree* newobjThis, CORINFO_CLASS_HANDLE clsHnd);
// Whether a SIMD vector occupies part of a SIMD register.
// SSE2: vector2f/3f are considered sub-register SIMD types.
// AVX: vector2f, 3f and 4f are all considered sub-register SIMD types.
bool isSubRegisterSIMDType(GenTreeSIMD* simdNode)
{
unsigned vectorRegisterByteLength;
#if defined(TARGET_XARCH)
// Calling the getSIMDVectorRegisterByteLength api causes the size of Vector<T> to be recorded
// with the AOT compiler, so that it cannot change from AOT compilation time to runtime.
// This api does not require such fixing as it merely pertains to the size of the simd type
// relative to the Vector<T> size as used at compile time. (So detecting a vector length of 16 here
// does not preclude the code from being used on a machine with a larger vector length.)
if (getSIMDSupportLevel() < SIMD_AVX2_Supported)
{
vectorRegisterByteLength = 16;
}
else
{
vectorRegisterByteLength = 32;
}
#else
vectorRegisterByteLength = getSIMDVectorRegisterByteLength();
#endif
return (simdNode->GetSimdSize() < vectorRegisterByteLength);
}
// Get the type for the hardware SIMD vector.
// This is the maximum SIMD type supported for this target.
var_types getSIMDVectorType()
{
#if defined(TARGET_XARCH)
if (getSIMDSupportLevel() == SIMD_AVX2_Supported)
{
return TYP_SIMD32;
}
else
{
// Verify and record that AVX2 isn't supported
compVerifyInstructionSetUnusable(InstructionSet_AVX2);
assert(getSIMDSupportLevel() >= SIMD_SSE2_Supported);
return TYP_SIMD16;
}
#elif defined(TARGET_ARM64)
return TYP_SIMD16;
#else
assert(!"getSIMDVectorType() unimplemented on target arch");
unreached();
#endif
}
// Get the size of the SIMD type in bytes
int getSIMDTypeSizeInBytes(CORINFO_CLASS_HANDLE typeHnd)
{
unsigned sizeBytes = 0;
(void)getBaseJitTypeAndSizeOfSIMDType(typeHnd, &sizeBytes);
return sizeBytes;
}
// Get the number of elements of baseType in a SIMD vector given by its size and baseType
static int getSIMDVectorLength(unsigned simdSize, var_types baseType);
// Get the number of elements of baseType in a SIMD vector given by its type handle
int getSIMDVectorLength(CORINFO_CLASS_HANDLE typeHnd);
// Get preferred alignment of SIMD type.
int getSIMDTypeAlignment(var_types simdType);
// Get the number of bytes in a System.Numerics.Vector<T> for the current compilation.
// Note - cannot be used for System.Runtime.Intrinsics types
unsigned getSIMDVectorRegisterByteLength()
{
#if defined(TARGET_XARCH)
if (getSIMDSupportLevel() == SIMD_AVX2_Supported)
{
return YMM_REGSIZE_BYTES;
}
else
{
// Verify and record that AVX2 isn't supported
compVerifyInstructionSetUnusable(InstructionSet_AVX2);
assert(getSIMDSupportLevel() >= SIMD_SSE2_Supported);
return XMM_REGSIZE_BYTES;
}
#elif defined(TARGET_ARM64)
return FP_REGSIZE_BYTES;
#else
assert(!"getSIMDVectorRegisterByteLength() unimplemented on target arch");
unreached();
#endif
}
// The minimum and maximum possible number of bytes in a SIMD vector.
// maxSIMDStructBytes
// The maximum SIMD size supported by System.Numerics.Vectors or System.Runtime.Intrinsics
// SSE: 16-byte Vector<T> and Vector128<T>
// AVX: 32-byte Vector256<T> (Vector<T> is 16-byte)
// AVX2: 32-byte Vector<T> and Vector256<T>
unsigned int maxSIMDStructBytes()
{
#if defined(FEATURE_HW_INTRINSICS) && defined(TARGET_XARCH)
if (compOpportunisticallyDependsOn(InstructionSet_AVX))
{
return YMM_REGSIZE_BYTES;
}
else
{
// Verify and record that AVX2 isn't supported
compVerifyInstructionSetUnusable(InstructionSet_AVX2);
assert(getSIMDSupportLevel() >= SIMD_SSE2_Supported);
return XMM_REGSIZE_BYTES;
}
#else
return getSIMDVectorRegisterByteLength();
#endif
}
unsigned int minSIMDStructBytes()
{
return emitTypeSize(TYP_SIMD8);
}
public:
// Returns the codegen type for a given SIMD size.
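// For example, an 8-byte Vector2 maps to TYP_SIMD8, a 12-byte Vector3 to TYP_SIMD12,
// a 16-byte Vector128<T> to TYP_SIMD16, and a 32-byte Vector256<T> to TYP_SIMD32.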
static var_types getSIMDTypeForSize(unsigned size)
{
var_types simdType = TYP_UNDEF;
if (size == 8)
{
simdType = TYP_SIMD8;
}
else if (size == 12)
{
simdType = TYP_SIMD12;
}
else if (size == 16)
{
simdType = TYP_SIMD16;
}
else if (size == 32)
{
simdType = TYP_SIMD32;
}
else
{
noway_assert(!"Unexpected size for SIMD type");
}
return simdType;
}
private:
unsigned getSIMDInitTempVarNum(var_types simdType);
#else // !FEATURE_SIMD
bool isOpaqueSIMDLclVar(LclVarDsc* varDsc)
{
return false;
}
#endif // FEATURE_SIMD
public:
//------------------------------------------------------------------------
// largestEnregisterableStructSize: The size in bytes of the largest struct that can be enregistered.
//
// Notes: It is not guaranteed that a struct of this size or smaller WILL be a
// candidate for enregistration.
unsigned largestEnregisterableStructSize()
{
#ifdef FEATURE_SIMD
#if defined(FEATURE_HW_INTRINSICS) && defined(TARGET_XARCH)
if (opts.IsReadyToRun())
{
// Return a constant instead of calling maxSIMDStructBytes, as maxSIMDStructBytes performs
// checks that are affected by the current level of instruction set support and would
// otherwise cause the highest level of instruction set support to be reported to crossgen2.
// This api is only ever used as an optimization or assert, so no reporting should
// ever happen.
return YMM_REGSIZE_BYTES;
}
#endif // defined(FEATURE_HW_INTRINSICS) && defined(TARGET_XARCH)
unsigned vectorRegSize = maxSIMDStructBytes();
assert(vectorRegSize >= TARGET_POINTER_SIZE);
return vectorRegSize;
#else // !FEATURE_SIMD
return TARGET_POINTER_SIZE;
#endif // !FEATURE_SIMD
}
// Use to determine if a struct *might* be a SIMD type. As this function only takes a size, many
// structs will fit the criteria.
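// Illustrative bounds (assuming the sizes computed by the helpers used below): on x64 with AVX
// support this accepts sizes from 8 bytes (TYP_SIMD8) up to 32 bytes, and on ARM64 from 8 up to 16 bytes.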
bool structSizeMightRepresentSIMDType(size_t structSize)
{
#ifdef FEATURE_SIMD
// Do not use maxSIMDStructBytes, as in R2R on X86 and X64 that api may notify the JIT
// about the size of a struct under the assumption that the struct size needs to be recorded.
// By using largestEnregisterableStructSize here, the detail of whether or not Vector256<T>
// is enregistered will not be messaged to the R2R compiler.
return (structSize >= minSIMDStructBytes()) && (structSize <= largestEnregisterableStructSize());
#else
return false;
#endif // FEATURE_SIMD
}
#ifdef FEATURE_SIMD
static bool vnEncodesResultTypeForSIMDIntrinsic(SIMDIntrinsicID intrinsicId);
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
static bool vnEncodesResultTypeForHWIntrinsic(NamedIntrinsic hwIntrinsicID);
#endif // FEATURE_HW_INTRINSICS
private:
// These routines need not be enclosed under FEATURE_SIMD since lvIsSIMDType()
// is defined appropriately for both FEATURE_SIMD and !FEATURE_SIMD. Using
// these routines also avoids the need for #ifdef FEATURE_SIMD specific code.
// Is this var of SIMD struct type?
bool lclVarIsSIMDType(unsigned varNum)
{
return lvaGetDesc(varNum)->lvIsSIMDType();
}
// Is this Local node a SIMD local?
bool lclVarIsSIMDType(GenTreeLclVarCommon* lclVarTree)
{
return lclVarIsSIMDType(lclVarTree->GetLclNum());
}
// Returns true if the TYP_SIMD locals on stack are aligned at their
// preferred byte boundary specified by getSIMDTypeAlignment().
//
// As per the Intel manual, the preferred alignment for AVX vectors is
// 32 bytes. It is not clear whether the additional stack space used in
// aligning the stack is worth the benefit, so for now we use 16-byte
// alignment for AVX 256-bit vectors with unaligned load/stores to/from
// memory. On x86, the stack frame is aligned to 4 bytes. We need to extend
// existing support for double (8-byte) alignment to 16 or 32 byte
// alignment for frames with local SIMD vars, if that is determined to be
// profitable.
//
// On Amd64 and SysV, RSP+8 is aligned on entry to the function (before
// prolog has run). This means that in RBP-based frames RBP will be 16-byte
// aligned. For RSP-based frames these are only sometimes aligned, depending
// on the frame size.
//
bool isSIMDTypeLocalAligned(unsigned varNum)
{
#if defined(FEATURE_SIMD) && ALIGN_SIMD_TYPES
if (lclVarIsSIMDType(varNum) && lvaTable[varNum].lvType != TYP_BYREF)
{
// TODO-Cleanup: Can't this use the lvExactSize on the varDsc?
int alignment = getSIMDTypeAlignment(lvaTable[varNum].lvType);
if (alignment <= STACK_ALIGN)
{
bool rbpBased;
int off = lvaFrameAddress(varNum, &rbpBased);
// On SysV and Winx64 ABIs RSP+8 will be 16-byte aligned at the
// first instruction of a function. If our frame is RBP based
// then RBP will always be 16 bytes aligned, so we can simply
// check the offset.
if (rbpBased)
{
return (off % alignment) == 0;
}
// For RSP-based frame the alignment of RSP depends on our
// locals. rsp+8 is aligned on entry and we just subtract frame
// size so it is not hard to compute. Note that the compiler
// tries hard to make sure the frame size means RSP will be
// 16-byte aligned, but for leaf functions without locals (i.e.
// frameSize = 0) it will not be.
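// Illustrative example (hypothetical numbers, not taken from any particular method):
// for a TYP_SIMD16 local (alignment of 16 assumed), frameSize = 40 and off = -16 gives
// (8 - 40 + (-16)) = -48, and -48 % 16 == 0, so the local is reported as 16-byte aligned.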
int frameSize = codeGen->genTotalFrameSize();
return ((8 - frameSize + off) % alignment) == 0;
}
}
#endif // FEATURE_SIMD
return false;
}
#ifdef DEBUG
// Answer the question: Is a particular ISA supported?
// Use this api when asking the question so that future
// ISA questions can be asked correctly, or when asserting
// support/nonsupport for an instruction set.
bool compIsaSupportedDebugOnly(CORINFO_InstructionSet isa) const
{
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
return (opts.compSupportsISA & (1ULL << isa)) != 0;
#else
return false;
#endif
}
#endif // DEBUG
bool notifyInstructionSetUsage(CORINFO_InstructionSet isa, bool supported) const;
// Answer the question: Is a particular ISA allowed to be used implicitly by optimizations?
// The result of this api call will exactly match the target machine
// on which the function is executed (except for CoreLib, where there are special rules)
bool compExactlyDependsOn(CORINFO_InstructionSet isa) const
{
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
uint64_t isaBit = (1ULL << isa);
if ((opts.compSupportsISAReported & isaBit) == 0)
{
if (notifyInstructionSetUsage(isa, (opts.compSupportsISA & isaBit) != 0))
((Compiler*)this)->opts.compSupportsISAExactly |= isaBit;
((Compiler*)this)->opts.compSupportsISAReported |= isaBit;
}
return (opts.compSupportsISAExactly & isaBit) != 0;
#else
return false;
#endif
}
// Ensure that code will not execute if an instruction set is usable. Call only
// if the instruction set has previously been reported as unusable, but when
// that status has not yet been recorded to the AOT compiler.
void compVerifyInstructionSetUnusable(CORINFO_InstructionSet isa)
{
// use compExactlyDependsOn to capture and record the use of the isa
bool isaUsable = compExactlyDependsOn(isa);
// Assert that the ISA is unusable. If it were usable, this function should never have been called.
assert(!isaUsable);
}
// Answer the question: Is a particular ISA allowed to be used implicitly by optimizations?
// The result of this api call will match the target machine if the result is true
// If the result is false, then the target machine may have support for the instruction
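// Illustrative usage: canUseVexEncoding() below guards VEX emission with
// compOpportunisticallyDependsOn(InstructionSet_AVX), while explicit hardware intrinsic
// expansion queries compHWIntrinsicDependsOn/compExactlyDependsOn so the result matches
// the target machine exactly.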
bool compOpportunisticallyDependsOn(CORINFO_InstructionSet isa) const
{
if ((opts.compSupportsISA & (1ULL << isa)) != 0)
{
return compExactlyDependsOn(isa);
}
else
{
return false;
}
}
// Answer the question: Is a particular ISA supported for explicit hardware intrinsics?
bool compHWIntrinsicDependsOn(CORINFO_InstructionSet isa) const
{
// Report intent to use the ISA to the EE
compExactlyDependsOn(isa);
return ((opts.compSupportsISA & (1ULL << isa)) != 0);
}
bool canUseVexEncoding() const
{
#ifdef TARGET_XARCH
return compOpportunisticallyDependsOn(InstructionSet_AVX);
#else
return false;
#endif
}
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Compiler XX
XX XX
XX Generic info about the compilation and the method being compiled. XX
XX It is responsible for driving the other phases. XX
XX It is also responsible for all the memory management. XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
public:
Compiler* InlineeCompiler; // The Compiler instance for the inlinee
InlineResult* compInlineResult; // The result of importing the inlinee method.
bool compDoAggressiveInlining; // If true, mark every method as CORINFO_FLG_FORCEINLINE
bool compJmpOpUsed; // Does the method do a JMP
bool compLongUsed; // Does the method use TYP_LONG
bool compFloatingPointUsed; // Does the method use TYP_FLOAT or TYP_DOUBLE
bool compTailCallUsed; // Does the method do a tailcall
bool compTailPrefixSeen; // Does the method IL have tail. prefix
bool compLocallocSeen; // Does the method IL have localloc opcode
bool compLocallocUsed; // Does the method use localloc.
bool compLocallocOptimized; // Does the method have an optimized localloc
bool compQmarkUsed; // Does the method use GT_QMARK/GT_COLON
bool compQmarkRationalized; // Is it allowed to use a GT_QMARK/GT_COLON node.
bool compHasBackwardJump; // Does the method (or some inlinee) have a lexically backwards jump?
bool compHasBackwardJumpInHandler; // Does the method have a lexically backwards jump in a handler?
bool compSwitchedToOptimized; // Codegen initially was Tier0 but jit switched to FullOpts
bool compSwitchedToMinOpts; // Codegen initially was Tier1/FullOpts but jit switched to MinOpts
bool compSuppressedZeroInit; // There are vars with lvSuppressedZeroInit set
// NOTE: These values are only reliable after
// the importing is completely finished.
#ifdef DEBUG
// State information - which phases have completed?
// These are kept together for easy discoverability
bool bRangeAllowStress;
bool compCodeGenDone;
int64_t compNumStatementLinksTraversed; // # of links traversed while doing debug checks
bool fgNormalizeEHDone; // Has the flowgraph EH normalization phase been done?
size_t compSizeEstimate; // The estimated size of the method as per `gtSetEvalOrder`.
size_t compCycleEstimate; // The estimated cycle count of the method as per `gtSetEvalOrder`
#endif // DEBUG
bool fgLocalVarLivenessDone; // Note that this one is used outside of debug.
bool fgLocalVarLivenessChanged;
bool compLSRADone;
bool compRationalIRForm;
bool compUsesThrowHelper; // There is a call to a THROW_HELPER for the compiled method.
bool compGeneratingProlog;
bool compGeneratingEpilog;
bool compNeedsGSSecurityCookie; // There is an unsafe buffer (or localloc) on the stack.
// Insert cookie on frame and code to check the cookie, like VC++ -GS.
bool compGSReorderStackLayout; // There is an unsafe buffer on the stack, reorder locals and make local
// copies of susceptible parameters to avoid buffer overrun attacks through locals/params
bool getNeedsGSSecurityCookie() const
{
return compNeedsGSSecurityCookie;
}
void setNeedsGSSecurityCookie()
{
compNeedsGSSecurityCookie = true;
}
FrameLayoutState lvaDoneFrameLayout; // The highest frame layout state that we've completed. During
// frame layout calculations, this is the level we are currently
// computing.
//---------------------------- JITing options -----------------------------
enum codeOptimize
{
BLENDED_CODE,
SMALL_CODE,
FAST_CODE,
COUNT_OPT_CODE
};
struct Options
{
JitFlags* jitFlags; // all flags passed from the EE
// The instruction sets that the compiler is allowed to emit.
uint64_t compSupportsISA;
// The instruction sets that were reported to the VM as being used by the current method. Subset of
// compSupportsISA.
uint64_t compSupportsISAReported;
// The instruction sets that the compiler is allowed to take advantage of implicitly during optimizations.
// Subset of compSupportsISA.
// The instruction sets available in compSupportsISA and not available in compSupportsISAExactly can be only
// used via explicit hardware intrinsics.
uint64_t compSupportsISAExactly;
void setSupportedISAs(CORINFO_InstructionSetFlags isas)
{
compSupportsISA = isas.GetFlagsRaw();
}
unsigned compFlags; // method attributes
unsigned instrCount;
unsigned lvRefCount;
codeOptimize compCodeOpt; // what type of code optimizations
bool compUseCMOV;
// optimize maximally and/or favor speed over size?
#define DEFAULT_MIN_OPTS_CODE_SIZE 60000
#define DEFAULT_MIN_OPTS_INSTR_COUNT 20000
#define DEFAULT_MIN_OPTS_BB_COUNT 2000
#define DEFAULT_MIN_OPTS_LV_NUM_COUNT 2000
#define DEFAULT_MIN_OPTS_LV_REF_COUNT 8000
// Maximum number of locals before turning off inlining
#define MAX_LV_NUM_COUNT_FOR_INLINING 512
bool compMinOpts;
bool compMinOptsIsSet;
#ifdef DEBUG
mutable bool compMinOptsIsUsed;
bool MinOpts() const
{
assert(compMinOptsIsSet);
compMinOptsIsUsed = true;
return compMinOpts;
}
bool IsMinOptsSet() const
{
return compMinOptsIsSet;
}
#else // !DEBUG
bool MinOpts() const
{
return compMinOpts;
}
bool IsMinOptsSet() const
{
return compMinOptsIsSet;
}
#endif // !DEBUG
bool OptimizationDisabled() const
{
return MinOpts() || compDbgCode;
}
bool OptimizationEnabled() const
{
return !OptimizationDisabled();
}
void SetMinOpts(bool val)
{
assert(!compMinOptsIsUsed);
assert(!compMinOptsIsSet || (compMinOpts == val));
compMinOpts = val;
compMinOptsIsSet = true;
}
// true if the CLFLG_* for an optimization is set.
bool OptEnabled(unsigned optFlag) const
{
return !!(compFlags & optFlag);
}
#ifdef FEATURE_READYTORUN
bool IsReadyToRun() const
{
return jitFlags->IsSet(JitFlags::JIT_FLAG_READYTORUN);
}
#else
bool IsReadyToRun() const
{
return false;
}
#endif
// Check if the compilation is control-flow guard enabled.
bool IsCFGEnabled() const
{
#if defined(TARGET_ARM64) || defined(TARGET_AMD64)
// On these platforms we assume the register that the target is
// passed in is preserved by the validator and take care to get the
// target from the register for the call (even in debug mode).
static_assert_no_msg((RBM_VALIDATE_INDIRECT_CALL_TRASH & (1 << REG_VALIDATE_INDIRECT_CALL_ADDR)) == 0);
if (JitConfig.JitForceControlFlowGuard())
return true;
return jitFlags->IsSet(JitFlags::JIT_FLAG_ENABLE_CFG);
#else
// The remaining platforms are not supported and would require some
// work to support.
//
// ARM32:
// The ARM32 validator does not preserve any volatile registers
// which means we have to take special care to allocate and use a
// callee-saved register (reloading the target from memory is a
// security issue).
//
// x86:
// On x86 some VSD calls disassemble the call site and expect an
// indirect call which is fundamentally incompatible with CFG.
// This would require a different way to pass this information
// through.
//
return false;
#endif
}
#ifdef FEATURE_ON_STACK_REPLACEMENT
bool IsOSR() const
{
return jitFlags->IsSet(JitFlags::JIT_FLAG_OSR);
}
#else
bool IsOSR() const
{
return false;
}
#endif
// true if we should use the PINVOKE_{BEGIN,END} helpers instead of generating
// PInvoke transitions inline. Normally used by R2R, but also used when generating a reverse pinvoke frame, as
// the current logic for frame setup initializes and pushes
// the InlinedCallFrame before performing the Reverse PInvoke transition, which is invalid (as frames cannot
// safely be pushed/popped while the thread is in a preemptive state).
bool ShouldUsePInvokeHelpers()
{
return jitFlags->IsSet(JitFlags::JIT_FLAG_USE_PINVOKE_HELPERS) ||
jitFlags->IsSet(JitFlags::JIT_FLAG_REVERSE_PINVOKE);
}
// true if we should insert the REVERSE_PINVOKE_{ENTER,EXIT} helpers in the method
// prolog/epilog
bool IsReversePInvoke()
{
return jitFlags->IsSet(JitFlags::JIT_FLAG_REVERSE_PINVOKE);
}
bool compScopeInfo; // Generate the LocalVar info ?
bool compDbgCode; // Generate debugger-friendly code?
bool compDbgInfo; // Gather debugging info?
bool compDbgEnC;
#ifdef PROFILING_SUPPORTED
bool compNoPInvokeInlineCB;
#else
static const bool compNoPInvokeInlineCB;
#endif
#ifdef DEBUG
bool compGcChecks; // Check arguments and return values to ensure they are sane
#endif
#if defined(DEBUG) && defined(TARGET_XARCH)
bool compStackCheckOnRet; // Check stack pointer on return to ensure it is correct.
#endif // defined(DEBUG) && defined(TARGET_XARCH)
#if defined(DEBUG) && defined(TARGET_X86)
bool compStackCheckOnCall; // Check stack pointer after call to ensure it is correct. Only for x86.
#endif // defined(DEBUG) && defined(TARGET_X86)
bool compReloc; // Generate relocs for pointers in code, true for all ngen/prejit codegen
#ifdef DEBUG
#if defined(TARGET_XARCH)
bool compEnablePCRelAddr; // Whether absolute addresses should be encoded as PC-relative offsets by RyuJIT where possible
#endif
#endif // DEBUG
#ifdef UNIX_AMD64_ABI
// This flag indicates whether there is a need to align the frame.
// On AMD64-Windows, if there are calls, 4 slots for the outgoing args are allocated, except for
// FastTailCall. These slots make the frame size non-zero, so the alignment logic will be called.
// On AMD64-Unix, there are no such slots. It is possible to have calls in a method with a frame size of
// 0, in which case the frame alignment logic won't kick in. This flag takes care of the AMD64-Unix case by
// remembering that there are calls and making sure the frame alignment logic is executed.
bool compNeedToAlignFrame;
#endif // UNIX_AMD64_ABI
bool compProcedureSplitting; // Separate cold code from hot code
bool genFPorder; // Preserve FP order (operations are non-commutative)
bool genFPopt; // Can we do frame-pointer-omission optimization?
bool altJit; // True if we are an altjit and are compiling this method
#ifdef OPT_CONFIG
bool optRepeat; // Repeat optimizer phases k times
#endif
#ifdef DEBUG
bool compProcedureSplittingEH; // Separate cold code from hot code for functions with EH
bool dspCode; // Display native code generated
bool dspEHTable; // Display the EH table reported to the VM
bool dspDebugInfo; // Display the Debug info reported to the VM
bool dspInstrs; // Display the IL instructions intermixed with the native code output
bool dspLines; // Display source-code lines intermixed with native code output
bool dmpHex; // Display raw bytes in hex of native code output
bool varNames; // Display variables names in native code output
bool disAsm; // Display native code as it is generated
bool disAsmSpilled; // Display native code when any register spilling occurs
bool disasmWithGC; // Display GC info interleaved with disassembly.
bool disDiffable; // Makes the Disassembly code 'diff-able'
bool disAddr; // Display process address next to each instruction in disassembly code
bool disAlignment; // Display alignment boundaries in disassembly code
bool disAsm2; // Display native code after it is generated using external disassembler
bool dspOrder; // Display names of each of the methods that we ngen/jit
bool dspUnwind; // Display the unwind info output
bool dspDiffable; // Makes the Jit Dump 'diff-able' (currently uses same COMPlus_* flag as disDiffable)
bool compLongAddress; // Force using large pseudo instructions for long address
// (IF_LARGEJMP/IF_LARGEADR/IF_LARGLDC)
bool dspGCtbls; // Display the GC tables
#endif
bool compExpandCallsEarly; // True if we should expand virtual call targets early for this method
// Default numbers used to perform loop alignment. All the numbers are chosen
// based on experimenting with various benchmarks.
// Default minimum loop block weight required to enable loop alignment.
#define DEFAULT_ALIGN_LOOP_MIN_BLOCK_WEIGHT 4
// By default a loop will be aligned at 32B address boundary to get better
// performance as per architecture manuals.
#define DEFAULT_ALIGN_LOOP_BOUNDARY 0x20
// For non-adaptive loop alignment, by default, only align a loop whose size is
// at most 3 times the alignment block size. If the loop is bigger than that, it is most
// likely complicated enough that loop alignment will not impact performance.
#define DEFAULT_MAX_LOOPSIZE_FOR_ALIGN DEFAULT_ALIGN_LOOP_BOUNDARY * 3
#ifdef DEBUG
// Loop alignment variables
// If set, for non-adaptive alignment, ensure loop jmps are not on or cross alignment boundary.
bool compJitAlignLoopForJcc;
#endif
// For non-adaptive alignment, maximum loop size (in bytes) for which alignment will be done.
unsigned short compJitAlignLoopMaxCodeSize;
// Minimum weight needed for the first block of a loop to make it a candidate for alignment.
unsigned short compJitAlignLoopMinBlockWeight;
// For non-adaptive alignment, address boundary (power of 2) at which loop alignment should
// be done. By default, 32B.
unsigned short compJitAlignLoopBoundary;
// Padding limit to align a loop.
unsigned short compJitAlignPaddingLimit;
// If set, perform adaptive loop alignment that limits number of padding based on loop size.
bool compJitAlignLoopAdaptive;
// If set, tries to hide alignment instructions behind unconditional jumps.
bool compJitHideAlignBehindJmp;
// If set, tracks the hidden return buffer for struct arg.
bool compJitOptimizeStructHiddenBuffer;
#ifdef LATE_DISASM
bool doLateDisasm; // Run the late disassembler
#endif // LATE_DISASM
#if DUMP_GC_TABLES && !defined(DEBUG)
#pragma message("NOTE: this non-debug build has GC ptr table dumping always enabled!")
static const bool dspGCtbls = true;
#endif
#ifdef PROFILING_SUPPORTED
// Whether to emit Enter/Leave/TailCall hooks using a dummy stub (DummyProfilerELTStub()).
// This option helps make the JIT behave as if it is running under a profiler.
bool compJitELTHookEnabled;
#endif // PROFILING_SUPPORTED
#if FEATURE_TAILCALL_OPT
// Whether opportunistic or implicit tail call optimization is enabled.
bool compTailCallOpt;
// Whether optimization of transforming a recursive tail call into a loop is enabled.
bool compTailCallLoopOpt;
#endif
#if FEATURE_FASTTAILCALL
// Whether fast tail calls are allowed.
bool compFastTailCalls;
#endif // FEATURE_FASTTAILCALL
#if defined(TARGET_ARM64)
// Decision about whether to save FP/LR registers with callee-saved registers (see
// COMPlus_JitSaveFpLrWithCalleSavedRegisters).
int compJitSaveFpLrWithCalleeSavedRegisters;
#endif // defined(TARGET_ARM64)
#ifdef CONFIGURABLE_ARM_ABI
bool compUseSoftFP = false;
#else
#ifdef ARM_SOFTFP
static const bool compUseSoftFP = true;
#else // !ARM_SOFTFP
static const bool compUseSoftFP = false;
#endif // ARM_SOFTFP
#endif // CONFIGURABLE_ARM_ABI
} opts;
static bool s_pAltJitExcludeAssembliesListInitialized;
static AssemblyNamesList2* s_pAltJitExcludeAssembliesList;
#ifdef DEBUG
static bool s_pJitDisasmIncludeAssembliesListInitialized;
static AssemblyNamesList2* s_pJitDisasmIncludeAssembliesList;
static bool s_pJitFunctionFileInitialized;
static MethodSet* s_pJitMethodSet;
#endif // DEBUG
#ifdef DEBUG
// Silence warning of cast to greater size. It is easier to silence than to construct code the compiler is happy with, and
// it is safe in this case.
#pragma warning(push)
#pragma warning(disable : 4312)
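// Note: when opts.dspDiffable is set, any non-zero pointer or offset is displayed as the
// sentinel value 0xD1FFAB1E ("diffable") so that dumps from different runs can be diffed textually.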
template <typename T>
T dspPtr(T p)
{
return (p == ZERO) ? ZERO : (opts.dspDiffable ? T(0xD1FFAB1E) : p);
}
template <typename T>
T dspOffset(T o)
{
return (o == ZERO) ? ZERO : (opts.dspDiffable ? T(0xD1FFAB1E) : o);
}
#pragma warning(pop)
static int dspTreeID(GenTree* tree)
{
return tree->gtTreeID;
}
static void printStmtID(Statement* stmt)
{
assert(stmt != nullptr);
printf(FMT_STMT, stmt->GetID());
}
static void printTreeID(GenTree* tree)
{
if (tree == nullptr)
{
printf("[------]");
}
else
{
printf("[%06d]", dspTreeID(tree));
}
}
const char* pgoSourceToString(ICorJitInfo::PgoSource p);
const char* devirtualizationDetailToString(CORINFO_DEVIRTUALIZATION_DETAIL detail);
#endif // DEBUG
// clang-format off
#define STRESS_MODES \
\
STRESS_MODE(NONE) \
\
/* "Variations" stress areas which we try to mix up with each other. */ \
/* These should not be exhaustively used as they might */ \
/* hide/trivialize other areas */ \
\
STRESS_MODE(REGS) \
STRESS_MODE(DBL_ALN) \
STRESS_MODE(LCL_FLDS) \
STRESS_MODE(UNROLL_LOOPS) \
STRESS_MODE(MAKE_CSE) \
STRESS_MODE(LEGACY_INLINE) \
STRESS_MODE(CLONE_EXPR) \
STRESS_MODE(USE_CMOV) \
STRESS_MODE(FOLD) \
STRESS_MODE(MERGED_RETURNS) \
STRESS_MODE(BB_PROFILE) \
STRESS_MODE(OPT_BOOLS_GC) \
STRESS_MODE(REMORPH_TREES) \
STRESS_MODE(64RSLT_MUL) \
STRESS_MODE(DO_WHILE_LOOPS) \
STRESS_MODE(MIN_OPTS) \
STRESS_MODE(REVERSE_FLAG) /* Will set GTF_REVERSE_OPS whenever we can */ \
STRESS_MODE(REVERSE_COMMA) /* Will reverse commas created with gtNewCommaNode */ \
STRESS_MODE(TAILCALL) /* Will make the call as a tailcall whenever legal */ \
STRESS_MODE(CATCH_ARG) /* Will spill catch arg */ \
STRESS_MODE(UNSAFE_BUFFER_CHECKS) \
STRESS_MODE(NULL_OBJECT_CHECK) \
STRESS_MODE(PINVOKE_RESTORE_ESP) \
STRESS_MODE(RANDOM_INLINE) \
STRESS_MODE(SWITCH_CMP_BR_EXPANSION) \
STRESS_MODE(GENERIC_VARN) \
STRESS_MODE(PROFILER_CALLBACKS) /* Will generate profiler hooks for ELT callbacks */ \
STRESS_MODE(BYREF_PROMOTION) /* Change undoPromotion decisions for byrefs */ \
STRESS_MODE(PROMOTE_FEWER_STRUCTS)/* Don't promote some structs that can be promoted */ \
STRESS_MODE(VN_BUDGET)/* Randomize the VN budget */ \
\
/* After COUNT_VARN, stress level 2 does all of these all the time */ \
\
STRESS_MODE(COUNT_VARN) \
\
/* "Check" stress areas that can be exhaustively used if we */ \
/* dont care about performance at all */ \
\
STRESS_MODE(FORCE_INLINE) /* Treat every method as AggressiveInlining */ \
STRESS_MODE(CHK_FLOW_UPDATE) \
STRESS_MODE(EMITTER) \
STRESS_MODE(CHK_REIMPORT) \
STRESS_MODE(FLATFP) \
STRESS_MODE(GENERIC_CHECK) \
STRESS_MODE(COUNT)
enum compStressArea
{
#define STRESS_MODE(mode) STRESS_##mode,
STRESS_MODES
#undef STRESS_MODE
};
// clang-format on
#ifdef DEBUG
static const LPCWSTR s_compStressModeNames[STRESS_COUNT + 1];
BYTE compActiveStressModes[STRESS_COUNT];
#endif // DEBUG
#define MAX_STRESS_WEIGHT 100
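// Illustrative usage: weightPercentage is interpreted against MAX_STRESS_WEIGHT (100), so a
// call such as compStressCompile(STRESS_TAILCALL, 5) (see compTailCallStress below) requests
// the stress behavior for roughly 5% of stress-eligible compilations.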
bool compStressCompile(compStressArea stressArea, unsigned weightPercentage);
bool compStressCompileHelper(compStressArea stressArea, unsigned weightPercentage);
#ifdef DEBUG
bool compInlineStress()
{
return compStressCompile(STRESS_LEGACY_INLINE, 50);
}
bool compRandomInlineStress()
{
return compStressCompile(STRESS_RANDOM_INLINE, 50);
}
bool compPromoteFewerStructs(unsigned lclNum);
#endif // DEBUG
bool compTailCallStress()
{
#ifdef DEBUG
// Do not stress tailcalls in IL stubs as the runtime creates several IL
// stubs to implement the tailcall mechanism, which would then
// recursively create more IL stubs.
return !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) &&
(JitConfig.TailcallStress() != 0 || compStressCompile(STRESS_TAILCALL, 5));
#else
return false;
#endif
}
const char* compGetTieringName(bool wantShortName = false) const;
const char* compGetStressMessage() const;
codeOptimize compCodeOpt() const
{
#if 0
// Switching between size & speed has measurable throughput impact
// (3.5% on NGen CoreLib when measured). It used to be enabled for
// DEBUG, but should generate identical code between CHK & RET builds,
// so that's not acceptable.
// TODO-Throughput: Figure out what to do about size vs. speed & throughput.
// Investigate the cause of the throughput regression.
return opts.compCodeOpt;
#else
return BLENDED_CODE;
#endif
}
//--------------------- Info about the procedure --------------------------
struct Info
{
COMP_HANDLE compCompHnd;
CORINFO_MODULE_HANDLE compScopeHnd;
CORINFO_CLASS_HANDLE compClassHnd;
CORINFO_METHOD_HANDLE compMethodHnd;
CORINFO_METHOD_INFO* compMethodInfo;
bool hasCircularClassConstraints;
bool hasCircularMethodConstraints;
#if defined(DEBUG) || defined(LATE_DISASM) || DUMP_FLOWGRAPHS
const char* compMethodName;
const char* compClassName;
const char* compFullName;
double compPerfScore;
int compMethodSuperPMIIndex; // useful when debugging under SuperPMI
#endif // defined(DEBUG) || defined(LATE_DISASM) || DUMP_FLOWGRAPHS
#if defined(DEBUG) || defined(INLINE_DATA)
// Method hash is logically const, but computed
// on first demand.
mutable unsigned compMethodHashPrivate;
unsigned compMethodHash() const;
#endif // defined(DEBUG) || defined(INLINE_DATA)
#ifdef PSEUDORANDOM_NOP_INSERTION
// things for pseudorandom nop insertion
unsigned compChecksum;
CLRRandom compRNG;
#endif
// The following holds the FLG_xxxx flags for the method we're compiling.
unsigned compFlags;
// The following holds the class attributes for the method we're compiling.
unsigned compClassAttr;
const BYTE* compCode;
IL_OFFSET compILCodeSize; // The IL code size
IL_OFFSET compILImportSize; // Estimated amount of IL actually imported
IL_OFFSET compILEntry; // The IL entry point (normally 0)
PatchpointInfo* compPatchpointInfo; // Patchpoint data for OSR (normally nullptr)
UNATIVE_OFFSET compNativeCodeSize; // The native code size, after instructions are issued. This
// is less than (compTotalHotCodeSize + compTotalColdCodeSize) only if:
// (1) the code is not hot/cold split, and we issued less code than we expected, or
// (2) the code is hot/cold split, and we issued less code than we expected
// in the cold section (the hot section will always be padded out to compTotalHotCodeSize).
bool compIsStatic : 1; // Is the method static (no 'this' pointer)?
bool compIsVarArgs : 1; // Does the method have varargs parameters?
bool compInitMem : 1; // Is the CORINFO_OPT_INIT_LOCALS bit set in the method info options?
bool compProfilerCallback : 1; // JIT inserted a profiler Enter callback
bool compPublishStubParam : 1; // EAX captured in prolog will be available through an intrinsic
bool compHasNextCallRetAddr : 1; // The NextCallReturnAddress intrinsic is used.
var_types compRetType; // Return type of the method as declared in IL
var_types compRetNativeType; // Normalized return type as per target arch ABI
unsigned compILargsCount; // Number of arguments (incl. implicit but not hidden)
unsigned compArgsCount; // Number of arguments (incl. implicit and hidden)
#if FEATURE_FASTTAILCALL
unsigned compArgStackSize; // Incoming argument stack size in bytes
#endif // FEATURE_FASTTAILCALL
unsigned compRetBuffArg; // position of hidden return param var (0, 1) (BAD_VAR_NUM means not present);
int compTypeCtxtArg; // position of hidden param for type context for generic code (CORINFO_CALLCONV_PARAMTYPE)
unsigned compThisArg; // position of implicit this pointer param (not to be confused with lvaArg0Var)
unsigned compILlocalsCount; // Number of vars : args + locals (incl. implicit but not hidden)
unsigned compLocalsCount; // Number of vars : args + locals (incl. implicit and hidden)
unsigned compMaxStack;
UNATIVE_OFFSET compTotalHotCodeSize; // Total number of bytes of Hot Code in the method
UNATIVE_OFFSET compTotalColdCodeSize; // Total number of bytes of Cold Code in the method
unsigned compUnmanagedCallCountWithGCTransition; // count of unmanaged calls with GC transition.
CorInfoCallConvExtension compCallConv; // The entry-point calling convention for this method.
unsigned compLvFrameListRoot; // lclNum for the Frame root
unsigned compXcptnsCount; // Number of exception-handling clauses read in the method's IL.
// You should generally use compHndBBtabCount instead: it is the
// current number of EH clauses (after additions like synchronized
// methods and funclets, and removals like unreachable code deletion).
Target::ArgOrder compArgOrder;
bool compMatchedVM; // true if the VM is "matched": either the JIT is a cross-compiler
// and the VM expects that, or the JIT is a "self-host" compiler
// (e.g., x86 hosted targeting x86) and the VM expects that.
/* The following holds IL scope information about local variables.
*/
unsigned compVarScopesCount;
VarScopeDsc* compVarScopes;
/* The following holds information about instr offsets for
* which we need to report IP-mappings
*/
IL_OFFSET* compStmtOffsets; // sorted
unsigned compStmtOffsetsCount;
ICorDebugInfo::BoundaryTypes compStmtOffsetsImplicit;
#define CPU_X86 0x0100 // The generic X86 CPU
#define CPU_X86_PENTIUM_4 0x0110
#define CPU_X64 0x0200 // The generic x64 CPU
#define CPU_AMD_X64 0x0210 // AMD x64 CPU
#define CPU_INTEL_X64 0x0240 // Intel x64 CPU
#define CPU_ARM 0x0300 // The generic ARM CPU
#define CPU_ARM64 0x0400 // The generic ARM64 CPU
unsigned genCPU; // What CPU are we running on
// Number of class profile probes in this method
unsigned compClassProbeCount;
} info;
// Returns true if the method being compiled returns a non-void and non-struct value.
// Note that lvaInitTypeRef() normalizes compRetNativeType for struct returns in a
// single register as per target arch ABI (e.g. on Amd64 Windows structs of size 1, 2,
// 4 or 8 get normalized to TYP_BYTE/TYP_SHORT/TYP_INT/TYP_LONG; on Arm, HFA structs).
// Methods returning such structs are considered to return a non-struct value, and
// this method returns true in that case.
bool compMethodReturnsNativeScalarType()
{
return (info.compRetType != TYP_VOID) && !varTypeIsStruct(info.compRetNativeType);
}
// Returns true if the method being compiled returns RetBuf addr as its return value
bool compMethodReturnsRetBufAddr()
{
// There are cases where implicit RetBuf argument should be explicitly returned in a register.
// In such cases the return type is changed to TYP_BYREF and appropriate IR is generated.
// These cases are:
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_AMD64
// 1. on x64 Windows and Unix the address of RetBuf needs to be returned by
// methods with hidden RetBufArg in RAX. In such case GT_RETURN is of TYP_BYREF,
// returning the address of RetBuf.
return (info.compRetBuffArg != BAD_VAR_NUM);
#else // TARGET_AMD64
#ifdef PROFILING_SUPPORTED
// 2. Profiler Leave callback expects the address of retbuf as return value for
// methods with hidden RetBuf argument. impReturnInstruction() when profiler
// callbacks are needed creates GT_RETURN(TYP_BYREF, op1 = Addr of RetBuf) for
// methods with hidden RetBufArg.
if (compIsProfilerHookNeeded())
{
return (info.compRetBuffArg != BAD_VAR_NUM);
}
#endif
// 3. Windows ARM64 native instance calling convention requires the address of RetBuff
// to be returned in x0.
CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_ARM64)
if (TargetOS::IsWindows)
{
auto callConv = info.compCallConv;
if (callConvIsInstanceMethodCallConv(callConv))
{
return (info.compRetBuffArg != BAD_VAR_NUM);
}
}
#endif // TARGET_ARM64
// 4. x86 unmanaged calling conventions require the address of RetBuff to be returned in eax.
CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_X86)
if (info.compCallConv != CorInfoCallConvExtension::Managed)
{
return (info.compRetBuffArg != BAD_VAR_NUM);
}
#endif
return false;
#endif // TARGET_AMD64
}
// Returns true if the method returns a value in more than one return register
// TODO-ARM-Bug: Deal with multi-register genReturnLocaled structs?
// TODO-ARM64: Does this apply for ARM64 too?
bool compMethodReturnsMultiRegRetType()
{
#if FEATURE_MULTIREG_RET
#if defined(TARGET_X86)
// On x86, 64-bit longs and structs are returned in multiple registers
return varTypeIsLong(info.compRetNativeType) ||
(varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM));
#else // targets: X64-UNIX, ARM64 or ARM32
// On all other targets that support multireg return values:
// Methods returning a struct in multiple registers have a return value of TYP_STRUCT.
// Such method's compRetNativeType is TYP_STRUCT without a hidden RetBufArg
return varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM);
#endif // TARGET_XXX
#else // not FEATURE_MULTIREG_RET
// For this architecture there are no multireg returns
return false;
#endif // FEATURE_MULTIREG_RET
}
bool compEnregLocals()
{
return ((opts.compFlags & CLFLG_REGVAR) != 0);
}
bool compEnregStructLocals()
{
return (JitConfig.JitEnregStructLocals() != 0);
}
bool compObjectStackAllocation()
{
return (JitConfig.JitObjectStackAllocation() != 0);
}
// Returns true if the method returns a value in more than one return register;
// it should replace or be merged with compMethodReturnsMultiRegRetType when #36868 is fixed.
// The difference from the original `compMethodReturnsMultiRegRetType` is in ARM64 SIMD* handling:
// this method correctly returns false for such types (they are returned as an HVA), where the original returns true.
bool compMethodReturnsMultiRegRegTypeAlternate()
{
#if FEATURE_MULTIREG_RET
#if defined(TARGET_X86)
// On x86, 64-bit longs and structs are returned in multiple registers
return varTypeIsLong(info.compRetNativeType) ||
(varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM));
#else // targets: X64-UNIX, ARM64 or ARM32
#if defined(TARGET_ARM64)
// TYP_SIMD* are returned in one register.
if (varTypeIsSIMD(info.compRetNativeType))
{
return false;
}
#endif
// On all other targets that support multireg return values:
// Methods returning a struct in multiple registers have a return value of TYP_STRUCT.
// Such method's compRetNativeType is TYP_STRUCT without a hidden RetBufArg
return varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM);
#endif // TARGET_XXX
#else // not FEATURE_MULTIREG_RET
// For this architecture there are no multireg returns
return false;
#endif // FEATURE_MULTIREG_RET
}
// Returns true if the method being compiled returns a value
bool compMethodHasRetVal()
{
return compMethodReturnsNativeScalarType() || compMethodReturnsRetBufAddr() ||
compMethodReturnsMultiRegRetType();
}
// Returns true if the method requires a PInvoke prolog and epilog
bool compMethodRequiresPInvokeFrame()
{
return (info.compUnmanagedCallCountWithGCTransition > 0);
}
// Returns true if address-exposed user variables should be poisoned with a recognizable value
bool compShouldPoisonFrame()
{
#ifdef FEATURE_ON_STACK_REPLACEMENT
if (opts.IsOSR())
return false;
#endif
return !info.compInitMem && opts.compDbgCode;
}
// Returns true if the jit supports having patchpoints in this method.
// Optionally, get the reason why not.
bool compCanHavePatchpoints(const char** reason = nullptr);
#if defined(DEBUG)
void compDispLocalVars();
#endif // DEBUG
private:
class ClassLayoutTable* m_classLayoutTable;
class ClassLayoutTable* typCreateClassLayoutTable();
class ClassLayoutTable* typGetClassLayoutTable();
public:
// Get the layout having the specified layout number.
ClassLayout* typGetLayoutByNum(unsigned layoutNum);
// Get the layout number of the specified layout.
unsigned typGetLayoutNum(ClassLayout* layout);
// Get the layout having the specified size but no class handle.
ClassLayout* typGetBlkLayout(unsigned blockSize);
// Get the number of a layout having the specified size but no class handle.
unsigned typGetBlkLayoutNum(unsigned blockSize);
// Get the layout for the specified class handle.
ClassLayout* typGetObjLayout(CORINFO_CLASS_HANDLE classHandle);
// Get the number of a layout for the specified class handle.
unsigned typGetObjLayoutNum(CORINFO_CLASS_HANDLE classHandle);
//-------------------------- Global Compiler Data ------------------------------------
#ifdef DEBUG
private:
static LONG s_compMethodsCount; // to produce unique label names
#endif
public:
#ifdef DEBUG
LONG compMethodID;
unsigned compGenTreeID;
unsigned compStatementID;
unsigned compBasicBlockID;
#endif
BasicBlock* compCurBB; // the current basic block in process
Statement* compCurStmt; // the current statement in process
GenTree* compCurTree; // the current tree in process
// The following is used to create the 'method JIT info' block.
size_t compInfoBlkSize;
BYTE* compInfoBlkAddr;
EHblkDsc* compHndBBtab; // array of EH data
unsigned compHndBBtabCount; // element count of used elements in EH data array
unsigned compHndBBtabAllocCount; // element count of allocated elements in EH data array
#if defined(TARGET_X86)
//-------------------------------------------------------------------------
// Tracking of region covered by the monitor in synchronized methods
void* syncStartEmitCookie; // the emitter cookie for first instruction after the call to MON_ENTER
void* syncEndEmitCookie; // the emitter cookie for first instruction after the call to MON_EXIT
#endif // TARGET_X86
Phases mostRecentlyActivePhase; // the most recently active phase
PhaseChecks activePhaseChecks; // the currently active phase checks
//-------------------------------------------------------------------------
// The following keeps track of how many bytes of local frame space we've
// grabbed so far in the current function, and how many argument bytes we
// need to pop when we return.
//
unsigned compLclFrameSize; // secObject+lclBlk+locals+temps
// Count of callee-saved regs we pushed in the prolog.
// Does not include EBP for isFramePointerUsed() and double-aligned frames.
// In case of Amd64 this doesn't include float regs saved on stack.
unsigned compCalleeRegsPushed;
#if defined(TARGET_XARCH)
// Mask of callee saved float regs on stack.
regMaskTP compCalleeFPRegsSavedMask;
#endif
#ifdef TARGET_AMD64
// Quirk for VS debug-launch scenario to work:
// Bytes of padding between save-reg area and locals.
#define VSQUIRK_STACK_PAD (2 * REGSIZE_BYTES)
unsigned compVSQuirkStackPaddingNeeded;
#endif
unsigned compArgSize; // total size of arguments in bytes (including register args (lvIsRegArg))
#ifdef TARGET_ARM
bool compHasSplitParam;
#endif
unsigned compMapILargNum(unsigned ILargNum); // map accounting for hidden args
unsigned compMapILvarNum(unsigned ILvarNum); // map accounting for hidden args
unsigned compMap2ILvarNum(unsigned varNum) const; // map accounting for hidden args
#if defined(TARGET_ARM64)
struct FrameInfo
{
// Frame type (1-5)
int frameType;
// Distance from established (method body) SP to base of callee save area
int calleeSaveSpOffset;
// Amount to subtract from SP before saving (prolog) OR
// to add to SP after restoring (epilog) callee saves
int calleeSaveSpDelta;
// Distance from established SP to where caller's FP was saved
int offsetSpToSavedFp;
} compFrameInfo;
#endif
//-------------------------------------------------------------------------
static void compStartup(); // One-time initialization
static void compShutdown(); // One-time finalization
void compInit(ArenaAllocator* pAlloc,
CORINFO_METHOD_HANDLE methodHnd,
COMP_HANDLE compHnd,
CORINFO_METHOD_INFO* methodInfo,
InlineInfo* inlineInfo);
void compDone();
static void compDisplayStaticSizes(FILE* fout);
//------------ Some utility functions --------------
void* compGetHelperFtn(CorInfoHelpFunc ftnNum, /* IN */
void** ppIndirection); /* OUT */
// Several JIT/EE interface functions return a CorInfoType, and also return a
// class handle as an out parameter if the type is a value class. Returns the
// size of the type these describe.
unsigned compGetTypeSize(CorInfoType cit, CORINFO_CLASS_HANDLE clsHnd);
#ifdef DEBUG
// Components used by the compiler may write unit test suites, and
// have them run within this method. They will be run only once per process, and only
// in debug. (Perhaps should be under the control of a COMPlus_ flag.)
// These should fail by asserting.
void compDoComponentUnitTestsOnce();
#endif // DEBUG
int compCompile(CORINFO_MODULE_HANDLE classPtr,
void** methodCodePtr,
uint32_t* methodCodeSize,
JitFlags* compileFlags);
void compCompileFinish();
int compCompileHelper(CORINFO_MODULE_HANDLE classPtr,
COMP_HANDLE compHnd,
CORINFO_METHOD_INFO* methodInfo,
void** methodCodePtr,
uint32_t* methodCodeSize,
JitFlags* compileFlag);
ArenaAllocator* compGetArenaAllocator();
void generatePatchpointInfo();
#if MEASURE_MEM_ALLOC
static bool s_dspMemStats; // Display per-phase memory statistics for every function
#endif // MEASURE_MEM_ALLOC
#if LOOP_HOIST_STATS
unsigned m_loopsConsidered;
bool m_curLoopHasHoistedExpression;
unsigned m_loopsWithHoistedExpressions;
unsigned m_totalHoistedExpressions;
void AddLoopHoistStats();
void PrintPerMethodLoopHoistStats();
static CritSecObject s_loopHoistStatsLock; // This lock protects the data structures below.
static unsigned s_loopsConsidered;
static unsigned s_loopsWithHoistedExpressions;
static unsigned s_totalHoistedExpressions;
static void PrintAggregateLoopHoistStats(FILE* f);
#endif // LOOP_HOIST_STATS
#if TRACK_ENREG_STATS
class EnregisterStats
{
private:
unsigned m_totalNumberOfVars;
unsigned m_totalNumberOfStructVars;
unsigned m_totalNumberOfEnregVars;
unsigned m_totalNumberOfStructEnregVars;
unsigned m_addrExposed;
unsigned m_hiddenStructArg;
unsigned m_VMNeedsStackAddr;
unsigned m_localField;
unsigned m_blockOp;
unsigned m_dontEnregStructs;
unsigned m_notRegSizeStruct;
unsigned m_structArg;
unsigned m_lclAddrNode;
unsigned m_castTakesAddr;
unsigned m_storeBlkSrc;
unsigned m_oneAsgRetyping;
unsigned m_swizzleArg;
unsigned m_blockOpRet;
unsigned m_returnSpCheck;
unsigned m_simdUserForcesDep;
unsigned m_liveInOutHndlr;
unsigned m_depField;
unsigned m_noRegVars;
unsigned m_minOptsGC;
#ifdef JIT32_GCENCODER
unsigned m_PinningRef;
#endif // JIT32_GCENCODER
#if !defined(TARGET_64BIT)
unsigned m_longParamField;
#endif // !TARGET_64BIT
unsigned m_parentExposed;
unsigned m_tooConservative;
unsigned m_escapeAddress;
unsigned m_osrExposed;
unsigned m_stressLclFld;
unsigned m_copyFldByFld;
unsigned m_dispatchRetBuf;
unsigned m_wideIndir;
public:
void RecordLocal(const LclVarDsc* varDsc);
void Dump(FILE* fout) const;
};
static EnregisterStats s_enregisterStats;
#endif // TRACK_ENREG_STATS
bool compIsForImportOnly();
bool compIsForInlining() const;
bool compDonotInline();
#ifdef DEBUG
// Get the default fill char value; we randomize this value when JitStress is enabled.
static unsigned char compGetJitDefaultFill(Compiler* comp);
const char* compLocalVarName(unsigned varNum, unsigned offs);
VarName compVarName(regNumber reg, bool isFloatReg = false);
const char* compRegVarName(regNumber reg, bool displayVar = false, bool isFloatReg = false);
const char* compRegNameForSize(regNumber reg, size_t size);
const char* compFPregVarName(unsigned fpReg, bool displayVar = false);
void compDspSrcLinesByNativeIP(UNATIVE_OFFSET curIP);
void compDspSrcLinesByLineNum(unsigned line, bool seek = false);
#endif // DEBUG
//-------------------------------------------------------------------------
struct VarScopeListNode
{
VarScopeDsc* data;
VarScopeListNode* next;
static VarScopeListNode* Create(VarScopeDsc* value, CompAllocator alloc)
{
VarScopeListNode* node = new (alloc) VarScopeListNode;
node->data = value;
node->next = nullptr;
return node;
}
};
struct VarScopeMapInfo
{
VarScopeListNode* head;
VarScopeListNode* tail;
static VarScopeMapInfo* Create(VarScopeListNode* node, CompAllocator alloc)
{
VarScopeMapInfo* info = new (alloc) VarScopeMapInfo;
info->head = node;
info->tail = node;
return info;
}
};
// Max value of scope count for which we would use linear search; for larger values we would use hashtable lookup.
static const unsigned MAX_LINEAR_FIND_LCL_SCOPELIST = 32;
typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, VarScopeMapInfo*> VarNumToScopeDscMap;
// Map keeping variables' scopes, indexed by varNum, with each entry containing that variable's scope descriptors.
VarNumToScopeDscMap* compVarScopeMap;
VarScopeDsc* compFindLocalVar(unsigned varNum, unsigned lifeBeg, unsigned lifeEnd);
VarScopeDsc* compFindLocalVar(unsigned varNum, unsigned offs);
VarScopeDsc* compFindLocalVarLinear(unsigned varNum, unsigned offs);
void compInitVarScopeMap();
VarScopeDsc** compEnterScopeList; // List has the offsets where variables
// enter scope, sorted by instr offset
unsigned compNextEnterScope;
VarScopeDsc** compExitScopeList; // List has the offsets where variables
// go out of scope, sorted by instr offset
unsigned compNextExitScope;
void compInitScopeLists();
void compResetScopeLists();
VarScopeDsc* compGetNextEnterScope(unsigned offs, bool scan = false);
VarScopeDsc* compGetNextExitScope(unsigned offs, bool scan = false);
void compProcessScopesUntil(unsigned offset,
VARSET_TP* inScope,
void (Compiler::*enterScopeFn)(VARSET_TP* inScope, VarScopeDsc*),
void (Compiler::*exitScopeFn)(VARSET_TP* inScope, VarScopeDsc*));
#ifdef DEBUG
void compDispScopeLists();
#endif // DEBUG
bool compIsProfilerHookNeeded();
//-------------------------------------------------------------------------
/* Statistical Data Gathering */
void compJitStats(); // call this function and enable
// various ifdef's below for statistical data
#if CALL_ARG_STATS
void compCallArgStats();
static void compDispCallArgStats(FILE* fout);
#endif
//-------------------------------------------------------------------------
protected:
#ifdef DEBUG
bool skipMethod();
#endif
ArenaAllocator* compArenaAllocator;
public:
void compFunctionTraceStart();
void compFunctionTraceEnd(void* methodCodePtr, ULONG methodCodeSize, bool isNYI);
protected:
size_t compMaxUncheckedOffsetForNullObject;
void compInitOptions(JitFlags* compileFlags);
void compSetProcessor();
void compInitDebuggingInfo();
void compSetOptimizationLevel();
#ifdef TARGET_ARMARCH
bool compRsvdRegCheck(FrameLayoutState curState);
#endif
void compCompile(void** methodCodePtr, uint32_t* methodCodeSize, JitFlags* compileFlags);
// Clear annotations produced during optimizations; to be used between iterations when repeating opts.
void ResetOptAnnotations();
// Regenerate loop descriptors; to be used between iterations when repeating opts.
void RecomputeLoopInfo();
#ifdef PROFILING_SUPPORTED
// Data required for generating profiler Enter/Leave/TailCall hooks
bool compProfilerHookNeeded; // Whether profiler Enter/Leave/TailCall hook needs to be generated for the method
void* compProfilerMethHnd; // Profiler handle of the method being compiled. Passed as param to ELT callbacks
bool compProfilerMethHndIndirected; // Whether compProfilerHandle is pointer to the handle or is an actual handle
#endif
public:
// Assumes called as part of process shutdown; does any compiler-specific work associated with that.
static void ProcessShutdownWork(ICorStaticInfo* statInfo);
CompAllocator getAllocator(CompMemKind cmk = CMK_Generic)
{
return CompAllocator(compArenaAllocator, cmk);
}
CompAllocator getAllocatorGC()
{
return getAllocator(CMK_GC);
}
CompAllocator getAllocatorLoopHoist()
{
return getAllocator(CMK_LoopHoist);
}
#ifdef DEBUG
CompAllocator getAllocatorDebugOnly()
{
return getAllocator(CMK_DebugOnly);
}
#endif // DEBUG
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX typeInfo XX
XX XX
XX Checks for type compatibility and merges types XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
public:
// Returns true if child is equal to or a subtype of parent for merge purposes.
// This support is necessary to support attributes that are not described in,
// for example, signatures. For example, the permanent home byref (a byref that
// points to the gc heap) isn't a property of method signatures; therefore,
// it is safe to have mismatches here (that tiCompatibleWith will not flag),
// but when deciding if we need to reimport a block, we need to take these
// into account.
bool tiMergeCompatibleWith(const typeInfo& pChild, const typeInfo& pParent, bool normalisedForStack) const;
// Returns true if child is equal to or a subtype of parent.
// normalisedForStack indicates that both types are normalised for the stack
bool tiCompatibleWith(const typeInfo& pChild, const typeInfo& pParent, bool normalisedForStack) const;
// Merges pDest and pSrc. Returns false if merge is undefined.
// *pDest is modified to represent the merged type. Sets "*changed" to true
// if this changes "*pDest".
bool tiMergeToCommonParent(typeInfo* pDest, const typeInfo* pSrc, bool* changed) const;
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX IL verification stuff XX
XX XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
public:
// The following is used to track liveness of local variables, initialization
// of valueclass constructors, and type safe use of IL instructions.
// dynamic state info needed for verification
EntryState verCurrentState;
// The this ptr of object type .ctors is considered initialized only after
// the base class ctor is called, or an alternate ctor is called.
// An uninitialized this ptr can be used to access fields, but cannot
// be used to call a member function.
bool verTrackObjCtorInitState;
void verInitBBEntryState(BasicBlock* block, EntryState* currentState);
// Requires that "tis" is not TIS_Bottom -- it's a definite init/uninit state.
void verSetThisInit(BasicBlock* block, ThisInitState tis);
void verInitCurrentState();
void verResetCurrentState(BasicBlock* block, EntryState* currentState);
// Merges the current verification state into the entry state of "block", returning false if that merge fails,
// true if it succeeds. Further sets "*changed" to true if this changes the entry state of "block".
bool verMergeEntryStates(BasicBlock* block, bool* changed);
void verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg));
void verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg));
typeInfo verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd,
bool bashStructToRef = false); // converts from jit type representation to typeInfo
typeInfo verMakeTypeInfo(CorInfoType ciType,
CORINFO_CLASS_HANDLE clsHnd); // converts from jit type representation to typeInfo
bool verIsSDArray(const typeInfo& ti);
typeInfo verGetArrayElemType(const typeInfo& ti);
typeInfo verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args);
bool verIsByRefLike(const typeInfo& ti);
bool verIsSafeToReturnByRef(const typeInfo& ti);
// generic type variables range over types that satisfy IsBoxable
bool verIsBoxable(const typeInfo& ti);
void DECLSPEC_NORETURN verRaiseVerifyException(INDEBUG(const char* reason) DEBUGARG(const char* file)
DEBUGARG(unsigned line));
void verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* reason) DEBUGARG(const char* file)
DEBUGARG(unsigned line));
bool verCheckTailCallConstraint(OPCODE opcode,
CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call
// on a type parameter?
bool speculative // If true, won't throw if verification fails. Instead it will
// return false to the caller.
// If false, it will throw.
);
bool verIsBoxedValueType(const typeInfo& ti);
void verVerifyCall(OPCODE opcode,
CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
bool tailCall,
bool readonlyCall, // is this a "readonly." call?
const BYTE* delegateCreateStart,
const BYTE* codeAddr,
CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName));
bool verCheckDelegateCreation(const BYTE* delegateCreateStart, const BYTE* codeAddr, mdMemberRef& targetMemberRef);
typeInfo verVerifySTIND(const typeInfo& ptr, const typeInfo& value, const typeInfo& instrType);
typeInfo verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType);
void verVerifyField(CORINFO_RESOLVED_TOKEN* pResolvedToken,
const CORINFO_FIELD_INFO& fieldInfo,
const typeInfo* tiThis,
bool mutator,
bool allowPlainStructAsThis = false);
void verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode);
void verVerifyThisPtrInitialised();
bool verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target);
#ifdef DEBUG
// One line log function. Default level is 0. Increasing it gives you
// more log information
// levels are currently unused: #define JITDUMP(level,...) ();
void JitLogEE(unsigned level, const char* fmt, ...);
bool compDebugBreak;
bool compJitHaltMethod();
#endif
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX GS Security checks for unsafe buffers XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
public:
struct ShadowParamVarInfo
{
FixedBitVect* assignGroup; // the closure set of variables whose values depend on each other
unsigned shadowCopy; // Lcl var num, if not valid set to BAD_VAR_NUM
static bool mayNeedShadowCopy(LclVarDsc* varDsc)
{
#if defined(TARGET_AMD64)
// GS cookie logic to create shadow slots, create trees to copy reg args to shadow
// slots and update all trees to refer to shadow slots is done immediately after
// fgMorph(). Lsra could potentially mark a param as DoNotEnregister after JIT determines
// not to shadow a parameter. Also, LSRA could potentially spill a param which is passed
// in register. Therefore, conservatively all params may need a shadow copy. Note that
// GS cookie logic further checks whether the param is a ptr or an unsafe buffer before
// creating a shadow slot even though this routine returns true.
//
// TODO-AMD64-CQ: Revisit this conservative approach as it could create more shadow slots than
// required. There are two cases under which a reg arg could potentially be used from its
// home location:
// a) LSRA marks it as DoNotEnregister (see LinearScan::identifyCandidates())
// b) LSRA spills it
//
// Possible solution to address case (a)
// - The conditions under which LSRA marks a varDsc as DoNotEnregister could be checked
// in this routine. Note that checking whether a param is live out of an exception handler is
// something we may not be able to do here, since GS cookie logic is invoked ahead of liveness
// computation. Therefore, for methods that have exception handling and need the GS cookie check,
// we might have to take the conservative approach.
//
// Possible solution to address case (b)
// - Whenever a parameter passed in an argument register needs to be spilled by LSRA, we
// create a new spill temp if the method needs GS cookie check.
return varDsc->lvIsParam;
#else // !defined(TARGET_AMD64)
return varDsc->lvIsParam && !varDsc->lvIsRegArg;
#endif
}
#ifdef DEBUG
void Print()
{
printf("assignGroup [%p]; shadowCopy: [%d];\n", assignGroup, shadowCopy);
}
#endif
};
GSCookie* gsGlobalSecurityCookieAddr; // Address of global cookie for unsafe buffer checks
GSCookie gsGlobalSecurityCookieVal; // Value of global cookie if addr is NULL
ShadowParamVarInfo* gsShadowVarInfo; // Table used by shadow param analysis code
void gsGSChecksInitCookie(); // Grabs cookie variable
void gsCopyShadowParams(); // Identify vulnerable params and create shadow copies
bool gsFindVulnerableParams(); // Shadow param analysis code
void gsParamsToShadows(); // Insert copy code and replace param uses by shadows
static fgWalkPreFn gsMarkPtrsAndAssignGroups; // Shadow param analysis tree-walk
static fgWalkPreFn gsReplaceShadowParams; // Shadow param replacement tree-walk
#define DEFAULT_MAX_INLINE_SIZE 100 // Methods with > DEFAULT_MAX_INLINE_SIZE IL bytes will never be inlined.
// This can be overridden by setting the complus_JITInlineSize env variable.
#define DEFAULT_MAX_INLINE_DEPTH 20 // Methods at more than this level deep will not be inlined
#define DEFAULT_MAX_LOCALLOC_TO_LOCAL_SIZE 32 // fixed locallocs of this size or smaller will convert to local buffers
private:
#ifdef FEATURE_JIT_METHOD_PERF
JitTimer* pCompJitTimer; // Timer data structure (by phases) for current compilation.
static CompTimeSummaryInfo s_compJitTimerSummary; // Summary of the Timer information for the whole run.
static LPCWSTR JitTimeLogCsv(); // Retrieve the file name for CSV from ConfigDWORD.
static LPCWSTR compJitTimeLogFilename; // If a log file for JIT time is desired, filename to write it to.
#endif
void BeginPhase(Phases phase); // Indicate the start of the given phase.
void EndPhase(Phases phase); // Indicate the end of the given phase.
#if MEASURE_CLRAPI_CALLS
// Thin wrappers that call into JitTimer (if present).
inline void CLRApiCallEnter(unsigned apix);
inline void CLRApiCallLeave(unsigned apix);
public:
inline void CLR_API_Enter(API_ICorJitInfo_Names ename);
inline void CLR_API_Leave(API_ICorJitInfo_Names ename);
private:
#endif
#if defined(DEBUG) || defined(INLINE_DATA)
// These variables are associated with maintaining SQM data about compile time.
unsigned __int64 m_compCyclesAtEndOfInlining; // The thread-virtualized cycle count at the end of the inlining phase
// in the current compilation.
unsigned __int64 m_compCycles; // Net cycle count for current compilation
DWORD m_compTickCountAtEndOfInlining; // The result of GetTickCount() (# ms since some epoch marker) at the end of
// the inlining phase in the current compilation.
#endif // defined(DEBUG) || defined(INLINE_DATA)
// Records the SQM-relevant data (cycles and tick count). Should be called after inlining is complete.
// (We do this after inlining because this marks the last point at which the JIT is likely to cause
// type-loading and class initialization).
void RecordStateAtEndOfInlining();
// Assumes being called at the end of compilation. Update the SQM state.
void RecordStateAtEndOfCompilation();
public:
#if FUNC_INFO_LOGGING
static LPCWSTR compJitFuncInfoFilename; // If a log file for per-function information is required, this is the
// filename to write it to.
static FILE* compJitFuncInfoFile; // And this is the actual FILE* to write to.
#endif // FUNC_INFO_LOGGING
Compiler* prevCompiler; // Previous compiler on stack for TLS Compiler* linked list for reentrant compilers.
#if MEASURE_NOWAY
void RecordNowayAssert(const char* filename, unsigned line, const char* condStr);
#endif // MEASURE_NOWAY
#ifndef FEATURE_TRACELOGGING
// Should we actually fire the noway assert body and the exception handler?
bool compShouldThrowOnNoway();
#else // FEATURE_TRACELOGGING
// Should we actually fire the noway assert body and the exception handler?
bool compShouldThrowOnNoway(const char* filename, unsigned line);
// Telemetry instance to use per method compilation.
JitTelemetry compJitTelemetry;
// Get common parameters that have to be logged with most telemetry data.
void compGetTelemetryDefaults(const char** assemblyName,
const char** scopeName,
const char** methodName,
unsigned* methodHash);
#endif // !FEATURE_TRACELOGGING
#ifdef DEBUG
private:
NodeToTestDataMap* m_nodeTestData;
static const unsigned FIRST_LOOP_HOIST_CSE_CLASS = 1000;
unsigned m_loopHoistCSEClass; // LoopHoist test annotations turn into CSE requirements; we
// label them with CSE Class #'s starting at FIRST_LOOP_HOIST_CSE_CLASS.
// Currently kept in this Compiler object.
public:
NodeToTestDataMap* GetNodeTestData()
{
Compiler* compRoot = impInlineRoot();
if (compRoot->m_nodeTestData == nullptr)
{
compRoot->m_nodeTestData = new (getAllocatorDebugOnly()) NodeToTestDataMap(getAllocatorDebugOnly());
}
return compRoot->m_nodeTestData;
}
typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, int> NodeToIntMap;
// Returns the set (i.e., the domain of the result map) of nodes that are keys in m_nodeTestData, and
// currently occur in the AST graph.
NodeToIntMap* FindReachableNodesInNodeTestData();
// Node "from" is being eliminated, and being replaced by node "to". If "from" had any associated
// test data, associate that data with "to".
void TransferTestDataToNode(GenTree* from, GenTree* to);
// These are the methods that test that the various conditions implied by the
// test attributes are satisfied.
void JitTestCheckSSA(); // SSA builder tests.
void JitTestCheckVN(); // Value numbering tests.
#endif // DEBUG
// The "FieldSeqStore", for canonicalizing field sequences. See the definition of FieldSeqStore for
// operations.
FieldSeqStore* m_fieldSeqStore;
FieldSeqStore* GetFieldSeqStore()
{
Compiler* compRoot = impInlineRoot();
if (compRoot->m_fieldSeqStore == nullptr)
{
// Create a CompAllocator that labels sub-structure with CMK_FieldSeqStore, and use that for allocation.
CompAllocator ialloc(getAllocator(CMK_FieldSeqStore));
compRoot->m_fieldSeqStore = new (ialloc) FieldSeqStore(ialloc);
}
return compRoot->m_fieldSeqStore;
}
typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, FieldSeqNode*> NodeToFieldSeqMap;
// Some nodes of "TYP_BYREF" or "TYP_I_IMPL" actually represent the address of a field within a struct, but since
// the offset of the field is zero, there's no "GT_ADD" node. We normally attach a field sequence to the constant
// that is added, but what do we do when that constant is zero, and is thus not present? We use this mechanism to
// attach the field sequence directly to the address node.
NodeToFieldSeqMap* m_zeroOffsetFieldMap;
NodeToFieldSeqMap* GetZeroOffsetFieldMap()
{
// Don't need to worry about inlining here
if (m_zeroOffsetFieldMap == nullptr)
{
// Create a CompAllocator that labels sub-structure with CMK_ZeroOffsetFieldMap, and use that for
// allocation.
CompAllocator ialloc(getAllocator(CMK_ZeroOffsetFieldMap));
m_zeroOffsetFieldMap = new (ialloc) NodeToFieldSeqMap(ialloc);
}
return m_zeroOffsetFieldMap;
}
// Requires that "op1" is a node of type "TYP_BYREF" or "TYP_I_IMPL". We are dereferencing this with the fields in
// "fieldSeq", whose offsets are required all to be zero. Ensures that any field sequence annotation currently on
// "op1" or its components is augmented by appending "fieldSeq". In practice, if "op1" is a GT_LCL_FLD, it has
// a field sequence as a member; otherwise, it may be the addition of a byref and a constant, where the constant
// has a field sequence -- in this case "fieldSeq" is appended to that of the constant; otherwise, we
// record the field sequence using the ZeroOffsetFieldMap described above.
//
// One exception to the above is when "op1" is a node of type "TYP_REF" and is a GT_LCL_VAR.
// This happens when the System.Object vtable pointer is a regular field at offset 0 in System.Private.CoreLib in
// CoreRT. Such a case is handled the same as the default case.
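//
// For example (an illustrative sketch, not a specific JIT case): given "struct S { int f; }", the address
// of "s.f" is the same as the address of "s", so there is no GT_ADD of a constant to carry the field
// sequence; calling fgAddFieldSeqForZeroOffset on that address node records that dereferences of it at
// offset 0 refer to field "f".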
void fgAddFieldSeqForZeroOffset(GenTree* op1, FieldSeqNode* fieldSeq);
NodeToUnsignedMap* m_memorySsaMap[MemoryKindCount];
// In some cases, we want to assign intermediate SSA #'s to memory states, and know what nodes create those memory
// states. (We do this for try blocks, where, if the try block doesn't do a call that loses track of the memory
// state, all the possible memory states are possible initial states of the corresponding catch block(s).)
NodeToUnsignedMap* GetMemorySsaMap(MemoryKind memoryKind)
{
if (memoryKind == GcHeap && byrefStatesMatchGcHeapStates)
{
// Use the same map for GCHeap and ByrefExposed when their states match.
memoryKind = ByrefExposed;
}
assert(memoryKind < MemoryKindCount);
Compiler* compRoot = impInlineRoot();
if (compRoot->m_memorySsaMap[memoryKind] == nullptr)
{
// Create a CompAllocator that labels sub-structure with CMK_MemorySsaMap, and use that for allocation.
CompAllocator ialloc(getAllocator(CMK_MemorySsaMap));
compRoot->m_memorySsaMap[memoryKind] = new (ialloc) NodeToUnsignedMap(ialloc);
}
return compRoot->m_memorySsaMap[memoryKind];
}
// The Refany type is the only struct type whose structure is implicitly assumed by IL. We need its fields.
CORINFO_CLASS_HANDLE m_refAnyClass;
CORINFO_FIELD_HANDLE GetRefanyDataField()
{
if (m_refAnyClass == nullptr)
{
m_refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF);
}
return info.compCompHnd->getFieldInClass(m_refAnyClass, 0);
}
CORINFO_FIELD_HANDLE GetRefanyTypeField()
{
if (m_refAnyClass == nullptr)
{
m_refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF);
}
return info.compCompHnd->getFieldInClass(m_refAnyClass, 1);
}
#if VARSET_COUNTOPS
static BitSetSupport::BitSetOpCounter m_varsetOpCounter;
#endif
#if ALLVARSET_COUNTOPS
static BitSetSupport::BitSetOpCounter m_allvarsetOpCounter;
#endif
static HelperCallProperties s_helperCallProperties;
#ifdef UNIX_AMD64_ABI
static var_types GetTypeFromClassificationAndSizes(SystemVClassificationType classType, int size);
static var_types GetEightByteType(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR& structDesc,
unsigned slotNum);
static void GetStructTypeOffset(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR& structDesc,
var_types* type0,
var_types* type1,
unsigned __int8* offset0,
unsigned __int8* offset1);
void GetStructTypeOffset(CORINFO_CLASS_HANDLE typeHnd,
var_types* type0,
var_types* type1,
unsigned __int8* offset0,
unsigned __int8* offset1);
#endif // defined(UNIX_AMD64_ABI)
void fgMorphMultiregStructArgs(GenTreeCall* call);
GenTree* fgMorphMultiregStructArg(GenTree* arg, fgArgTabEntry* fgEntryPtr);
bool killGCRefs(GenTree* tree);
}; // end of class Compiler
//---------------------------------------------------------------------------------------------------------------------
// GenTreeVisitor: a flexible tree walker implemented using the curiously-recurring-template pattern.
//
// This class implements a configurable walker for IR trees. There are five configuration options (default values are
// shown in parentheses):
//
// - ComputeStack (false): when true, the walker will push each node onto the `m_ancestors` stack. "Ancestors" is a bit
// of a misnomer, as the first entry will always be the current node.
//
// - DoPreOrder (false): when true, the walker will invoke `TVisitor::PreOrderVisit` with the current node as an
// argument before visiting the node's operands.
//
// - DoPostOrder (false): when true, the walker will invoke `TVisitor::PostOrderVisit` with the current node as an
// argument after visiting the node's operands.
//
// - DoLclVarsOnly (false): when true, the walker will only invoke `TVisitor::PreOrderVisit` for lclVar nodes.
// `DoPreOrder` must be true if this option is true.
//
// - UseExecutionOrder (false): when true, the walker will visit a node's operands in execution order (e.g. if a
// binary operator has the `GTF_REVERSE_OPS` flag set, the second operand will be
// visited before the first).
//
// At least one of `DoPreOrder` and `DoPostOrder` must be specified.
//
// A simple pre-order visitor might look something like the following:
//
// class CountingVisitor final : public GenTreeVisitor<CountingVisitor>
// {
// public:
// enum
// {
// DoPreOrder = true
// };
//
// unsigned m_count;
//
// CountingVisitor(Compiler* compiler)
// : GenTreeVisitor<CountingVisitor>(compiler), m_count(0)
// {
// }
//
// Compiler::fgWalkResult PreOrderVisit(GenTree** use, GenTree* user)
// {
// m_count++;
// return fgWalkResult::WALK_CONTINUE;
// }
// };
//
// This visitor would then be used like so:
//
// CountingVisitor countingVisitor(compiler);
// countingVisitor.WalkTree(root);
//
template <typename TVisitor>
class GenTreeVisitor
{
protected:
typedef Compiler::fgWalkResult fgWalkResult;
enum
{
ComputeStack = false,
DoPreOrder = false,
DoPostOrder = false,
DoLclVarsOnly = false,
UseExecutionOrder = false,
};
Compiler* m_compiler;
ArrayStack<GenTree*> m_ancestors;
GenTreeVisitor(Compiler* compiler) : m_compiler(compiler), m_ancestors(compiler->getAllocator(CMK_ArrayStack))
{
assert(compiler != nullptr);
static_assert_no_msg(TVisitor::DoPreOrder || TVisitor::DoPostOrder);
static_assert_no_msg(!TVisitor::DoLclVarsOnly || TVisitor::DoPreOrder);
}
fgWalkResult PreOrderVisit(GenTree** use, GenTree* user)
{
return fgWalkResult::WALK_CONTINUE;
}
fgWalkResult PostOrderVisit(GenTree** use, GenTree* user)
{
return fgWalkResult::WALK_CONTINUE;
}
public:
fgWalkResult WalkTree(GenTree** use, GenTree* user)
{
assert(use != nullptr);
GenTree* node = *use;
if (TVisitor::ComputeStack)
{
m_ancestors.Push(node);
}
fgWalkResult result = fgWalkResult::WALK_CONTINUE;
if (TVisitor::DoPreOrder && !TVisitor::DoLclVarsOnly)
{
result = reinterpret_cast<TVisitor*>(this)->PreOrderVisit(use, user);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
node = *use;
if ((node == nullptr) || (result == fgWalkResult::WALK_SKIP_SUBTREES))
{
goto DONE;
}
}
switch (node->OperGet())
{
// Leaf lclVars
case GT_LCL_VAR:
case GT_LCL_FLD:
case GT_LCL_VAR_ADDR:
case GT_LCL_FLD_ADDR:
if (TVisitor::DoLclVarsOnly)
{
result = reinterpret_cast<TVisitor*>(this)->PreOrderVisit(use, user);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
FALLTHROUGH;
// Leaf nodes
case GT_CATCH_ARG:
case GT_LABEL:
case GT_FTN_ADDR:
case GT_RET_EXPR:
case GT_CNS_INT:
case GT_CNS_LNG:
case GT_CNS_DBL:
case GT_CNS_STR:
case GT_MEMORYBARRIER:
case GT_JMP:
case GT_JCC:
case GT_SETCC:
case GT_NO_OP:
case GT_START_NONGC:
case GT_START_PREEMPTGC:
case GT_PROF_HOOK:
#if !defined(FEATURE_EH_FUNCLETS)
case GT_END_LFIN:
#endif // !FEATURE_EH_FUNCLETS
case GT_PHI_ARG:
case GT_JMPTABLE:
case GT_CLS_VAR:
case GT_CLS_VAR_ADDR:
case GT_ARGPLACE:
case GT_PHYSREG:
case GT_EMITNOP:
case GT_PINVOKE_PROLOG:
case GT_PINVOKE_EPILOG:
case GT_IL_OFFSET:
break;
// Lclvar unary operators
case GT_STORE_LCL_VAR:
case GT_STORE_LCL_FLD:
if (TVisitor::DoLclVarsOnly)
{
result = reinterpret_cast<TVisitor*>(this)->PreOrderVisit(use, user);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
FALLTHROUGH;
// Standard unary operators
case GT_NOT:
case GT_NEG:
case GT_BSWAP:
case GT_BSWAP16:
case GT_COPY:
case GT_RELOAD:
case GT_ARR_LENGTH:
case GT_CAST:
case GT_BITCAST:
case GT_CKFINITE:
case GT_LCLHEAP:
case GT_ADDR:
case GT_IND:
case GT_OBJ:
case GT_BLK:
case GT_BOX:
case GT_ALLOCOBJ:
case GT_INIT_VAL:
case GT_JTRUE:
case GT_SWITCH:
case GT_NULLCHECK:
case GT_PUTARG_REG:
case GT_PUTARG_STK:
case GT_PUTARG_TYPE:
case GT_RETURNTRAP:
case GT_NOP:
case GT_FIELD:
case GT_RETURN:
case GT_RETFILT:
case GT_RUNTIMELOOKUP:
case GT_ARR_ADDR:
case GT_KEEPALIVE:
case GT_INC_SATURATE:
{
GenTreeUnOp* const unOp = node->AsUnOp();
if (unOp->gtOp1 != nullptr)
{
result = WalkTree(&unOp->gtOp1, unOp);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
break;
}
// Special nodes
case GT_PHI:
for (GenTreePhi::Use& use : node->AsPhi()->Uses())
{
result = WalkTree(&use.NodeRef(), node);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
break;
case GT_FIELD_LIST:
for (GenTreeFieldList::Use& use : node->AsFieldList()->Uses())
{
result = WalkTree(&use.NodeRef(), node);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
break;
case GT_CMPXCHG:
{
GenTreeCmpXchg* const cmpXchg = node->AsCmpXchg();
result = WalkTree(&cmpXchg->gtOpLocation, cmpXchg);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
result = WalkTree(&cmpXchg->gtOpValue, cmpXchg);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
result = WalkTree(&cmpXchg->gtOpComparand, cmpXchg);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
break;
}
case GT_ARR_ELEM:
{
GenTreeArrElem* const arrElem = node->AsArrElem();
result = WalkTree(&arrElem->gtArrObj, arrElem);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
const unsigned rank = arrElem->gtArrRank;
for (unsigned dim = 0; dim < rank; dim++)
{
result = WalkTree(&arrElem->gtArrInds[dim], arrElem);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
break;
}
case GT_ARR_OFFSET:
{
GenTreeArrOffs* const arrOffs = node->AsArrOffs();
result = WalkTree(&arrOffs->gtOffset, arrOffs);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
result = WalkTree(&arrOffs->gtIndex, arrOffs);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
result = WalkTree(&arrOffs->gtArrObj, arrOffs);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
break;
}
case GT_STORE_DYN_BLK:
{
GenTreeStoreDynBlk* const dynBlock = node->AsStoreDynBlk();
GenTree** op1Use = &dynBlock->gtOp1;
GenTree** op2Use = &dynBlock->gtOp2;
GenTree** op3Use = &dynBlock->gtDynamicSize;
result = WalkTree(op1Use, dynBlock);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
result = WalkTree(op2Use, dynBlock);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
result = WalkTree(op3Use, dynBlock);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
break;
}
case GT_CALL:
{
GenTreeCall* const call = node->AsCall();
if (call->gtCallThisArg != nullptr)
{
result = WalkTree(&call->gtCallThisArg->NodeRef(), call);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
for (GenTreeCall::Use& use : call->Args())
{
result = WalkTree(&use.NodeRef(), call);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
for (GenTreeCall::Use& use : call->LateArgs())
{
result = WalkTree(&use.NodeRef(), call);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
if (call->gtCallType == CT_INDIRECT)
{
if (call->gtCallCookie != nullptr)
{
result = WalkTree(&call->gtCallCookie, call);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
result = WalkTree(&call->gtCallAddr, call);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
if (call->gtControlExpr != nullptr)
{
result = WalkTree(&call->gtControlExpr, call);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
break;
}
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
#if defined(FEATURE_SIMD)
case GT_SIMD:
#endif
#if defined(FEATURE_HW_INTRINSICS)
case GT_HWINTRINSIC:
#endif
if (TVisitor::UseExecutionOrder && node->IsReverseOp())
{
assert(node->AsMultiOp()->GetOperandCount() == 2);
result = WalkTree(&node->AsMultiOp()->Op(2), node);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
result = WalkTree(&node->AsMultiOp()->Op(1), node);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
else
{
for (GenTree** use : node->AsMultiOp()->UseEdges())
{
result = WalkTree(use, node);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
}
break;
#endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
// Binary nodes
default:
{
assert(node->OperIsBinary());
GenTreeOp* const op = node->AsOp();
GenTree** op1Use = &op->gtOp1;
GenTree** op2Use = &op->gtOp2;
if (TVisitor::UseExecutionOrder && node->IsReverseOp())
{
std::swap(op1Use, op2Use);
}
if (*op1Use != nullptr)
{
result = WalkTree(op1Use, op);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
if (*op2Use != nullptr)
{
result = WalkTree(op2Use, op);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
break;
}
}
DONE:
// Finally, visit the current node
if (TVisitor::DoPostOrder)
{
result = reinterpret_cast<TVisitor*>(this)->PostOrderVisit(use, user);
}
if (TVisitor::ComputeStack)
{
m_ancestors.Pop();
}
return result;
}
};
template <bool computeStack, bool doPreOrder, bool doPostOrder, bool doLclVarsOnly, bool useExecutionOrder>
class GenericTreeWalker final
: public GenTreeVisitor<GenericTreeWalker<computeStack, doPreOrder, doPostOrder, doLclVarsOnly, useExecutionOrder>>
{
public:
enum
{
ComputeStack = computeStack,
DoPreOrder = doPreOrder,
DoPostOrder = doPostOrder,
DoLclVarsOnly = doLclVarsOnly,
UseExecutionOrder = useExecutionOrder,
};
private:
Compiler::fgWalkData* m_walkData;
public:
GenericTreeWalker(Compiler::fgWalkData* walkData)
: GenTreeVisitor<GenericTreeWalker<computeStack, doPreOrder, doPostOrder, doLclVarsOnly, useExecutionOrder>>(
walkData->compiler)
, m_walkData(walkData)
{
assert(walkData != nullptr);
if (computeStack)
{
walkData->parentStack = &this->m_ancestors;
}
}
Compiler::fgWalkResult PreOrderVisit(GenTree** use, GenTree* user)
{
m_walkData->parent = user;
return m_walkData->wtprVisitorFn(use, m_walkData);
}
Compiler::fgWalkResult PostOrderVisit(GenTree** use, GenTree* user)
{
m_walkData->parent = user;
return m_walkData->wtpoVisitorFn(use, m_walkData);
}
};
// A dominator tree visitor implemented using the curiously-recurring-template pattern, similar to GenTreeVisitor.
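//
// A simple visitor that counts the blocks it visits might look something like the following
// (the names here are illustrative only, not part of the JIT):
//
// class BlockCountingDomTreeVisitor final : public DomTreeVisitor<BlockCountingDomTreeVisitor>
// {
// public:
// unsigned m_count;
//
// BlockCountingDomTreeVisitor(Compiler* compiler, DomTreeNode* domTree)
// : DomTreeVisitor<BlockCountingDomTreeVisitor>(compiler, domTree), m_count(0)
// {
// }
//
// void PreOrderVisit(BasicBlock* block)
// {
// m_count++;
// }
// };
//
// and would then be used like so:
//
// BlockCountingDomTreeVisitor visitor(compiler, domTree);
// visitor.WalkTree();
//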
template <typename TVisitor>
class DomTreeVisitor
{
protected:
Compiler* const m_compiler;
DomTreeNode* const m_domTree;
DomTreeVisitor(Compiler* compiler, DomTreeNode* domTree) : m_compiler(compiler), m_domTree(domTree)
{
}
void Begin()
{
}
void PreOrderVisit(BasicBlock* block)
{
}
void PostOrderVisit(BasicBlock* block)
{
}
void End()
{
}
public:
//------------------------------------------------------------------------
// WalkTree: Walk the dominator tree, starting from fgFirstBB.
//
// Notes:
// This performs a non-recursive, non-allocating walk of the tree by using
// DomTreeNode's firstChild and nextSibling links to locate the children of
// a node and BasicBlock's bbIDom parent link to go back up the tree when
// no more children are left.
//
// Forests are also supported, provided that all the roots are chained via
// DomTreeNode::nextSibling to fgFirstBB.
//
void WalkTree()
{
static_cast<TVisitor*>(this)->Begin();
for (BasicBlock *next, *block = m_compiler->fgFirstBB; block != nullptr; block = next)
{
static_cast<TVisitor*>(this)->PreOrderVisit(block);
next = m_domTree[block->bbNum].firstChild;
if (next != nullptr)
{
assert(next->bbIDom == block);
continue;
}
do
{
static_cast<TVisitor*>(this)->PostOrderVisit(block);
next = m_domTree[block->bbNum].nextSibling;
if (next != nullptr)
{
assert(next->bbIDom == block->bbIDom);
break;
}
block = block->bbIDom;
} while (block != nullptr);
}
static_cast<TVisitor*>(this)->End();
}
};
// EHClauses: adapter class for forward iteration of the exception handling table using range-based `for`, e.g.:
// for (EHblkDsc* const ehDsc : EHClauses(compiler))
//
class EHClauses
{
EHblkDsc* m_begin;
EHblkDsc* m_end;
// Forward iterator for the exception handling table entries. Iteration is in table order.
//
class iterator
{
EHblkDsc* m_ehDsc;
public:
iterator(EHblkDsc* ehDsc) : m_ehDsc(ehDsc)
{
}
EHblkDsc* operator*() const
{
return m_ehDsc;
}
iterator& operator++()
{
++m_ehDsc;
return *this;
}
bool operator!=(const iterator& i) const
{
return m_ehDsc != i.m_ehDsc;
}
};
public:
EHClauses(Compiler* comp) : m_begin(comp->compHndBBtab), m_end(comp->compHndBBtab + comp->compHndBBtabCount)
{
assert((m_begin != nullptr) || (m_begin == m_end));
}
iterator begin() const
{
return iterator(m_begin);
}
iterator end() const
{
return iterator(m_end);
}
};
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Miscellaneous Compiler stuff XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
// Values used to mark the types a stack slot is used for
const unsigned TYPE_REF_INT = 0x01; // slot used as a 32-bit int
const unsigned TYPE_REF_LNG = 0x02; // slot used as a 64-bit long
const unsigned TYPE_REF_FLT = 0x04; // slot used as a 32-bit float
const unsigned TYPE_REF_DBL = 0x08; // slot used as a 64-bit float
const unsigned TYPE_REF_PTR = 0x10; // slot used as a 32-bit pointer
const unsigned TYPE_REF_BYR = 0x20; // slot used as a byref pointer
const unsigned TYPE_REF_STC = 0x40; // slot used as a struct
const unsigned TYPE_REF_TYPEMASK = 0x7F; // bits that represent the type
// const unsigned TYPE_REF_ADDR_TAKEN = 0x80; // slot's address was taken
/*****************************************************************************
*
* Variables to keep track of total code amounts.
*/
#if DISPLAY_SIZES
extern size_t grossVMsize;
extern size_t grossNCsize;
extern size_t totalNCsize;
extern unsigned genMethodICnt;
extern unsigned genMethodNCnt;
extern size_t gcHeaderISize;
extern size_t gcPtrMapISize;
extern size_t gcHeaderNSize;
extern size_t gcPtrMapNSize;
#endif // DISPLAY_SIZES
/*****************************************************************************
*
* Variables to keep track of basic block counts (more data on 1 BB methods)
*/
#if COUNT_BASIC_BLOCKS
extern Histogram bbCntTable;
extern Histogram bbOneBBSizeTable;
#endif
/*****************************************************************************
*
* Used by optFindNaturalLoops to gather statistical information such as
* - total number of natural loops
* - number of loops with 1, 2, ... exit conditions
* - number of loops that have an iterator (for like)
* - number of loops that have a constant iterator
*/
#if COUNT_LOOPS
extern unsigned totalLoopMethods; // counts the total number of methods that have natural loops
extern unsigned maxLoopsPerMethod; // counts the maximum number of loops a method has
extern unsigned totalLoopOverflows; // # of methods that identified more loops than we can represent
extern unsigned totalLoopCount; // counts the total number of natural loops
extern unsigned totalUnnatLoopCount; // counts the total number of (not-necessarily natural) loops
extern unsigned totalUnnatLoopOverflows; // # of methods that identified more unnatural loops than we can represent
extern unsigned iterLoopCount; // counts the # of loops with an iterator (for like)
extern unsigned constIterLoopCount; // counts the # of loops with a constant iterator (for like)
extern bool hasMethodLoops; // flag to keep track if we already counted a method as having loops
extern unsigned loopsThisMethod; // counts the number of loops in the current method
extern bool loopOverflowThisMethod; // True if we exceeded the max # of loops in the method.
extern Histogram loopCountTable; // Histogram of loop counts
extern Histogram loopExitCountTable; // Histogram of loop exit counts
#endif // COUNT_LOOPS
/*****************************************************************************
* variables to keep track of how many iterations we go in a dataflow pass
*/
#if DATAFLOW_ITER
extern unsigned CSEiterCount; // counts the # of iteration for the CSE dataflow
extern unsigned CFiterCount; // counts the # of iteration for the Const Folding dataflow
#endif // DATAFLOW_ITER
#if MEASURE_BLOCK_SIZE
extern size_t genFlowNodeSize;
extern size_t genFlowNodeCnt;
#endif // MEASURE_BLOCK_SIZE
#if MEASURE_NODE_SIZE
struct NodeSizeStats
{
void Init()
{
genTreeNodeCnt = 0;
genTreeNodeSize = 0;
genTreeNodeActualSize = 0;
}
// Count of tree nodes allocated.
unsigned __int64 genTreeNodeCnt;
// The size we allocate.
unsigned __int64 genTreeNodeSize;
// The actual size of the node. Note that the actual size will likely be smaller
// than the allocated size, but we sometimes use SetOper()/ChangeOper() to change
// a smaller node to a larger one. TODO-Cleanup: add stats on
// SetOper()/ChangeOper() usage to quantify this.
unsigned __int64 genTreeNodeActualSize;
};
extern NodeSizeStats genNodeSizeStats; // Total node size stats
extern NodeSizeStats genNodeSizeStatsPerFunc; // Per-function node size stats
extern Histogram genTreeNcntHist;
extern Histogram genTreeNsizHist;
#endif // MEASURE_NODE_SIZE
/*****************************************************************************
* Count fatal errors (including noway_asserts).
*/
#if MEASURE_FATAL
extern unsigned fatal_badCode;
extern unsigned fatal_noWay;
extern unsigned fatal_implLimitation;
extern unsigned fatal_NOMEM;
extern unsigned fatal_noWayAssertBody;
#ifdef DEBUG
extern unsigned fatal_noWayAssertBodyArgs;
#endif // DEBUG
extern unsigned fatal_NYI;
#endif // MEASURE_FATAL
/*****************************************************************************
* Codegen
*/
#ifdef TARGET_XARCH
const instruction INS_SHIFT_LEFT_LOGICAL = INS_shl;
const instruction INS_SHIFT_RIGHT_LOGICAL = INS_shr;
const instruction INS_SHIFT_RIGHT_ARITHM = INS_sar;
const instruction INS_AND = INS_and;
const instruction INS_OR = INS_or;
const instruction INS_XOR = INS_xor;
const instruction INS_NEG = INS_neg;
const instruction INS_TEST = INS_test;
const instruction INS_MUL = INS_imul;
const instruction INS_SIGNED_DIVIDE = INS_idiv;
const instruction INS_UNSIGNED_DIVIDE = INS_div;
const instruction INS_BREAKPOINT = INS_int3;
const instruction INS_ADDC = INS_adc;
const instruction INS_SUBC = INS_sbb;
const instruction INS_NOT = INS_not;
#endif // TARGET_XARCH
#ifdef TARGET_ARM
const instruction INS_SHIFT_LEFT_LOGICAL = INS_lsl;
const instruction INS_SHIFT_RIGHT_LOGICAL = INS_lsr;
const instruction INS_SHIFT_RIGHT_ARITHM = INS_asr;
const instruction INS_AND = INS_and;
const instruction INS_OR = INS_orr;
const instruction INS_XOR = INS_eor;
const instruction INS_NEG = INS_rsb;
const instruction INS_TEST = INS_tst;
const instruction INS_MUL = INS_mul;
const instruction INS_MULADD = INS_mla;
const instruction INS_SIGNED_DIVIDE = INS_sdiv;
const instruction INS_UNSIGNED_DIVIDE = INS_udiv;
const instruction INS_BREAKPOINT = INS_bkpt;
const instruction INS_ADDC = INS_adc;
const instruction INS_SUBC = INS_sbc;
const instruction INS_NOT = INS_mvn;
const instruction INS_ABS = INS_vabs;
const instruction INS_SQRT = INS_vsqrt;
#endif // TARGET_ARM
#ifdef TARGET_ARM64
const instruction INS_MULADD = INS_madd;
inline const instruction INS_BREAKPOINT_osHelper()
{
// GDB needs the encoding of brk #0
// Windbg needs the encoding of brk #F000
return TargetOS::IsUnix ? INS_brk_unix : INS_brk_windows;
}
#define INS_BREAKPOINT INS_BREAKPOINT_osHelper()
const instruction INS_ABS = INS_fabs;
const instruction INS_SQRT = INS_fsqrt;
#endif // TARGET_ARM64
/*****************************************************************************/
extern const BYTE genTypeSizes[];
extern const BYTE genTypeAlignments[];
extern const BYTE genTypeStSzs[];
extern const BYTE genActualTypes[];
/*****************************************************************************/
#ifdef DEBUG
void dumpConvertedVarSet(Compiler* comp, VARSET_VALARG_TP vars);
#endif // DEBUG
#include "compiler.hpp" // All the shared inline functions
/*****************************************************************************/
#endif //_COMPILER_H_
/*****************************************************************************/
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Compiler XX
XX XX
XX Represents the method data we are currently JIT-compiling. XX
XX An instance of this class is created for every method we JIT. XX
XX This contains all the info needed for the method. So allocating XX
XX a new instance per method makes it thread-safe. XX
XX It should be used to do all the memory management for the compiler run. XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
/*****************************************************************************/
#ifndef _COMPILER_H_
#define _COMPILER_H_
/*****************************************************************************/
#include "jit.h"
#include "opcode.h"
#include "varset.h"
#include "jitstd.h"
#include "jithashtable.h"
#include "gentree.h"
#include "debuginfo.h"
#include "lir.h"
#include "block.h"
#include "inline.h"
#include "jiteh.h"
#include "instr.h"
#include "regalloc.h"
#include "sm.h"
#include "cycletimer.h"
#include "blockset.h"
#include "arraystack.h"
#include "hashbv.h"
#include "jitexpandarray.h"
#include "tinyarray.h"
#include "valuenum.h"
#include "jittelemetry.h"
#include "namedintrinsiclist.h"
#ifdef LATE_DISASM
#include "disasm.h"
#endif
#include "codegeninterface.h"
#include "regset.h"
#include "jitgcinfo.h"
#if DUMP_GC_TABLES && defined(JIT32_GCENCODER)
#include "gcdump.h"
#endif
#include "emit.h"
#include "hwintrinsic.h"
#include "simd.h"
#include "simdashwintrinsic.h"
// This is only used locally in the JIT to indicate that
// a verification block should be inserted
#define SEH_VERIFICATION_EXCEPTION 0xe0564552 // VER
/*****************************************************************************
* Forward declarations
*/
struct InfoHdr; // defined in GCInfo.h
struct escapeMapping_t; // defined in fgdiagnostic.cpp
class emitter; // defined in emit.h
struct ShadowParamVarInfo; // defined in GSChecks.cpp
struct InitVarDscInfo; // defined in register_arg_convention.h
class FgStack; // defined in fgbasic.cpp
class Instrumentor; // defined in fgprofile.cpp
class SpanningTreeVisitor; // defined in fgprofile.cpp
class CSE_DataFlow; // defined in OptCSE.cpp
class OptBoolsDsc; // defined in optimizer.cpp
#ifdef DEBUG
struct IndentStack;
#endif
class Lowering; // defined in lower.h
// The following are defined in this file, Compiler.h
class Compiler;
/*****************************************************************************
* Unwind info
*/
#include "unwind.h"
/*****************************************************************************/
//
// Declare global operator new overloads that use the compiler's arena allocator
//
// I wanted to make the second argument optional, with default = CMK_Unknown, but that
// caused these to be ambiguous with the global placement new operators.
void* __cdecl operator new(size_t n, Compiler* context, CompMemKind cmk);
void* __cdecl operator new[](size_t n, Compiler* context, CompMemKind cmk);
void* __cdecl operator new(size_t n, void* p, const jitstd::placement_t& syntax_difference);
// Requires the definitions of "operator new" so including "LoopCloning.h" after the definitions.
#include "loopcloning.h"
/*****************************************************************************/
/* This is included here and not earlier as it needs the definition of "CSE"
* which is defined in the section above */
/*****************************************************************************/
unsigned genLog2(unsigned value);
unsigned genLog2(unsigned __int64 value);
unsigned ReinterpretHexAsDecimal(unsigned in);
/*****************************************************************************/
const unsigned FLG_CCTOR = (CORINFO_FLG_CONSTRUCTOR | CORINFO_FLG_STATIC);
#ifdef DEBUG
const int BAD_STK_OFFS = 0xBAADF00D; // for LclVarDsc::lvStkOffs
#endif
//------------------------------------------------------------------------
// HFA info shared by LclVarDsc and fgArgTabEntry
//------------------------------------------------------------------------
inline bool IsHfa(CorInfoHFAElemType kind)
{
return kind != CORINFO_HFA_ELEM_NONE;
}
inline var_types HfaTypeFromElemKind(CorInfoHFAElemType kind)
{
switch (kind)
{
case CORINFO_HFA_ELEM_FLOAT:
return TYP_FLOAT;
case CORINFO_HFA_ELEM_DOUBLE:
return TYP_DOUBLE;
#ifdef FEATURE_SIMD
case CORINFO_HFA_ELEM_VECTOR64:
return TYP_SIMD8;
case CORINFO_HFA_ELEM_VECTOR128:
return TYP_SIMD16;
#endif
case CORINFO_HFA_ELEM_NONE:
return TYP_UNDEF;
default:
assert(!"Invalid HfaElemKind");
return TYP_UNDEF;
}
}
inline CorInfoHFAElemType HfaElemKindFromType(var_types type)
{
switch (type)
{
case TYP_FLOAT:
return CORINFO_HFA_ELEM_FLOAT;
case TYP_DOUBLE:
return CORINFO_HFA_ELEM_DOUBLE;
#ifdef FEATURE_SIMD
case TYP_SIMD8:
return CORINFO_HFA_ELEM_VECTOR64;
case TYP_SIMD16:
return CORINFO_HFA_ELEM_VECTOR128;
#endif
case TYP_UNDEF:
return CORINFO_HFA_ELEM_NONE;
default:
assert(!"Invalid HFA Type");
return CORINFO_HFA_ELEM_NONE;
}
}
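// Note (illustrative): the two mappings above are inverses of each other for the supported types, e.g.
// HfaTypeFromElemKind(HfaElemKindFromType(TYP_DOUBLE)) == TYP_DOUBLE.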
// The following holds the Local var info (scope information)
typedef const char* VarName; // Actual ASCII string
struct VarScopeDsc
{
unsigned vsdVarNum; // (remapped) LclVarDsc number
unsigned vsdLVnum; // 'which' in eeGetLVinfo().
// Also, it is the index of this entry in the info.compVarScopes array,
// which is useful since the array is also accessed via the
// compEnterScopeList and compExitScopeList sorted arrays.
IL_OFFSET vsdLifeBeg; // instr offset of beg of life
IL_OFFSET vsdLifeEnd; // instr offset of end of life
#ifdef DEBUG
VarName vsdName; // name of the var
#endif
};
// This class stores information associated with a LclVar SSA definition.
class LclSsaVarDsc
{
// The basic block where the definition occurs. Definitions of uninitialized variables
// are considered to occur at the start of the first basic block (fgFirstBB).
//
// TODO-Cleanup: In the case of uninitialized variables the block is set to nullptr by
// SsaBuilder and changed to fgFirstBB during value numbering. It would be useful to
// investigate and perhaps eliminate this rather unexpected behavior.
BasicBlock* m_block;
// The GT_ASG node that generates the definition, or nullptr for definitions
// of uninitialized variables.
GenTreeOp* m_asg;
public:
LclSsaVarDsc() : m_block(nullptr), m_asg(nullptr)
{
}
LclSsaVarDsc(BasicBlock* block) : m_block(block), m_asg(nullptr)
{
}
LclSsaVarDsc(BasicBlock* block, GenTreeOp* asg) : m_block(block), m_asg(asg)
{
assert((asg == nullptr) || asg->OperIs(GT_ASG));
}
BasicBlock* GetBlock() const
{
return m_block;
}
void SetBlock(BasicBlock* block)
{
m_block = block;
}
GenTreeOp* GetAssignment() const
{
return m_asg;
}
void SetAssignment(GenTreeOp* asg)
{
assert((asg == nullptr) || asg->OperIs(GT_ASG));
m_asg = asg;
}
ValueNumPair m_vnPair;
};
// This class stores information associated with a memory SSA definition.
class SsaMemDef
{
public:
ValueNumPair m_vnPair;
};
//------------------------------------------------------------------------
// SsaDefArray: A resizable array of SSA definitions.
//
// Unlike an ordinary resizable array implementation, this allows only element
// addition (by calling AllocSsaNum) and has special handling for RESERVED_SSA_NUM
// (basically it's a 1-based array). The array doesn't impose any particular
// requirements on the elements it stores and AllocSsaNum forwards its arguments
// to the array element constructor; this way the array supports both LclSsaVarDsc
// and SsaMemDef elements.
//
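// A minimal usage sketch (illustrative only; "alloc" is assumed to be a CompAllocator):
//
// SsaDefArray<SsaMemDef> defs;
// unsigned ssaNum = defs.AllocSsaNum(alloc); // the first allocation returns SsaConfig::FIRST_SSA_NUM
// SsaMemDef* def = defs.GetSsaDef(ssaNum); // maps the SSA number back to its definition
//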
template <typename T>
class SsaDefArray
{
T* m_array;
unsigned m_arraySize;
unsigned m_count;
static_assert_no_msg(SsaConfig::RESERVED_SSA_NUM == 0);
static_assert_no_msg(SsaConfig::FIRST_SSA_NUM == 1);
// Get the minimum valid SSA number.
unsigned GetMinSsaNum() const
{
return SsaConfig::FIRST_SSA_NUM;
}
// Increase (double) the size of the array.
void GrowArray(CompAllocator alloc)
{
unsigned oldSize = m_arraySize;
unsigned newSize = max(2, oldSize * 2);
T* newArray = alloc.allocate<T>(newSize);
for (unsigned i = 0; i < oldSize; i++)
{
newArray[i] = m_array[i];
}
m_array = newArray;
m_arraySize = newSize;
}
public:
// Construct an empty SsaDefArray.
SsaDefArray() : m_array(nullptr), m_arraySize(0), m_count(0)
{
}
// Reset the array (used only if the SSA form is reconstructed).
void Reset()
{
m_count = 0;
}
// Allocate a new SSA number (starting with SsaConfig::FIRST_SSA_NUM).
template <class... Args>
unsigned AllocSsaNum(CompAllocator alloc, Args&&... args)
{
if (m_count == m_arraySize)
{
GrowArray(alloc);
}
unsigned ssaNum = GetMinSsaNum() + m_count;
m_array[m_count++] = T(std::forward<Args>(args)...);
// Ensure that the first SSA number we allocate is SsaConfig::FIRST_SSA_NUM
assert((ssaNum == SsaConfig::FIRST_SSA_NUM) || (m_count > 1));
return ssaNum;
}
// Get the number of SSA definitions in the array.
unsigned GetCount() const
{
return m_count;
}
// Get a pointer to the SSA definition at the specified index.
T* GetSsaDefByIndex(unsigned index)
{
assert(index < m_count);
return &m_array[index];
}
// Check if the specified SSA number is valid.
bool IsValidSsaNum(unsigned ssaNum) const
{
return (GetMinSsaNum() <= ssaNum) && (ssaNum < (GetMinSsaNum() + m_count));
}
// Get a pointer to the SSA definition associated with the specified SSA number.
T* GetSsaDef(unsigned ssaNum)
{
assert(ssaNum != SsaConfig::RESERVED_SSA_NUM);
return GetSsaDefByIndex(ssaNum - GetMinSsaNum());
}
// Get an SSA number associated with the specified SSA def (that must be in this array).
unsigned GetSsaNum(T* ssaDef)
{
assert((m_array <= ssaDef) && (ssaDef < &m_array[m_count]));
return GetMinSsaNum() + static_cast<unsigned>(ssaDef - &m_array[0]);
}
};
enum RefCountState
{
RCS_INVALID, // not valid to get/set ref counts
RCS_EARLY, // early counts for struct promotion and struct passing
RCS_NORMAL, // normal ref counts (from lvaMarkRefs onward)
};
#ifdef DEBUG
// Reasons why we can't enregister a local.
enum class DoNotEnregisterReason
{
None,
AddrExposed, // the address of this local is exposed.
DontEnregStructs, // struct enregistration is disabled.
NotRegSizeStruct, // the struct size does not match any register size, usually the struct size is too big.
LocalField, // the local is accessed with LCL_FLD, note we can do it not only for struct locals.
VMNeedsStackAddr,
LiveInOutOfHandler, // the local is live in and out of an exception handler and is not single def.
BlockOp, // Is read or written via a block operation.
IsStructArg, // Is a struct passed as an argument in a way that requires a stack location.
DepField, // It is a field of a dependently promoted struct
NoRegVars, // opts.compFlags & CLFLG_REGVAR is not set
MinOptsGC, // It is a GC Ref and we are compiling MinOpts
#if !defined(TARGET_64BIT)
LongParamField, // It is a decomposed field of a long parameter.
#endif
#ifdef JIT32_GCENCODER
PinningRef,
#endif
LclAddrNode, // the local is accessed with LCL_ADDR_VAR/FLD.
CastTakesAddr,
StoreBlkSrc, // the local is used as STORE_BLK source.
OneAsgRetyping, // fgMorphOneAsgBlockOp prevents this local from being enregistered.
SwizzleArg, // the local is passed using LCL_FLD as another type.
BlockOpRet, // the struct is returned and it is promoted, or there is a cast.
ReturnSpCheck, // the local is used to do SP check
SimdUserForcesDep, // a promoted struct was used by a SIMD/HWI node; it must be dependently promoted
HiddenBufferStructArg // the argument is a hidden return buffer passed to a method.
};
enum class AddressExposedReason
{
NONE,
PARENT_EXPOSED, // This is a promoted field but the parent is exposed.
TOO_CONSERVATIVE, // Marked as exposed to be conservative; fix these places.
ESCAPE_ADDRESS, // The address is escaping, for example, passed as call argument.
WIDE_INDIR, // We access via indirection with wider type.
OSR_EXPOSED, // It was exposed in the original method; OSR has to repeat it.
STRESS_LCL_FLD, // Stress mode replaces localVar with localFld and makes them addrExposed.
COPY_FLD_BY_FLD, // Field by field copy takes the address of the local, can be fixed.
DISPATCH_RET_BUF // Caller return buffer dispatch.
};
#endif // DEBUG
class LclVarDsc
{
public:
// The constructor. Most things can just be zero'ed.
//
// Initialize the ArgRegs to REG_STK.
// Morph will update if this local is passed in a register.
LclVarDsc()
: _lvArgReg(REG_STK)
,
#if FEATURE_MULTIREG_ARGS
_lvOtherArgReg(REG_STK)
,
#endif // FEATURE_MULTIREG_ARGS
lvClassHnd(NO_CLASS_HANDLE)
, lvRefBlks(BlockSetOps::UninitVal())
, lvPerSsaData()
{
}
// note this only packs because var_types is a typedef of unsigned char
var_types lvType : 5; // TYP_INT/LONG/FLOAT/DOUBLE/REF
unsigned char lvIsParam : 1; // is this a parameter?
unsigned char lvIsRegArg : 1; // is this an argument that was passed by register?
unsigned char lvFramePointerBased : 1; // 0 = off of REG_SPBASE (e.g., ESP), 1 = off of REG_FPBASE (e.g., EBP)
unsigned char lvOnFrame : 1; // (part of) the variable lives on the frame
unsigned char lvRegister : 1; // assigned to live in a register? For RyuJIT backend, this is only set if the
// variable is in the same register for the entire function.
unsigned char lvTracked : 1; // is this a tracked variable?
bool lvTrackedNonStruct()
{
return lvTracked && lvType != TYP_STRUCT;
}
unsigned char lvPinned : 1; // is this a pinned variable?
unsigned char lvMustInit : 1; // must be initialized
private:
bool m_addrExposed : 1; // The address of this variable is "exposed" -- passed as an argument, stored in a
// global location, etc.
// We cannot reason reliably about the value of the variable.
public:
unsigned char lvDoNotEnregister : 1; // Do not enregister this variable.
unsigned char lvFieldAccessed : 1; // The var is a struct local, and a field of the variable is accessed. Affects
// struct promotion.
unsigned char lvLiveInOutOfHndlr : 1; // The variable is live in or out of an exception handler, and therefore must
// be on the stack (at least at those boundaries.)
unsigned char lvInSsa : 1; // The variable is in SSA form (set by SsaBuilder)
unsigned char lvIsCSE : 1; // Indicates if this LclVar is a CSE variable.
unsigned char lvHasLdAddrOp : 1; // has ldloca or ldarga opcode on this local.
unsigned char lvStackByref : 1; // This is a compiler temporary of TYP_BYREF that is known to point into our local
// stack frame.
unsigned char lvHasILStoreOp : 1; // there is at least one STLOC or STARG on this local
unsigned char lvHasMultipleILStoreOp : 1; // there is more than one STLOC on this local
unsigned char lvIsTemp : 1; // Short-lifetime compiler temp
#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
unsigned char lvIsImplicitByRef : 1; // Set if the argument is an implicit byref.
#endif // defined(TARGET_AMD64) || defined(TARGET_ARM64)
unsigned char lvIsBoolean : 1; // set if variable is boolean
unsigned char lvSingleDef : 1; // variable has a single def
// before lvaMarkLocalVars: identifies ref type locals that can get type updates
// after lvaMarkLocalVars: identifies locals that are suitable for optAddCopies
unsigned char lvSingleDefRegCandidate : 1; // variable has a single def and hence is a register candidate
// Currently, this is only used to decide if an EH variable can be
// a register candidate or not.
unsigned char lvDisqualifySingleDefRegCandidate : 1; // tracks variables that are disqualified from register
// candidacy
unsigned char lvSpillAtSingleDef : 1; // variable has a single def (as determined by LSRA interval scan)
// and is spilled making it candidate to spill right after the
// first (and only) definition.
// Note: We cannot reuse lvSingleDefRegCandidate because it is set
// in an earlier phase and the information might not be appropriate
// in LSRA.
unsigned char lvDisqualify : 1; // variable is no longer OK for add copy optimization
unsigned char lvVolatileHint : 1; // hint for AssertionProp
#ifndef TARGET_64BIT
unsigned char lvStructDoubleAlign : 1; // Must we double align this struct?
#endif // !TARGET_64BIT
#ifdef TARGET_64BIT
unsigned char lvQuirkToLong : 1; // Quirk to allocate this LclVar as a 64-bit long
#endif
#ifdef DEBUG
unsigned char lvKeepType : 1; // Don't change the type of this variable
unsigned char lvNoLclFldStress : 1; // Can't apply local field stress on this one
#endif
unsigned char lvIsPtr : 1; // Might this be used in an address computation? (used by buffer overflow security
// checks)
unsigned char lvIsUnsafeBuffer : 1; // Does this contain an unsafe buffer requiring buffer overflow security checks?
unsigned char lvPromoted : 1; // True when this local is a promoted struct, a normed struct, or a "split" long on a
// 32-bit target. For implicit byref parameters, this gets hijacked between
// fgRetypeImplicitByRefArgs and fgMarkDemotedImplicitByRefArgs to indicate whether
// references to the arg are being rewritten as references to a promoted shadow local.
unsigned char lvIsStructField : 1; // Is this local var a field of a promoted struct local?
unsigned char lvOverlappingFields : 1; // True when we have a struct with possibly overlapping fields
unsigned char lvContainsHoles : 1; // True when we have a promoted struct that contains holes
unsigned char lvCustomLayout : 1; // True when this struct has "CustomLayout"
unsigned char lvIsMultiRegArg : 1; // true if this is a multireg LclVar struct used in an argument context
unsigned char lvIsMultiRegRet : 1; // true if this is a multireg LclVar struct assigned from a multireg call
#ifdef DEBUG
unsigned char lvHiddenBufferStructArg : 1; // True when this struct (or its field) are passed as hidden buffer
// pointer.
#endif
#ifdef FEATURE_HFA_FIELDS_PRESENT
CorInfoHFAElemType _lvHfaElemKind : 3; // What kind of an HFA this is (CORINFO_HFA_ELEM_NONE if it is not an HFA).
#endif // FEATURE_HFA_FIELDS_PRESENT
#ifdef DEBUG
// TODO-Cleanup: See the note on lvSize() - this flag is only in use by asserts that are checking for struct
// types, and is needed because of cases where TYP_STRUCT is bashed to an integral type.
// Consider cleaning this up so this workaround is not required.
unsigned char lvUnusedStruct : 1; // All references to this promoted struct are through its field locals.
// I.e. there is no longer any reference to the struct directly.
// In this case we can simply remove this struct local.
unsigned char lvUndoneStructPromotion : 1; // The struct promotion was undone and hence there should be no
// reference to the fields of this struct.
#endif
unsigned char lvLRACandidate : 1; // Tracked for linear scan register allocation purposes
#ifdef FEATURE_SIMD
// Note that both SIMD vector args and locals are marked as lvSIMDType = true, but the
// type of an arg node is TYP_BYREF and a local node is TYP_SIMD*.
unsigned char lvSIMDType : 1; // This is a SIMD struct
unsigned char lvUsedInSIMDIntrinsic : 1; // This tells whether the lclvar is used by a SIMD intrinsic
unsigned char lvSimdBaseJitType : 5; // Note: this only packs because CorInfoType has less than 32 entries
CorInfoType GetSimdBaseJitType() const
{
return (CorInfoType)lvSimdBaseJitType;
}
void SetSimdBaseJitType(CorInfoType simdBaseJitType)
{
assert(simdBaseJitType < (1 << 5));
lvSimdBaseJitType = (unsigned char)simdBaseJitType;
}
var_types GetSimdBaseType() const;
#endif // FEATURE_SIMD
unsigned char lvRegStruct : 1; // This is a reg-sized non-field-addressed struct.
unsigned char lvClassIsExact : 1; // lvClassHandle is the exact type
#ifdef DEBUG
unsigned char lvClassInfoUpdated : 1; // true if this var has updated class handle or exactness
#endif
unsigned char lvImplicitlyReferenced : 1; // true if there are non-IR references to this local (prolog, epilog, gc,
// eh)
unsigned char lvSuppressedZeroInit : 1; // local needs zero init if we transform tail call to loop
unsigned char lvHasExplicitInit : 1; // The local is explicitly initialized and doesn't need zero initialization in
// the prolog. If the local has gc pointers, there are no gc-safe points
// between the prolog and the explicit initialization.
unsigned char lvIsOSRLocal : 1; // Root method local in an OSR method. Any stack home will be on the Tier0 frame.
// Initial value will be defined by Tier0. Requires special handing in prolog.
union {
unsigned lvFieldLclStart; // The index of the local var representing the first field in the promoted struct
// local. For implicit byref parameters, this gets hijacked between
// fgRetypeImplicitByRefArgs and fgMarkDemotedImplicitByRefArgs to point to the
// struct local created to model the parameter's struct promotion, if any.
unsigned lvParentLcl; // The index of the local var representing the parent (i.e. the promoted struct local).
// Valid on promoted struct local fields.
};
unsigned char lvFieldCnt; // Number of fields in the promoted VarDsc.
unsigned char lvFldOffset; // offset of this promoted field within the parent struct
unsigned char lvFldOrdinal; // ordinal of this promoted field within the parent struct
#ifdef DEBUG
unsigned char lvSingleDefDisqualifyReason = 'H';
#endif
#if FEATURE_MULTIREG_ARGS
regNumber lvRegNumForSlot(unsigned slotNum)
{
if (slotNum == 0)
{
return (regNumber)_lvArgReg;
}
else if (slotNum == 1)
{
return GetOtherArgReg();
}
else
{
assert(false && "Invalid slotNum!");
}
unreached();
}
#endif // FEATURE_MULTIREG_ARGS
CorInfoHFAElemType GetLvHfaElemKind() const
{
#ifdef FEATURE_HFA_FIELDS_PRESENT
return _lvHfaElemKind;
#else
NOWAY_MSG("GetLvHfaElemKind");
return CORINFO_HFA_ELEM_NONE;
#endif // FEATURE_HFA_FIELDS_PRESENT
}
void SetLvHfaElemKind(CorInfoHFAElemType elemKind)
{
#ifdef FEATURE_HFA_FIELDS_PRESENT
_lvHfaElemKind = elemKind;
#else
NOWAY_MSG("SetLvHfaElemKind");
#endif // FEATURE_HFA_FIELDS_PRESENT
}
bool lvIsHfa() const
{
if (GlobalJitOptions::compFeatureHfa)
{
return IsHfa(GetLvHfaElemKind());
}
else
{
return false;
}
}
bool lvIsHfaRegArg() const
{
if (GlobalJitOptions::compFeatureHfa)
{
return lvIsRegArg && lvIsHfa();
}
else
{
return false;
}
}
//------------------------------------------------------------------------------
// lvHfaSlots: Get the number of slots used by an HFA local
//
// Return Value:
// On Arm64 - Returns 1-4 indicating the number of register slots used by the HFA
// On Arm32 - Returns the total number of single FP register slots used by the HFA, max is 8
//
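// For example, an HFA of three doubles yields 3 slots on Arm64 but 6 single-FP
// register slots on Arm32 (each double occupies two float slots there).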
unsigned lvHfaSlots() const
{
assert(lvIsHfa());
assert(varTypeIsStruct(lvType));
unsigned slots = 0;
#ifdef TARGET_ARM
slots = lvExactSize / sizeof(float);
assert(slots <= 8);
#elif defined(TARGET_ARM64)
switch (GetLvHfaElemKind())
{
case CORINFO_HFA_ELEM_NONE:
assert(!"lvHfaSlots called for non-HFA");
break;
case CORINFO_HFA_ELEM_FLOAT:
assert((lvExactSize % 4) == 0);
slots = lvExactSize >> 2;
break;
case CORINFO_HFA_ELEM_DOUBLE:
case CORINFO_HFA_ELEM_VECTOR64:
assert((lvExactSize % 8) == 0);
slots = lvExactSize >> 3;
break;
case CORINFO_HFA_ELEM_VECTOR128:
assert((lvExactSize % 16) == 0);
slots = lvExactSize >> 4;
break;
default:
unreached();
}
assert(slots <= 4);
#endif // TARGET_ARM64
return slots;
}
// lvIsMultiRegArgOrRet()
// returns true if this is a multireg LclVar struct used in an argument context
// or if this is a multireg LclVar struct assigned from a multireg call
bool lvIsMultiRegArgOrRet()
{
return lvIsMultiRegArg || lvIsMultiRegRet;
}
#if defined(DEBUG)
private:
DoNotEnregisterReason m_doNotEnregReason;
AddressExposedReason m_addrExposedReason;
public:
void SetDoNotEnregReason(DoNotEnregisterReason reason)
{
m_doNotEnregReason = reason;
}
DoNotEnregisterReason GetDoNotEnregReason() const
{
return m_doNotEnregReason;
}
AddressExposedReason GetAddrExposedReason() const
{
return m_addrExposedReason;
}
#endif // DEBUG
public:
void SetAddressExposed(bool value DEBUGARG(AddressExposedReason reason))
{
m_addrExposed = value;
INDEBUG(m_addrExposedReason = reason);
}
void CleanAddressExposed()
{
m_addrExposed = false;
}
bool IsAddressExposed() const
{
return m_addrExposed;
}
#ifdef DEBUG
void SetHiddenBufferStructArg(char value)
{
lvHiddenBufferStructArg = value;
}
bool IsHiddenBufferStructArg() const
{
return lvHiddenBufferStructArg;
}
#endif
private:
regNumberSmall _lvRegNum; // Used to store the register this variable is in (or, the low register of a
// register pair). It is set during codegen any time the
// variable is enregistered (lvRegister is only set
// to non-zero if the variable gets the same register assignment for its entire
// lifetime).
#if !defined(TARGET_64BIT)
regNumberSmall _lvOtherReg; // Used for "upper half" of long var.
#endif // !defined(TARGET_64BIT)
regNumberSmall _lvArgReg; // The (first) register in which this argument is passed.
#if FEATURE_MULTIREG_ARGS
regNumberSmall _lvOtherArgReg; // Used for the second part of the struct passed in a register.
// Note this is defined but not used by ARM32
#endif // FEATURE_MULTIREG_ARGS
regNumberSmall _lvArgInitReg; // the register into which the argument is moved at entry
public:
// The register number is stored in a small format (8 bits), but the getters return and the setters take
// a full-size (unsigned) format, to localize the casts here.
/////////////////////
regNumber GetRegNum() const
{
return (regNumber)_lvRegNum;
}
void SetRegNum(regNumber reg)
{
_lvRegNum = (regNumberSmall)reg;
assert(_lvRegNum == reg);
}
/////////////////////
#if defined(TARGET_64BIT)
regNumber GetOtherReg() const
{
assert(!"shouldn't get here"); // can't use "unreached();" because it's NORETURN, which causes C4072
// "unreachable code" warnings
return REG_NA;
}
void SetOtherReg(regNumber reg)
{
assert(!"shouldn't get here"); // can't use "unreached();" because it's NORETURN, which causes C4072
// "unreachable code" warnings
}
#else // !TARGET_64BIT
regNumber GetOtherReg() const
{
return (regNumber)_lvOtherReg;
}
void SetOtherReg(regNumber reg)
{
_lvOtherReg = (regNumberSmall)reg;
assert(_lvOtherReg == reg);
}
#endif // !TARGET_64BIT
/////////////////////
regNumber GetArgReg() const
{
return (regNumber)_lvArgReg;
}
void SetArgReg(regNumber reg)
{
_lvArgReg = (regNumberSmall)reg;
assert(_lvArgReg == reg);
}
#if FEATURE_MULTIREG_ARGS
regNumber GetOtherArgReg() const
{
return (regNumber)_lvOtherArgReg;
}
void SetOtherArgReg(regNumber reg)
{
_lvOtherArgReg = (regNumberSmall)reg;
assert(_lvOtherArgReg == reg);
}
#endif // FEATURE_MULTIREG_ARGS
#ifdef FEATURE_SIMD
// Is this a SIMD struct?
bool lvIsSIMDType() const
{
return lvSIMDType;
}
// Is this a SIMD struct which is used in a SIMD intrinsic?
bool lvIsUsedInSIMDIntrinsic() const
{
return lvUsedInSIMDIntrinsic;
}
#else
// If FEATURE_SIMD is not enabled, return false
bool lvIsSIMDType() const
{
return false;
}
bool lvIsUsedInSIMDIntrinsic() const
{
return false;
}
#endif
/////////////////////
regNumber GetArgInitReg() const
{
return (regNumber)_lvArgInitReg;
}
void SetArgInitReg(regNumber reg)
{
_lvArgInitReg = (regNumberSmall)reg;
assert(_lvArgInitReg == reg);
}
/////////////////////
bool lvIsRegCandidate() const
{
return lvLRACandidate != 0;
}
bool lvIsInReg() const
{
return lvIsRegCandidate() && (GetRegNum() != REG_STK);
}
regMaskTP lvRegMask() const
{
regMaskTP regMask = RBM_NONE;
if (varTypeUsesFloatReg(TypeGet()))
{
if (GetRegNum() != REG_STK)
{
regMask = genRegMaskFloat(GetRegNum(), TypeGet());
}
}
else
{
if (GetRegNum() != REG_STK)
{
regMask = genRegMask(GetRegNum());
}
}
return regMask;
}
unsigned short lvVarIndex; // variable tracking index
private:
unsigned short m_lvRefCnt; // unweighted (real) reference count. For implicit by reference
// parameters, this gets hijacked from fgResetImplicitByRefRefCount
// through fgMarkDemotedImplicitByRefArgs, to provide a static
// appearance count (computed during address-exposed analysis)
// that fgMakeOutgoingStructArgCopy consults during global morph
// to determine if eliding its copy is legal.
weight_t m_lvRefCntWtd; // weighted reference count
public:
unsigned short lvRefCnt(RefCountState state = RCS_NORMAL) const;
void incLvRefCnt(unsigned short delta, RefCountState state = RCS_NORMAL);
void setLvRefCnt(unsigned short newValue, RefCountState state = RCS_NORMAL);
weight_t lvRefCntWtd(RefCountState state = RCS_NORMAL) const;
void incLvRefCntWtd(weight_t delta, RefCountState state = RCS_NORMAL);
void setLvRefCntWtd(weight_t newValue, RefCountState state = RCS_NORMAL);
private:
int lvStkOffs; // stack offset of home in bytes.
public:
int GetStackOffset() const
{
return lvStkOffs;
}
void SetStackOffset(int offset)
{
lvStkOffs = offset;
}
unsigned lvExactSize; // (exact) size of the type in bytes
// Is this a promoted struct?
// This method returns true only for structs (including SIMD structs), not for
// locals that are split on a 32-bit target.
// It is only necessary to use this:
// 1) if only structs are wanted, and
// 2) if Lowering has already been done.
// Otherwise lvPromoted is valid.
bool lvPromotedStruct()
{
#if !defined(TARGET_64BIT)
return (lvPromoted && !varTypeIsLong(lvType));
#else // defined(TARGET_64BIT)
return lvPromoted;
#endif // defined(TARGET_64BIT)
}
unsigned lvSize() const;
size_t lvArgStackSize() const;
unsigned lvSlotNum; // original slot # (if remapped)
typeInfo lvVerTypeInfo; // type info needed for verification
// class handle for the local or null if not known or not a class,
// for a struct handle use `GetStructHnd()`.
CORINFO_CLASS_HANDLE lvClassHnd;
// Get class handle for a struct local or implicitByRef struct local.
CORINFO_CLASS_HANDLE GetStructHnd() const
{
#ifdef FEATURE_SIMD
if (lvSIMDType && (m_layout == nullptr))
{
return NO_CLASS_HANDLE;
}
#endif
assert(m_layout != nullptr);
#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
assert(varTypeIsStruct(TypeGet()) || (lvIsImplicitByRef && (TypeGet() == TYP_BYREF)));
#else
assert(varTypeIsStruct(TypeGet()));
#endif
CORINFO_CLASS_HANDLE structHnd = m_layout->GetClassHandle();
assert(structHnd != NO_CLASS_HANDLE);
return structHnd;
}
CORINFO_FIELD_HANDLE lvFieldHnd; // field handle for promoted struct fields
private:
ClassLayout* m_layout; // layout info for structs
public:
BlockSet lvRefBlks; // Set of blocks that contain refs
Statement* lvDefStmt; // Pointer to the statement with the single definition
void lvaDisqualifyVar(); // Call to disqualify a local variable from use in optAddCopies
var_types TypeGet() const
{
return (var_types)lvType;
}
bool lvStackAligned() const
{
assert(lvIsStructField);
return ((lvFldOffset % TARGET_POINTER_SIZE) == 0);
}
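// Small-typed locals are kept "normalized" either on load or on store: for example, a
// TYP_BYTE parameter (or an address-exposed small local) is widened at each use site
// ("normalize on load"), while other small-typed locals are truncated when stored
// ("normalize on store"), as the two predicates below distinguish.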
bool lvNormalizeOnLoad() const
{
return varTypeIsSmall(TypeGet()) &&
// lvIsStructField is treated the same as the aliased local, see fgDoNormalizeOnStore.
(lvIsParam || m_addrExposed || lvIsStructField);
}
bool lvNormalizeOnStore() const
{
return varTypeIsSmall(TypeGet()) &&
// lvIsStructField is treated the same as the aliased local, see fgDoNormalizeOnStore.
!(lvIsParam || m_addrExposed || lvIsStructField);
}
void incRefCnts(weight_t weight, Compiler* pComp, RefCountState state = RCS_NORMAL, bool propagate = true);
var_types GetHfaType() const
{
if (GlobalJitOptions::compFeatureHfa)
{
assert(lvIsHfa());
return HfaTypeFromElemKind(GetLvHfaElemKind());
}
else
{
return TYP_UNDEF;
}
}
void SetHfaType(var_types type)
{
if (GlobalJitOptions::compFeatureHfa)
{
CorInfoHFAElemType elemKind = HfaElemKindFromType(type);
SetLvHfaElemKind(elemKind);
// Ensure we've allocated enough bits.
assert(GetLvHfaElemKind() == elemKind);
}
}
// Returns true if this variable contains GC pointers (including being a GC pointer itself).
bool HasGCPtr() const
{
return varTypeIsGC(lvType) || ((lvType == TYP_STRUCT) && m_layout->HasGCPtr());
}
// Returns the layout of a struct variable.
ClassLayout* GetLayout() const
{
assert(varTypeIsStruct(lvType));
return m_layout;
}
// Sets the layout of a struct variable.
void SetLayout(ClassLayout* layout)
{
assert(varTypeIsStruct(lvType));
assert((m_layout == nullptr) || ClassLayout::AreCompatible(m_layout, layout));
m_layout = layout;
}
SsaDefArray<LclSsaVarDsc> lvPerSsaData;
// Returns the address of the per-Ssa data for the given ssaNum (which is required
// not to be the SsaConfig::RESERVED_SSA_NUM, which indicates that the variable is
// not an SSA variable).
LclSsaVarDsc* GetPerSsaData(unsigned ssaNum)
{
return lvPerSsaData.GetSsaDef(ssaNum);
}
// Returns the SSA number for "ssaDef". Requires "ssaDef" to be a valid definition
// of this variable.
unsigned GetSsaNumForSsaDef(LclSsaVarDsc* ssaDef)
{
return lvPerSsaData.GetSsaNum(ssaDef);
}
var_types GetRegisterType(const GenTreeLclVarCommon* tree) const;
var_types GetRegisterType() const;
var_types GetActualRegisterType() const;
bool IsEnregisterableType() const
{
return GetRegisterType() != TYP_UNDEF;
}
bool IsEnregisterableLcl() const
{
if (lvDoNotEnregister)
{
return false;
}
return IsEnregisterableType();
}
//-----------------------------------------------------------------------------
// IsAlwaysAliveInMemory: Determines if this variable's value is always
// up to date on the stack. This is the case if this is an EH var or
// if we decided to spill it after its single definition.
//
bool IsAlwaysAliveInMemory() const
{
return lvLiveInOutOfHndlr || lvSpillAtSingleDef;
}
bool CanBeReplacedWithItsField(Compiler* comp) const;
#ifdef DEBUG
public:
const char* lvReason;
void PrintVarReg() const
{
printf("%s", getRegName(GetRegNum()));
}
#endif // DEBUG
}; // class LclVarDsc
enum class SymbolicIntegerValue : int32_t
{
LongMin,
IntMin,
ShortMin,
ByteMin,
Zero,
One,
ByteMax,
UByteMax,
ShortMax,
UShortMax,
IntMax,
UIntMax,
LongMax,
};
inline constexpr bool operator>(SymbolicIntegerValue left, SymbolicIntegerValue right)
{
return static_cast<int32_t>(left) > static_cast<int32_t>(right);
}
inline constexpr bool operator>=(SymbolicIntegerValue left, SymbolicIntegerValue right)
{
return static_cast<int32_t>(left) >= static_cast<int32_t>(right);
}
inline constexpr bool operator<(SymbolicIntegerValue left, SymbolicIntegerValue right)
{
return static_cast<int32_t>(left) < static_cast<int32_t>(right);
}
inline constexpr bool operator<=(SymbolicIntegerValue left, SymbolicIntegerValue right)
{
return static_cast<int32_t>(left) <= static_cast<int32_t>(right);
}
// Represents an integral range useful for reasoning about integral casts.
// It uses a symbolic representation for lower and upper bounds so
// that it can efficiently handle integers of all sizes on all hosts.
//
// Note that the ranges represented by this class are **always** in the
// "signed" domain. This is so that if we know the range a node produces, it
// can be trivially used to determine if a cast above the node does or does not
// overflow, which requires that the interpretation of integers be the same both
// for the "input" and "output". We choose signed interpretation here because it
// produces nice continuous ranges and because IR uses sign-extension for constants.
//
// Some examples of how ranges are computed for casts:
// 1. CAST_OVF(ubyte <- uint): does not overflow for [0..UBYTE_MAX], produces the
// same range - all casts that do not change the representation, i.e. have the same
// "actual" input and output type, have the same "input" and "output" range.
// 2. CAST_OVF(ulong <- uint): never overflows => the "input" range is [INT_MIN..INT_MAX]
// (aka all possible 32 bit integers). Produces [0..UINT_MAX] (aka all possible 32
// bit integers zero-extended to 64 bits).
// 3. CAST_OVF(int <- uint): overflows for inputs larger than INT_MAX <=> less than 0
// when interpreting as signed => the "input" range is [0..INT_MAX], the same range
// being the produced one as the node does not change the width of the integer.
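// As an illustrative (non-normative) usage sketch: to see whether a checked cast can be
// proven non-overflowing, one can test whether ForCastInput(cast) contains
// ForNode(cast->CastOp(), compiler), i.e. whether the operand's range fits the input range.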
//
class IntegralRange
{
private:
SymbolicIntegerValue m_lowerBound;
SymbolicIntegerValue m_upperBound;
public:
IntegralRange() = default;
IntegralRange(SymbolicIntegerValue lowerBound, SymbolicIntegerValue upperBound)
: m_lowerBound(lowerBound), m_upperBound(upperBound)
{
assert(lowerBound <= upperBound);
}
bool Contains(int64_t value) const;
bool Contains(IntegralRange other) const
{
return (m_lowerBound <= other.m_lowerBound) && (other.m_upperBound <= m_upperBound);
}
bool IsPositive()
{
return m_lowerBound >= SymbolicIntegerValue::Zero;
}
bool Equals(IntegralRange other) const
{
return (m_lowerBound == other.m_lowerBound) && (m_upperBound == other.m_upperBound);
}
static int64_t SymbolicToRealValue(SymbolicIntegerValue value);
static SymbolicIntegerValue LowerBoundForType(var_types type);
static SymbolicIntegerValue UpperBoundForType(var_types type);
static IntegralRange ForType(var_types type)
{
return {LowerBoundForType(type), UpperBoundForType(type)};
}
static IntegralRange ForNode(GenTree* node, Compiler* compiler);
static IntegralRange ForCastInput(GenTreeCast* cast);
static IntegralRange ForCastOutput(GenTreeCast* cast);
#ifdef DEBUG
static void Print(IntegralRange range);
#endif // DEBUG
};
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX TempsInfo XX
XX XX
XX The temporary lclVars allocated by the compiler for code generation XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
/*****************************************************************************
*
* The following keeps track of temporaries allocated in the stack frame
* during code-generation (after register allocation). These spill-temps are
* only used if we run out of registers while evaluating a tree.
*
* These are different from the more common temps allocated by lvaGrabTemp().
*/
class TempDsc
{
public:
TempDsc* tdNext;
private:
int tdOffs;
#ifdef DEBUG
static const int BAD_TEMP_OFFSET = 0xDDDDDDDD; // used as a sentinel "bad value" for tdOffs in DEBUG
#endif // DEBUG
int tdNum;
BYTE tdSize;
var_types tdType;
public:
TempDsc(int _tdNum, unsigned _tdSize, var_types _tdType) : tdNum(_tdNum), tdSize((BYTE)_tdSize), tdType(_tdType)
{
#ifdef DEBUG
// temps must have a negative number (so they have a different number from all local variables)
assert(tdNum < 0);
tdOffs = BAD_TEMP_OFFSET;
#endif // DEBUG
if (tdNum != _tdNum)
{
IMPL_LIMITATION("too many spill temps");
}
}
#ifdef DEBUG
bool tdLegalOffset() const
{
return tdOffs != BAD_TEMP_OFFSET;
}
#endif // DEBUG
int tdTempOffs() const
{
assert(tdLegalOffset());
return tdOffs;
}
void tdSetTempOffs(int offs)
{
tdOffs = offs;
assert(tdLegalOffset());
}
void tdAdjustTempOffs(int offs)
{
tdOffs += offs;
assert(tdLegalOffset());
}
int tdTempNum() const
{
assert(tdNum < 0);
return tdNum;
}
unsigned tdTempSize() const
{
return tdSize;
}
var_types tdTempType() const
{
return tdType;
}
};
// interface to hide linearscan implementation from rest of compiler
class LinearScanInterface
{
public:
virtual void doLinearScan() = 0;
virtual void recordVarLocationsAtStartOfBB(BasicBlock* bb) = 0;
virtual bool willEnregisterLocalVars() const = 0;
#if TRACK_LSRA_STATS
virtual void dumpLsraStatsCsv(FILE* file) = 0;
virtual void dumpLsraStatsSummary(FILE* file) = 0;
#endif // TRACK_LSRA_STATS
};
LinearScanInterface* getLinearScanAllocator(Compiler* comp);
// This enumeration names the phases into which we divide compilation. The phases should completely
// partition a compilation.
enum Phases
{
#define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) enum_nm,
#include "compphases.h"
PHASE_NUMBER_OF
};
extern const char* PhaseNames[];
extern const char* PhaseEnums[];
extern const LPCWSTR PhaseShortNames[];
// Specify which checks should be run after each phase
//
enum class PhaseChecks
{
CHECK_NONE,
CHECK_ALL
};
// Specify compiler data that a phase might modify
enum class PhaseStatus : unsigned
{
MODIFIED_NOTHING,
MODIFIED_EVERYTHING
};
// The following enum provides a simple 1:1 mapping to CLR API's
enum API_ICorJitInfo_Names
{
#define DEF_CLR_API(name) API_##name,
#include "ICorJitInfo_API_names.h"
API_COUNT
};
//---------------------------------------------------------------
// Compilation time.
//
// A "CompTimeInfo" is a structure for tracking the compilation time of one or more methods.
// We divide a compilation into a sequence of contiguous phases, and track the total (per-thread) cycles
// of the compilation, as well as the cycles for each phase. We also track the number of bytecodes.
// If there is a failure in reading a timer at any point, the "CompTimeInfo" becomes invalid, as indicated
// by "m_timerFailure" being true.
// If FEATURE_JIT_METHOD_PERF is not set, we define a minimal form of this, enough to let other code compile.
struct CompTimeInfo
{
#ifdef FEATURE_JIT_METHOD_PERF
// The string names of the phases.
static const char* PhaseNames[];
static bool PhaseHasChildren[];
static int PhaseParent[];
static bool PhaseReportsIRSize[];
unsigned m_byteCodeBytes;
unsigned __int64 m_totalCycles;
unsigned __int64 m_invokesByPhase[PHASE_NUMBER_OF];
unsigned __int64 m_cyclesByPhase[PHASE_NUMBER_OF];
#if MEASURE_CLRAPI_CALLS
unsigned __int64 m_CLRinvokesByPhase[PHASE_NUMBER_OF];
unsigned __int64 m_CLRcyclesByPhase[PHASE_NUMBER_OF];
#endif
unsigned m_nodeCountAfterPhase[PHASE_NUMBER_OF];
// For better documentation, we call EndPhase on
// non-leaf phases. We should also call EndPhase on the
// last leaf subphase; obviously, the elapsed cycles between the EndPhase
// for the last leaf subphase and the EndPhase for an ancestor should be very small.
// We add all such "redundant end phase" intervals to this variable below; we print
// it out in a report, so we can verify that it is, indeed, very small. If it ever
// isn't, this means that we're doing something significant between the end of the last
// declared subphase and the end of its parent.
unsigned __int64 m_parentPhaseEndSlop;
bool m_timerFailure;
#if MEASURE_CLRAPI_CALLS
// The following measures the time spent inside each individual CLR API call.
unsigned m_allClrAPIcalls;
unsigned m_perClrAPIcalls[API_ICorJitInfo_Names::API_COUNT];
unsigned __int64 m_allClrAPIcycles;
unsigned __int64 m_perClrAPIcycles[API_ICorJitInfo_Names::API_COUNT];
unsigned __int32 m_maxClrAPIcycles[API_ICorJitInfo_Names::API_COUNT];
#endif // MEASURE_CLRAPI_CALLS
CompTimeInfo(unsigned byteCodeBytes);
#endif
};
#ifdef FEATURE_JIT_METHOD_PERF
#if MEASURE_CLRAPI_CALLS
struct WrapICorJitInfo;
#endif
// This class summarizes the JIT time information over the course of a run: the number of methods compiled,
// and the total and maximum timings. (These are instances of the "CompTimeInfo" type described above).
// The operation of adding a single method's timing to the summary may be performed concurrently by several
// threads, so it is protected by a lock.
// This class is intended to be used as a singleton type, with only a single instance.
class CompTimeSummaryInfo
{
// This lock protects the fields of all CompTimeSummaryInfo(s) (of which we expect there to be one).
static CritSecObject s_compTimeSummaryLock;
int m_numMethods;
int m_totMethods;
CompTimeInfo m_total;
CompTimeInfo m_maximum;
int m_numFilteredMethods;
CompTimeInfo m_filtered;
// This can use whatever data you want to determine whether the value to be added
// belongs in the filtered section (it's always included in the unfiltered section)
bool IncludedInFilteredData(CompTimeInfo& info);
public:
// This is the unique CompTimeSummaryInfo object for this instance of the runtime.
static CompTimeSummaryInfo s_compTimeSummary;
CompTimeSummaryInfo()
: m_numMethods(0), m_totMethods(0), m_total(0), m_maximum(0), m_numFilteredMethods(0), m_filtered(0)
{
}
// Assumes that "info" is a completed CompTimeInfo for a compilation; adds it to the summary.
// This is thread safe.
void AddInfo(CompTimeInfo& info, bool includePhases);
// Print the summary information to "f".
// This is not thread-safe; assumed to be called by only one thread.
void Print(FILE* f);
};
// A JitTimer encapsulates a CompTimeInfo for a single compilation. It also tracks the start of compilation,
// and when the current phase started. This is intended to be part of a Compilation object.
//
class JitTimer
{
unsigned __int64 m_start; // Start of the compilation.
unsigned __int64 m_curPhaseStart; // Start of the current phase.
#if MEASURE_CLRAPI_CALLS
unsigned __int64 m_CLRcallStart; // Start of the current CLR API call (if any).
unsigned __int64 m_CLRcallInvokes; // CLR API invokes under current outer so far
unsigned __int64 m_CLRcallCycles; // CLR API cycles under current outer so far.
int m_CLRcallAPInum; // The enum/index of the current CLR API call (or -1).
static double s_cyclesPerSec; // Cached for speedier measurements
#endif
#ifdef DEBUG
Phases m_lastPhase; // The last phase that was completed (or (Phases)-1 to start).
#endif
CompTimeInfo m_info; // The CompTimeInfo for this compilation.
static CritSecObject s_csvLock; // Lock to protect the time log file.
static FILE* s_csvFile; // The time log file handle.
void PrintCsvMethodStats(Compiler* comp);
private:
void* operator new(size_t);
void* operator new[](size_t);
void operator delete(void*);
void operator delete[](void*);
public:
// Initializes the timer instance
JitTimer(unsigned byteCodeSize);
static JitTimer* Create(Compiler* comp, unsigned byteCodeSize)
{
return ::new (comp, CMK_Unknown) JitTimer(byteCodeSize);
}
static void PrintCsvHeader();
// Ends the current phase (argument is for a redundant check).
void EndPhase(Compiler* compiler, Phases phase);
#if MEASURE_CLRAPI_CALLS
// Start and end a timed CLR API call.
void CLRApiCallEnter(unsigned apix);
void CLRApiCallLeave(unsigned apix);
#endif // MEASURE_CLRAPI_CALLS
// Completes the timing of the current method, which is assumed to have "byteCodeBytes" bytes of bytecode,
// and adds it to "sum".
void Terminate(Compiler* comp, CompTimeSummaryInfo& sum, bool includePhases);
// Attempts to query the cycle counter of the current thread. If successful, returns "true" and sets
// *cycles to the cycle counter value. Otherwise, returns false and sets the "m_timerFailure" flag of
// "m_info" to true.
bool GetThreadCycles(unsigned __int64* cycles)
{
bool res = CycleTimer::GetThreadCyclesS(cycles);
if (!res)
{
m_info.m_timerFailure = true;
}
return res;
}
static void Shutdown();
};
#endif // FEATURE_JIT_METHOD_PERF
//------------------- Function/Funclet info -------------------------------
enum FuncKind : BYTE
{
FUNC_ROOT, // The main/root function (always id==0)
FUNC_HANDLER, // a funclet associated with an EH handler (finally, fault, catch, filter handler)
FUNC_FILTER, // a funclet associated with an EH filter
FUNC_COUNT
};
class emitLocation;
struct FuncInfoDsc
{
FuncKind funKind;
BYTE funFlags; // Currently unused, just here for padding
unsigned short funEHIndex; // index, into the ebd table, of innermost EH clause corresponding to this
// funclet. It is only valid if funKind field indicates this is a
// EH-related funclet: FUNC_HANDLER or FUNC_FILTER
#if defined(TARGET_AMD64)
// TODO-AMD64-Throughput: make the AMD64 info more like the ARM info to avoid having this large static array.
emitLocation* startLoc;
emitLocation* endLoc;
emitLocation* coldStartLoc; // locations for the cold section, if there is one.
emitLocation* coldEndLoc;
UNWIND_INFO unwindHeader;
// Maximum of 255 UNWIND_CODE 'nodes' and then the unwind header. If there is an odd
// number of codes, the VM or Zapper will 4-byte align the whole thing.
BYTE unwindCodes[offsetof(UNWIND_INFO, UnwindCode) + (0xFF * sizeof(UNWIND_CODE))];
unsigned unwindCodeSlot;
#elif defined(TARGET_X86)
emitLocation* startLoc;
emitLocation* endLoc;
emitLocation* coldStartLoc; // locations for the cold section, if there is one.
emitLocation* coldEndLoc;
#elif defined(TARGET_ARMARCH)
UnwindInfo uwi; // Unwind information for this function/funclet's hot section
UnwindInfo* uwiCold; // Unwind information for this function/funclet's cold section
// Note: we only have a pointer here instead of the actual object,
// to save memory in the JIT case (compared to the NGEN case),
// where we don't have any cold section.
// Note 2: we currently don't support hot/cold splitting in functions
// with EH, so uwiCold will be NULL for all funclets.
emitLocation* startLoc;
emitLocation* endLoc;
emitLocation* coldStartLoc; // locations for the cold section, if there is one.
emitLocation* coldEndLoc;
#endif // TARGET_ARMARCH
#if defined(FEATURE_CFI_SUPPORT)
jitstd::vector<CFI_CODE>* cfiCodes;
#endif // FEATURE_CFI_SUPPORT
// Eventually we may want to move rsModifiedRegsMask, lvaOutgoingArgSize, and anything else
// that isn't shared between the main function body and funclets.
};
struct fgArgTabEntry
{
GenTreeCall::Use* use; // Points to the argument's GenTreeCall::Use in gtCallArgs or gtCallThisArg.
GenTreeCall::Use* lateUse; // Points to the argument's GenTreeCall::Use in gtCallLateArgs, if any.
// Get the node that corresponds to this argument entry.
// This is the "real" node and not a placeholder or setup node.
GenTree* GetNode() const
{
return lateUse == nullptr ? use->GetNode() : lateUse->GetNode();
}
unsigned argNum; // The original argument number, also specifies the required argument evaluation order from the IL
private:
regNumberSmall regNums[MAX_ARG_REG_COUNT]; // The registers to use when passing this argument, set to REG_STK for
// arguments passed on the stack
public:
unsigned numRegs; // Count of number of registers that this argument uses.
// Note that on ARM, if we have a double hfa, this reflects the number
// of DOUBLE registers.
#if defined(UNIX_AMD64_ABI)
// Unix amd64 will split floating point types and integer types in structs
// between floating point and general purpose registers. Keep track of that
// information so we do not need to recompute it later.
unsigned structIntRegs;
unsigned structFloatRegs;
#endif // UNIX_AMD64_ABI
#if defined(DEBUG_ARG_SLOTS)
// These fields were used to calculate stack size in stack slots for arguments
// but now they are replaced by precise `m_byteOffset/m_byteSize` because of
// arm64 apple abi requirements.
// A slot is a pointer sized region in the OutArg area.
unsigned slotNum; // When an argument is passed in the OutArg area this is the slot number in the OutArg area
unsigned numSlots; // Count of number of slots that this argument uses
#endif // DEBUG_ARG_SLOTS
// Return number of stack slots that this argument is taking.
// TODO-Cleanup: this function does not align with arm64 apple model,
// delete it. In most cases we just want to know whether it is using the stack or not,
// but in some cases we are checking if it is a multireg arg, like:
// `numRegs + GetStackSlotsNumber() > 1` that is harder to replace.
//
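// For example, a 12-byte stack argument occupies two slots on a 64-bit target
// (roundUp(12, 8) / 8 == 2).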
unsigned GetStackSlotsNumber() const
{
return roundUp(GetStackByteSize(), TARGET_POINTER_SIZE) / TARGET_POINTER_SIZE;
}
private:
unsigned _lateArgInx; // index into gtCallLateArgs list; UINT_MAX if this is not a late arg.
public:
unsigned tmpNum; // the LclVar number if we had to force evaluation of this arg
var_types argType; // The type used to pass this argument. This is generally the original argument type, but when a
// struct is passed as a scalar type, this is that type.
// Note that if a struct is passed by reference, this will still be the struct type.
bool needTmp : 1; // True when we force this argument's evaluation into a temp LclVar
bool needPlace : 1; // True when we must replace this argument with a placeholder node
bool isTmp : 1; // True when we setup a temp LclVar for this argument due to size issues with the struct
bool processed : 1; // True when we have decided the evaluation order for this argument in the gtCallLateArgs
bool isBackFilled : 1; // True when the argument fills a register slot skipped due to alignment requirements of
// previous arguments.
NonStandardArgKind nonStandardArgKind : 4; // The non-standard arg kind. Non-standard args are args that are forced
// to be in certain registers or on the stack, regardless of where they
// appear in the arg list.
bool isStruct : 1; // True if this is a struct arg
bool _isVararg : 1; // True if the argument is in a vararg context.
bool passedByRef : 1; // True iff the argument is passed by reference.
#if FEATURE_ARG_SPLIT
bool _isSplit : 1; // True when this argument is split between the registers and OutArg area
#endif // FEATURE_ARG_SPLIT
#ifdef FEATURE_HFA_FIELDS_PRESENT
CorInfoHFAElemType _hfaElemKind : 3; // What kind of an HFA this is (CORINFO_HFA_ELEM_NONE if it is not an HFA).
#endif
CorInfoHFAElemType GetHfaElemKind() const
{
#ifdef FEATURE_HFA_FIELDS_PRESENT
return _hfaElemKind;
#else
NOWAY_MSG("GetHfaElemKind");
return CORINFO_HFA_ELEM_NONE;
#endif
}
void SetHfaElemKind(CorInfoHFAElemType elemKind)
{
#ifdef FEATURE_HFA_FIELDS_PRESENT
_hfaElemKind = elemKind;
#else
NOWAY_MSG("SetHfaElemKind");
#endif
}
bool isNonStandard() const
{
return nonStandardArgKind != NonStandardArgKind::None;
}
// Returns true if the IR node for this non-standard arg is added by fgInitArgInfo.
// In this case, it must be removed by GenTreeCall::ResetArgInfo.
bool isNonStandardArgAddedLate() const
{
switch (static_cast<NonStandardArgKind>(nonStandardArgKind))
{
case NonStandardArgKind::None:
case NonStandardArgKind::PInvokeFrame:
case NonStandardArgKind::ShiftLow:
case NonStandardArgKind::ShiftHigh:
case NonStandardArgKind::FixedRetBuffer:
case NonStandardArgKind::ValidateIndirectCallTarget:
return false;
case NonStandardArgKind::WrapperDelegateCell:
case NonStandardArgKind::VirtualStubCell:
case NonStandardArgKind::PInvokeCookie:
case NonStandardArgKind::PInvokeTarget:
case NonStandardArgKind::R2RIndirectionCell:
return true;
default:
unreached();
}
}
bool isLateArg() const
{
bool isLate = (_lateArgInx != UINT_MAX);
return isLate;
}
unsigned GetLateArgInx() const
{
assert(isLateArg());
return _lateArgInx;
}
void SetLateArgInx(unsigned inx)
{
_lateArgInx = inx;
}
regNumber GetRegNum() const
{
return (regNumber)regNums[0];
}
regNumber GetOtherRegNum() const
{
return (regNumber)regNums[1];
}
#if defined(UNIX_AMD64_ABI)
SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc;
#endif
void setRegNum(unsigned int i, regNumber regNum)
{
assert(i < MAX_ARG_REG_COUNT);
regNums[i] = (regNumberSmall)regNum;
}
regNumber GetRegNum(unsigned int i)
{
assert(i < MAX_ARG_REG_COUNT);
return (regNumber)regNums[i];
}
bool IsSplit() const
{
#if FEATURE_ARG_SPLIT
return compFeatureArgSplit() && _isSplit;
#else // FEATURE_ARG_SPLIT
return false;
#endif
}
void SetSplit(bool value)
{
#if FEATURE_ARG_SPLIT
_isSplit = value;
#endif
}
bool IsVararg() const
{
return compFeatureVarArg() && _isVararg;
}
void SetIsVararg(bool value)
{
if (compFeatureVarArg())
{
_isVararg = value;
}
}
bool IsHfaArg() const
{
if (GlobalJitOptions::compFeatureHfa)
{
return IsHfa(GetHfaElemKind());
}
else
{
return false;
}
}
bool IsHfaRegArg() const
{
if (GlobalJitOptions::compFeatureHfa)
{
return IsHfa(GetHfaElemKind()) && isPassedInRegisters();
}
else
{
return false;
}
}
unsigned intRegCount() const
{
#if defined(UNIX_AMD64_ABI)
if (this->isStruct)
{
return this->structIntRegs;
}
#endif // defined(UNIX_AMD64_ABI)
if (!this->isPassedInFloatRegisters())
{
return this->numRegs;
}
return 0;
}
unsigned floatRegCount() const
{
#if defined(UNIX_AMD64_ABI)
if (this->isStruct)
{
return this->structFloatRegs;
}
#endif // defined(UNIX_AMD64_ABI)
if (this->isPassedInFloatRegisters())
{
return this->numRegs;
}
return 0;
}
// Get the number of bytes that this argument is occupying on the stack,
// including padding up to the target pointer size for platforms
// where a stack argument can't take less.
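// For example, an argument with GetByteSize() == 24 that is split across one register
// reports 24 - TARGET_POINTER_SIZE stack bytes, while a fully register-passed argument
// reports 0.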
unsigned GetStackByteSize() const
{
if (!IsSplit() && numRegs > 0)
{
return 0;
}
assert(!IsHfaArg() || !IsSplit());
assert(GetByteSize() > TARGET_POINTER_SIZE * numRegs);
const unsigned stackByteSize = GetByteSize() - TARGET_POINTER_SIZE * numRegs;
return stackByteSize;
}
var_types GetHfaType() const
{
if (GlobalJitOptions::compFeatureHfa)
{
return HfaTypeFromElemKind(GetHfaElemKind());
}
else
{
return TYP_UNDEF;
}
}
void SetHfaType(var_types type, unsigned hfaSlots)
{
if (GlobalJitOptions::compFeatureHfa)
{
if (type != TYP_UNDEF)
{
// We must already have set the passing mode.
assert(numRegs != 0 || GetStackByteSize() != 0);
// We originally set numRegs according to the size of the struct, but if the size of the
// hfaType is not the same as the pointer size, we need to correct it.
// Note that hfaSlots is the number of registers we will use. For ARM, that is twice
// the number of "double registers".
unsigned numHfaRegs = hfaSlots;
#ifdef TARGET_ARM
if (type == TYP_DOUBLE)
{
// Must be an even number of registers.
assert((numRegs & 1) == 0);
numHfaRegs = hfaSlots / 2;
}
#endif // TARGET_ARM
if (!IsHfaArg())
{
// We haven't previously set this; do so now.
CorInfoHFAElemType elemKind = HfaElemKindFromType(type);
SetHfaElemKind(elemKind);
// Ensure we've allocated enough bits.
assert(GetHfaElemKind() == elemKind);
if (isPassedInRegisters())
{
numRegs = numHfaRegs;
}
}
else
{
// We've already set this; ensure that it's consistent.
if (isPassedInRegisters())
{
assert(numRegs == numHfaRegs);
}
assert(type == HfaTypeFromElemKind(GetHfaElemKind()));
}
}
}
}
#ifdef TARGET_ARM
void SetIsBackFilled(bool backFilled)
{
isBackFilled = backFilled;
}
bool IsBackFilled() const
{
return isBackFilled;
}
#else // !TARGET_ARM
void SetIsBackFilled(bool backFilled)
{
}
bool IsBackFilled() const
{
return false;
}
#endif // !TARGET_ARM
bool isPassedInRegisters() const
{
return !IsSplit() && (numRegs != 0);
}
bool isPassedInFloatRegisters() const
{
#ifdef TARGET_X86
return false;
#else
return isValidFloatArgReg(GetRegNum());
#endif
}
// Can we replace the struct type of this node with a primitive type for argument passing?
bool TryPassAsPrimitive() const
{
return !IsSplit() && ((numRegs == 1) || (m_byteSize <= TARGET_POINTER_SIZE));
}
#if defined(DEBUG_ARG_SLOTS)
// Returns the number of "slots" used, where for this purpose a
// register counts as a slot.
unsigned getSlotCount() const
{
if (isBackFilled)
{
assert(isPassedInRegisters());
assert(numRegs == 1);
}
else if (GetRegNum() == REG_STK)
{
assert(!isPassedInRegisters());
assert(numRegs == 0);
}
else
{
assert(numRegs > 0);
}
return numSlots + numRegs;
}
#endif
#if defined(DEBUG_ARG_SLOTS)
// Returns the size as a multiple of pointer-size.
// For targets without HFAs, this is the same as getSlotCount().
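// For example, an HFA of four floats on ARM64 uses four registers (four "slots") but
// only two pointer-sized units, so getSize() returns 2 for it.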
unsigned getSize() const
{
unsigned size = getSlotCount();
if (GlobalJitOptions::compFeatureHfa)
{
if (IsHfaRegArg())
{
#ifdef TARGET_ARM
// We counted the number of regs, but if they are DOUBLE hfa regs we have to double the size.
if (GetHfaType() == TYP_DOUBLE)
{
assert(!IsSplit());
size <<= 1;
}
#elif defined(TARGET_ARM64)
// We counted the number of regs, but if they are FLOAT hfa regs we have to halve the size,
// or if they are SIMD16 vector hfa regs we have to double the size.
if (GetHfaType() == TYP_FLOAT)
{
// Round up in case of odd HFA count.
size = (size + 1) >> 1;
}
#ifdef FEATURE_SIMD
else if (GetHfaType() == TYP_SIMD16)
{
size <<= 1;
}
#endif // FEATURE_SIMD
#endif // TARGET_ARM64
}
}
return size;
}
#endif // DEBUG_ARG_SLOTS
private:
unsigned m_byteOffset;
// byte size that this argument takes including the padding after.
// For example, a 1-byte arg on x64 with 8-byte alignment
// will have `m_byteSize == 8`; the same arg on Apple arm64 will have `m_byteSize == 1`.
unsigned m_byteSize;
unsigned m_byteAlignment; // usually 4 or 8 bytes (slots/registers).
public:
void SetByteOffset(unsigned byteOffset)
{
DEBUG_ARG_SLOTS_ASSERT(byteOffset / TARGET_POINTER_SIZE == slotNum);
m_byteOffset = byteOffset;
}
unsigned GetByteOffset() const
{
DEBUG_ARG_SLOTS_ASSERT(m_byteOffset / TARGET_POINTER_SIZE == slotNum);
return m_byteOffset;
}
void SetByteSize(unsigned byteSize, bool isStruct, bool isFloatHfa)
{
unsigned roundedByteSize;
if (compMacOsArm64Abi())
{
// Only struct types need extension or rounding to pointer size, but HFA<float> does not.
if (isStruct && !isFloatHfa)
{
roundedByteSize = roundUp(byteSize, TARGET_POINTER_SIZE);
}
else
{
roundedByteSize = byteSize;
}
}
else
{
roundedByteSize = roundUp(byteSize, TARGET_POINTER_SIZE);
}
#if !defined(TARGET_ARM)
// Arm32 could have a struct with 8-byte alignment
// whose rounded size % 8 is not 0.
assert(m_byteAlignment != 0);
assert(roundedByteSize % m_byteAlignment == 0);
#endif // TARGET_ARM
#if defined(DEBUG_ARG_SLOTS)
if (!compMacOsArm64Abi() && !isStruct)
{
assert(roundedByteSize == getSlotCount() * TARGET_POINTER_SIZE);
}
#endif
m_byteSize = roundedByteSize;
}
unsigned GetByteSize() const
{
return m_byteSize;
}
void SetByteAlignment(unsigned byteAlignment)
{
m_byteAlignment = byteAlignment;
}
unsigned GetByteAlignment() const
{
return m_byteAlignment;
}
// Set the register numbers for a multireg argument.
// There's nothing to do on x64/Ux because the structDesc has already been used to set the
// register numbers.
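// For example, a struct passed in two consecutive registers starting at REG_R0 ends up
// with regNums[0] == REG_R0 and regNums[1] == REG_R1; on ARM a TYP_DOUBLE HFA advances
// by two register numbers per additional register.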
void SetMultiRegNums()
{
#if FEATURE_MULTIREG_ARGS && !defined(UNIX_AMD64_ABI)
if (numRegs == 1)
{
return;
}
regNumber argReg = GetRegNum(0);
#ifdef TARGET_ARM
unsigned int regSize = (GetHfaType() == TYP_DOUBLE) ? 2 : 1;
#else
unsigned int regSize = 1;
#endif
if (numRegs > MAX_ARG_REG_COUNT)
NO_WAY("Multireg argument exceeds the maximum length");
for (unsigned int regIndex = 1; regIndex < numRegs; regIndex++)
{
argReg = (regNumber)(argReg + regSize);
setRegNum(regIndex, argReg);
}
#endif // FEATURE_MULTIREG_ARGS && !defined(UNIX_AMD64_ABI)
}
#ifdef DEBUG
// Check that the value of 'isStruct' is consistent.
// A struct arg must be one of the following:
// - A node of struct type,
// - A GT_FIELD_LIST, or
// - A node of a scalar type, passed in a single register or slot
// (or two slots in the case of a struct passed on the stack as TYP_DOUBLE).
//
void checkIsStruct() const
{
GenTree* node = GetNode();
if (isStruct)
{
if (!varTypeIsStruct(node) && !node->OperIs(GT_FIELD_LIST))
{
// This is the case where we are passing a struct as a primitive type.
// On most targets, this is always a single register or slot.
// However, on ARM this could be two slots if it is TYP_DOUBLE.
bool isPassedAsPrimitiveType =
((numRegs == 1) || ((numRegs == 0) && (GetByteSize() <= TARGET_POINTER_SIZE)));
#ifdef TARGET_ARM
if (!isPassedAsPrimitiveType)
{
if (node->TypeGet() == TYP_DOUBLE && numRegs == 0 && (numSlots == 2))
{
isPassedAsPrimitiveType = true;
}
}
#endif // TARGET_ARM
assert(isPassedAsPrimitiveType);
}
}
else
{
assert(!varTypeIsStruct(node));
}
}
void Dump() const;
#endif
};
//-------------------------------------------------------------------------
//
// The class fgArgInfo is used to handle the arguments
// when morphing a GT_CALL node.
//
class fgArgInfo
{
Compiler* compiler; // Back pointer to the compiler instance so that we can allocate memory
GenTreeCall* callTree; // Back pointer to the GT_CALL node for this fgArgInfo
unsigned argCount; // Updatable arg count value
#if defined(DEBUG_ARG_SLOTS)
unsigned nextSlotNum; // Updatable slot count value
#endif
unsigned nextStackByteOffset;
unsigned stkLevel; // Stack depth when we make this call (for x86)
#if defined(UNIX_X86_ABI)
bool alignmentDone; // Updateable flag, set to 'true' after we've done any required alignment.
unsigned stkSizeBytes; // Size of stack used by this call, in bytes. Calculated during fgMorphArgs().
unsigned padStkAlign; // Stack alignment in bytes required before arguments are pushed for this call.
// Computed dynamically during codegen, based on stkSizeBytes and the current
// stack level (genStackLevel) when the first stack adjustment is made for
// this call.
#endif
#if FEATURE_FIXED_OUT_ARGS
unsigned outArgSize; // Size of the out arg area for the call, will be at least MIN_ARG_AREA_FOR_CALL
#endif
unsigned argTableSize; // size of argTable array (equal to the argCount when done with fgMorphArgs)
bool hasRegArgs; // true if we have one or more register arguments
bool hasStackArgs; // true if we have one or more stack arguments
bool argsComplete; // marker for state
bool argsSorted; // marker for state
bool needsTemps; // one or more arguments must be copied to a temp by EvalArgsToTemps
fgArgTabEntry** argTable; // variable sized array of per-argument descriptions (i.e. argTable[argTableSize])
private:
void AddArg(fgArgTabEntry* curArgTabEntry);
public:
fgArgInfo(Compiler* comp, GenTreeCall* call, unsigned argCount);
fgArgInfo(GenTreeCall* newCall, GenTreeCall* oldCall);
fgArgTabEntry* AddRegArg(unsigned argNum,
GenTree* node,
GenTreeCall::Use* use,
regNumber regNum,
unsigned numRegs,
unsigned byteSize,
unsigned byteAlignment,
bool isStruct,
bool isFloatHfa,
bool isVararg = false);
#ifdef UNIX_AMD64_ABI
fgArgTabEntry* AddRegArg(unsigned argNum,
GenTree* node,
GenTreeCall::Use* use,
regNumber regNum,
unsigned numRegs,
unsigned byteSize,
unsigned byteAlignment,
const bool isStruct,
const bool isFloatHfa,
const bool isVararg,
const regNumber otherRegNum,
const unsigned structIntRegs,
const unsigned structFloatRegs,
const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* const structDescPtr = nullptr);
#endif // UNIX_AMD64_ABI
fgArgTabEntry* AddStkArg(unsigned argNum,
GenTree* node,
GenTreeCall::Use* use,
unsigned numSlots,
unsigned byteSize,
unsigned byteAlignment,
bool isStruct,
bool isFloatHfa,
bool isVararg = false);
void RemorphReset();
void UpdateRegArg(fgArgTabEntry* argEntry, GenTree* node, bool reMorphing);
void UpdateStkArg(fgArgTabEntry* argEntry, GenTree* node, bool reMorphing);
void SplitArg(unsigned argNum, unsigned numRegs, unsigned numSlots);
void EvalToTmp(fgArgTabEntry* curArgTabEntry, unsigned tmpNum, GenTree* newNode);
void ArgsComplete();
void SortArgs();
void EvalArgsToTemps();
unsigned ArgCount() const
{
return argCount;
}
fgArgTabEntry** ArgTable() const
{
return argTable;
}
#if defined(DEBUG_ARG_SLOTS)
unsigned GetNextSlotNum() const
{
return nextSlotNum;
}
#endif
unsigned GetNextSlotByteOffset() const
{
return nextStackByteOffset;
}
bool HasRegArgs() const
{
return hasRegArgs;
}
bool NeedsTemps() const
{
return needsTemps;
}
bool HasStackArgs() const
{
return hasStackArgs;
}
bool AreArgsComplete() const
{
return argsComplete;
}
#if FEATURE_FIXED_OUT_ARGS
unsigned GetOutArgSize() const
{
return outArgSize;
}
void SetOutArgSize(unsigned newVal)
{
outArgSize = newVal;
}
#endif // FEATURE_FIXED_OUT_ARGS
#if defined(UNIX_X86_ABI)
void ComputeStackAlignment(unsigned curStackLevelInBytes)
{
padStkAlign = AlignmentPad(curStackLevelInBytes, STACK_ALIGN);
}
unsigned GetStkAlign() const
{
return padStkAlign;
}
void SetStkSizeBytes(unsigned newStkSizeBytes)
{
stkSizeBytes = newStkSizeBytes;
}
unsigned GetStkSizeBytes() const
{
return stkSizeBytes;
}
bool IsStkAlignmentDone() const
{
return alignmentDone;
}
void SetStkAlignmentDone()
{
alignmentDone = true;
}
#endif // defined(UNIX_X86_ABI)
// Get the fgArgTabEntry for the arg at position argNum.
fgArgTabEntry* GetArgEntry(unsigned argNum, bool reMorphing = true) const
{
fgArgTabEntry* curArgTabEntry = nullptr;
if (!reMorphing)
{
// The arg table has not yet been sorted.
curArgTabEntry = argTable[argNum];
assert(curArgTabEntry->argNum == argNum);
return curArgTabEntry;
}
for (unsigned i = 0; i < argCount; i++)
{
curArgTabEntry = argTable[i];
if (curArgTabEntry->argNum == argNum)
{
return curArgTabEntry;
}
}
noway_assert(!"GetArgEntry: argNum not found");
return nullptr;
}
void SetNeedsTemps()
{
needsTemps = true;
}
// Get the node for the arg at position argIndex.
// Caller must ensure that this index is a valid arg index.
GenTree* GetArgNode(unsigned argIndex) const
{
return GetArgEntry(argIndex)->GetNode();
}
void Dump(Compiler* compiler) const;
};
#ifdef DEBUG
// XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
// We have the ability to mark source expressions with "Test Labels."
// These drive assertions within the JIT, or internal JIT testing. For example, we could label expressions
// that should be CSE defs, and other expressions that should uses of those defs, with a shared label.
enum TestLabel // This must be kept identical to System.Runtime.CompilerServices.JitTestLabel.TestLabel.
{
TL_SsaName,
TL_VN, // Defines a "VN equivalence class". (For full VN, including exceptions thrown).
TL_VNNorm, // Like above, but uses the non-exceptional value of the expression.
TL_CSE_Def, // This must be identified in the JIT as a CSE def
TL_CSE_Use, // This must be identified in the JIT as a CSE use
TL_LoopHoist, // Expression must (or must not) be hoisted out of the loop.
};
struct TestLabelAndNum
{
TestLabel m_tl;
ssize_t m_num;
TestLabelAndNum() : m_tl(TestLabel(0)), m_num(0)
{
}
};
typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, TestLabelAndNum> NodeToTestDataMap;
// XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#endif // DEBUG
//-------------------------------------------------------------------------
// LoopFlags: flags for the loop table.
//
enum LoopFlags : unsigned short
{
LPFLG_EMPTY = 0,
// LPFLG_UNUSED = 0x0001,
// LPFLG_UNUSED = 0x0002,
LPFLG_ITER = 0x0004, // loop of form: for (i = icon or expression; test_condition(); i++)
// LPFLG_UNUSED = 0x0008,
LPFLG_CONTAINS_CALL = 0x0010, // If executing the loop body *may* execute a call
// LPFLG_UNUSED = 0x0020,
LPFLG_CONST_INIT = 0x0040, // iterator is initialized with a constant (found in lpConstInit)
LPFLG_SIMD_LIMIT = 0x0080, // iterator is compared with vector element count (found in lpConstLimit)
LPFLG_VAR_LIMIT = 0x0100, // iterator is compared with a local var (var # found in lpVarLimit)
LPFLG_CONST_LIMIT = 0x0200, // iterator is compared with a constant (found in lpConstLimit)
LPFLG_ARRLEN_LIMIT = 0x0400, // iterator is compared with a.len or a[i].len (found in lpArrLenLimit)
LPFLG_HAS_PREHEAD = 0x0800, // lpHead is known to be a preHead for this loop
LPFLG_REMOVED = 0x1000, // has been removed from the loop table (unrolled or optimized away)
LPFLG_DONT_UNROLL = 0x2000, // do not unroll this loop
LPFLG_ASGVARS_YES = 0x4000, // "lpAsgVars" has been computed
LPFLG_ASGVARS_INC = 0x8000, // "lpAsgVars" is incomplete -- vars beyond those representable in an AllVarSet
// type are assigned to.
};
inline constexpr LoopFlags operator~(LoopFlags a)
{
return (LoopFlags)(~(unsigned short)a);
}
inline constexpr LoopFlags operator|(LoopFlags a, LoopFlags b)
{
return (LoopFlags)((unsigned short)a | (unsigned short)b);
}
inline constexpr LoopFlags operator&(LoopFlags a, LoopFlags b)
{
return (LoopFlags)((unsigned short)a & (unsigned short)b);
}
inline LoopFlags& operator|=(LoopFlags& a, LoopFlags b)
{
return a = (LoopFlags)((unsigned short)a | (unsigned short)b);
}
inline LoopFlags& operator&=(LoopFlags& a, LoopFlags b)
{
return a = (LoopFlags)((unsigned short)a & (unsigned short)b);
}
// The following holds information about instr offsets in terms of generated code.
enum class IPmappingDscKind
{
Prolog, // The mapping represents the start of a prolog.
Epilog, // The mapping represents the start of an epilog.
NoMapping, // This does not map to any IL offset.
Normal, // The mapping maps to an IL offset.
};
struct IPmappingDsc
{
emitLocation ipmdNativeLoc; // the emitter location of the native code corresponding to the IL offset
IPmappingDscKind ipmdKind; // The kind of mapping
ILLocation ipmdLoc; // The location for normal mappings
bool ipmdIsLabel; // Can this code be a branch label?
};
struct PreciseIPMapping
{
emitLocation nativeLoc;
DebugInfo debugInfo;
};
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX The big guy. The sections are currently organized as : XX
XX XX
XX o GenTree and BasicBlock XX
XX o LclVarsInfo XX
XX o Importer XX
XX o FlowGraph XX
XX o Optimizer XX
XX o RegAlloc XX
XX o EEInterface XX
XX o TempsInfo XX
XX o RegSet XX
XX o GCInfo XX
XX o Instruction XX
XX o ScopeInfo XX
XX o PrologScopeInfo XX
XX o CodeGenerator XX
XX o UnwindInfo XX
XX o Compiler XX
XX o typeInfo XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
struct HWIntrinsicInfo;
class Compiler
{
friend class emitter;
friend class UnwindInfo;
friend class UnwindFragmentInfo;
friend class UnwindEpilogInfo;
friend class JitTimer;
friend class LinearScan;
friend class fgArgInfo;
friend class Rationalizer;
friend class Phase;
friend class Lowering;
friend class CSE_DataFlow;
friend class CSE_Heuristic;
friend class CodeGenInterface;
friend class CodeGen;
friend class LclVarDsc;
friend class TempDsc;
friend class LIR;
friend class ObjectAllocator;
friend class LocalAddressVisitor;
friend struct GenTree;
friend class MorphInitBlockHelper;
friend class MorphCopyBlockHelper;
#ifdef FEATURE_HW_INTRINSICS
friend struct HWIntrinsicInfo;
#endif // FEATURE_HW_INTRINSICS
#ifndef TARGET_64BIT
friend class DecomposeLongs;
#endif // !TARGET_64BIT
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Misc structs definitions XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
public:
hashBvGlobalData hbvGlobalData; // Used by the hashBv bitvector package.
#ifdef DEBUG
bool verbose;
bool verboseTrees;
bool shouldUseVerboseTrees();
bool asciiTrees; // If true, dump trees using only ASCII characters
bool shouldDumpASCIITrees();
bool verboseSsa; // If true, produce especially verbose dump output in SSA construction.
bool shouldUseVerboseSsa();
bool treesBeforeAfterMorph; // If true, print trees before/after morphing (paired by an intra-compilation id: morphNum).
int morphNum; // This counts the trees that have been morphed, allowing us to label each uniquely.
bool doExtraSuperPmiQueries;
void makeExtraStructQueries(CORINFO_CLASS_HANDLE structHandle, int level); // Make queries recursively 'level' deep.
const char* VarNameToStr(VarName name)
{
return name;
}
DWORD expensiveDebugCheckLevel;
#endif
#if FEATURE_MULTIREG_RET
GenTree* impAssignMultiRegTypeToVar(GenTree* op,
CORINFO_CLASS_HANDLE hClass DEBUGARG(CorInfoCallConvExtension callConv));
#endif // FEATURE_MULTIREG_RET
#ifdef TARGET_X86
bool isTrivialPointerSizedStruct(CORINFO_CLASS_HANDLE clsHnd) const;
#endif // TARGET_X86
//-------------------------------------------------------------------------
// Functions to handle homogeneous floating-point aggregates (HFAs) in ARM/ARM64.
// HFAs are one to four element structs where each element is the same
// type, either all float or all double. We handle HVAs (one to four elements of
// vector types) uniformly with HFAs. HFAs are treated specially
// in the ARM/ARM64 Procedure Call Standards, specifically, they are passed in
// floating-point registers instead of the general purpose registers.
//
bool IsHfa(CORINFO_CLASS_HANDLE hClass);
bool IsHfa(GenTree* tree);
var_types GetHfaType(GenTree* tree);
unsigned GetHfaCount(GenTree* tree);
var_types GetHfaType(CORINFO_CLASS_HANDLE hClass);
unsigned GetHfaCount(CORINFO_CLASS_HANDLE hClass);
bool IsMultiRegReturnedType(CORINFO_CLASS_HANDLE hClass, CorInfoCallConvExtension callConv);
//-------------------------------------------------------------------------
// The following is used for validating format of EH table
//
struct EHNodeDsc;
typedef struct EHNodeDsc* pEHNodeDsc;
EHNodeDsc* ehnTree; // root of the tree comprising the EHnodes.
EHNodeDsc* ehnNext; // next unused node in the block of EHnodes allocated for the tree.
struct EHNodeDsc
{
enum EHBlockType
{
TryNode,
FilterNode,
HandlerNode,
FinallyNode,
FaultNode
};
EHBlockType ehnBlockType; // kind of EH block
IL_OFFSET ehnStartOffset; // IL offset of start of the EH block
IL_OFFSET ehnEndOffset; // IL offset past end of the EH block. (TODO: looks like verInsertEhNode() sets this to
// the last IL offset, not "one past the last one", i.e., the range Start to End is
// inclusive).
pEHNodeDsc ehnNext; // next (non-nested) block in sequential order
pEHNodeDsc ehnChild; // leftmost nested block
union {
pEHNodeDsc ehnTryNode; // for filters and handlers, the corresponding try node
pEHNodeDsc ehnHandlerNode; // for a try node, the corresponding handler node
};
pEHNodeDsc ehnFilterNode; // if this is a try node and has a filter, otherwise 0
pEHNodeDsc ehnEquivalent; // if blockType=tryNode, a try node whose start and end offsets are the same
void ehnSetTryNodeType()
{
ehnBlockType = TryNode;
}
void ehnSetFilterNodeType()
{
ehnBlockType = FilterNode;
}
void ehnSetHandlerNodeType()
{
ehnBlockType = HandlerNode;
}
void ehnSetFinallyNodeType()
{
ehnBlockType = FinallyNode;
}
void ehnSetFaultNodeType()
{
ehnBlockType = FaultNode;
}
bool ehnIsTryBlock()
{
return ehnBlockType == TryNode;
}
bool ehnIsFilterBlock()
{
return ehnBlockType == FilterNode;
}
bool ehnIsHandlerBlock()
{
return ehnBlockType == HandlerNode;
}
bool ehnIsFinallyBlock()
{
return ehnBlockType == FinallyNode;
}
bool ehnIsFaultBlock()
{
return ehnBlockType == FaultNode;
}
// returns true if there is any overlap between the two nodes
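// For example, nodes spanning IL offsets [0..10] and [8..20] overlap, while [0..5] and [6..10] do not.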
static bool ehnIsOverlap(pEHNodeDsc node1, pEHNodeDsc node2)
{
if (node1->ehnStartOffset < node2->ehnStartOffset)
{
return (node1->ehnEndOffset >= node2->ehnStartOffset);
}
else
{
return (node1->ehnStartOffset <= node2->ehnEndOffset);
}
}
// returns true if "inner" is completely nested inside "outer"; callers report BADCODE when it is not
static bool ehnIsNested(pEHNodeDsc inner, pEHNodeDsc outer)
{
return ((inner->ehnStartOffset >= outer->ehnStartOffset) && (inner->ehnEndOffset <= outer->ehnEndOffset));
}
};
//-------------------------------------------------------------------------
// Exception handling functions
//
#if !defined(FEATURE_EH_FUNCLETS)
bool ehNeedsShadowSPslots()
{
return (info.compXcptnsCount || opts.compDbgEnC);
}
// 0 for methods with no EH
// 1 for methods with non-nested EH, or where only the try blocks are nested
// 2 for a method with a catch within a catch
// etc.
unsigned ehMaxHndNestingCount;
#endif // !FEATURE_EH_FUNCLETS
static bool jitIsBetween(unsigned value, unsigned start, unsigned end);
static bool jitIsBetweenInclusive(unsigned value, unsigned start, unsigned end);
bool bbInCatchHandlerILRange(BasicBlock* blk);
bool bbInFilterILRange(BasicBlock* blk);
bool bbInTryRegions(unsigned regionIndex, BasicBlock* blk);
bool bbInExnFlowRegions(unsigned regionIndex, BasicBlock* blk);
bool bbInHandlerRegions(unsigned regionIndex, BasicBlock* blk);
bool bbInCatchHandlerRegions(BasicBlock* tryBlk, BasicBlock* hndBlk);
unsigned short bbFindInnermostCommonTryRegion(BasicBlock* bbOne, BasicBlock* bbTwo);
unsigned short bbFindInnermostTryRegionContainingHandlerRegion(unsigned handlerIndex);
unsigned short bbFindInnermostHandlerRegionContainingTryRegion(unsigned tryIndex);
// Returns true if "block" is the start of a try region.
bool bbIsTryBeg(BasicBlock* block);
// Returns true if "block" is the start of a handler or filter region.
bool bbIsHandlerBeg(BasicBlock* block);
// Returns true iff "block" is where control flows if an exception is raised in the
// try region, and sets "*regionIndex" to the index of the try for the handler.
// Differs from "IsHandlerBeg" in the case of filters, where this is true for the first
// block of the filter, but not for the filter's handler.
bool bbIsExFlowBlock(BasicBlock* block, unsigned* regionIndex);
bool ehHasCallableHandlers();
// Return the EH descriptor for the given region index.
EHblkDsc* ehGetDsc(unsigned regionIndex);
// Return the EH index given a region descriptor.
unsigned ehGetIndex(EHblkDsc* ehDsc);
// Return the EH descriptor index of the enclosing try, for the given region index.
unsigned ehGetEnclosingTryIndex(unsigned regionIndex);
// Return the EH descriptor index of the enclosing handler, for the given region index.
unsigned ehGetEnclosingHndIndex(unsigned regionIndex);
// Return the EH descriptor for the most nested 'try' region this BasicBlock is a member of (or nullptr if this
// block is not in a 'try' region).
EHblkDsc* ehGetBlockTryDsc(BasicBlock* block);
// Return the EH descriptor for the most nested filter or handler region this BasicBlock is a member of (or nullptr
// if this block is not in a filter or handler region).
EHblkDsc* ehGetBlockHndDsc(BasicBlock* block);
// Return the EH descriptor for the most nested region that may handle exceptions raised in this BasicBlock (or
// nullptr if this block's exceptions propagate to caller).
EHblkDsc* ehGetBlockExnFlowDsc(BasicBlock* block);
EHblkDsc* ehIsBlockTryLast(BasicBlock* block);
EHblkDsc* ehIsBlockHndLast(BasicBlock* block);
bool ehIsBlockEHLast(BasicBlock* block);
bool ehBlockHasExnFlowDsc(BasicBlock* block);
// Return the region index of the most nested EH region this block is in.
unsigned ehGetMostNestedRegionIndex(BasicBlock* block, bool* inTryRegion);
// Find the true enclosing try index, ignoring 'mutual protect' try. Uses IL ranges to check.
unsigned ehTrueEnclosingTryIndexIL(unsigned regionIndex);
// Return the index of the most nested enclosing region for a particular EH region. Returns NO_ENCLOSING_INDEX
// if there is no enclosing region. If the returned index is not NO_ENCLOSING_INDEX, then '*inTryRegion'
// is set to 'true' if the enclosing region is a 'try', or 'false' if the enclosing region is a handler.
// (It can never be a filter.)
unsigned ehGetEnclosingRegionIndex(unsigned regionIndex, bool* inTryRegion);
// A block has been deleted. Update the EH table appropriately.
void ehUpdateForDeletedBlock(BasicBlock* block);
// Determine whether a block can be deleted while preserving the EH normalization rules.
bool ehCanDeleteEmptyBlock(BasicBlock* block);
// Update the 'last' pointers in the EH table to reflect new or deleted blocks in an EH region.
void ehUpdateLastBlocks(BasicBlock* oldLast, BasicBlock* newLast);
// For a finally handler, find the region index that the BBJ_CALLFINALLY lives in that calls the handler,
// or NO_ENCLOSING_INDEX if the BBJ_CALLFINALLY lives in the main function body. Normally, the index
// is the same index as the handler (and the BBJ_CALLFINALLY lives in the 'try' region), but for AMD64 the
// BBJ_CALLFINALLY lives in the enclosing try or handler region, whichever is more nested, or the main function
// body. If the returned index is not NO_ENCLOSING_INDEX, then '*inTryRegion' is set to 'true' if the
// BBJ_CALLFINALLY lives in the returned index's 'try' region, or 'false' if lives in the handler region. (It never
// lives in a filter.)
unsigned ehGetCallFinallyRegionIndex(unsigned finallyIndex, bool* inTryRegion);
// Find the range of basic blocks in which all BBJ_CALLFINALLY will be found that target the 'finallyIndex' region's
// handler. Set begBlk to the first block, and endBlk to the block after the last block of the range
// (nullptr if the last block is the last block in the program).
// Precondition: 'finallyIndex' is the EH region of a try/finally clause.
void ehGetCallFinallyBlockRange(unsigned finallyIndex, BasicBlock** begBlk, BasicBlock** endBlk);
#ifdef DEBUG
// Given a BBJ_CALLFINALLY block and the EH region index of the finally it is calling, return
// 'true' if the BBJ_CALLFINALLY is in the correct EH region.
bool ehCallFinallyInCorrectRegion(BasicBlock* blockCallFinally, unsigned finallyIndex);
#endif // DEBUG
#if defined(FEATURE_EH_FUNCLETS)
// Do we need a PSPSym in the main function? For codegen purposes, we only need one
// if there is a filter that protects a region with a nested EH clause (such as a
// try/catch nested in the 'try' body of a try/filter/filter-handler). See
// genFuncletProlog() for more details. However, the VM seems to use it for more
// purposes, maybe including debugging. Until we are sure otherwise, always create
// a PSPSym for functions with any EH.
bool ehNeedsPSPSym() const
{
#ifdef TARGET_X86
return false;
#else // TARGET_X86
return compHndBBtabCount > 0;
#endif // TARGET_X86
}
bool ehAnyFunclets(); // Are there any funclets in this function?
unsigned ehFuncletCount(); // Return the count of funclets in the function
unsigned bbThrowIndex(BasicBlock* blk); // Get the index to use as the cache key for sharing throw blocks
#else // !FEATURE_EH_FUNCLETS
bool ehAnyFunclets()
{
return false;
}
unsigned ehFuncletCount()
{
return 0;
}
unsigned bbThrowIndex(BasicBlock* blk)
{
return blk->bbTryIndex;
} // Get the index to use as the cache key for sharing throw blocks
#endif // !FEATURE_EH_FUNCLETS
// Returns a flowList representing the "EH predecessors" of "blk". These are the normal predecessors of
// "blk", plus one special case: if "blk" is the first block of a handler, considers the predecessor(s) of the first
// first block of the corresponding try region to be "EH predecessors". (If there is a single such predecessor,
// for example, we want to consider that the immediate dominator of the catch clause start block, so it's
// convenient to also consider it a predecessor.)
flowList* BlockPredsWithEH(BasicBlock* blk);
// This table is useful for memoization of the method above.
typedef JitHashTable<BasicBlock*, JitPtrKeyFuncs<BasicBlock>, flowList*> BlockToFlowListMap;
BlockToFlowListMap* m_blockToEHPreds;
BlockToFlowListMap* GetBlockToEHPreds()
{
if (m_blockToEHPreds == nullptr)
{
m_blockToEHPreds = new (getAllocator()) BlockToFlowListMap(getAllocator());
}
return m_blockToEHPreds;
}
void* ehEmitCookie(BasicBlock* block);
UNATIVE_OFFSET ehCodeOffset(BasicBlock* block);
EHblkDsc* ehInitHndRange(BasicBlock* src, IL_OFFSET* hndBeg, IL_OFFSET* hndEnd, bool* inFilter);
EHblkDsc* ehInitTryRange(BasicBlock* src, IL_OFFSET* tryBeg, IL_OFFSET* tryEnd);
EHblkDsc* ehInitHndBlockRange(BasicBlock* blk, BasicBlock** hndBeg, BasicBlock** hndLast, bool* inFilter);
EHblkDsc* ehInitTryBlockRange(BasicBlock* blk, BasicBlock** tryBeg, BasicBlock** tryLast);
void fgSetTryBeg(EHblkDsc* handlerTab, BasicBlock* newTryBeg);
void fgSetTryEnd(EHblkDsc* handlerTab, BasicBlock* newTryLast);
void fgSetHndEnd(EHblkDsc* handlerTab, BasicBlock* newHndLast);
void fgSkipRmvdBlocks(EHblkDsc* handlerTab);
void fgAllocEHTable();
void fgRemoveEHTableEntry(unsigned XTnum);
#if defined(FEATURE_EH_FUNCLETS)
EHblkDsc* fgAddEHTableEntry(unsigned XTnum);
#endif // FEATURE_EH_FUNCLETS
#if !FEATURE_EH
void fgRemoveEH();
#endif // !FEATURE_EH
void fgSortEHTable();
// Causes the EH table to obey some well-formedness conditions, by inserting
// empty BB's when necessary:
// * No block is both the first block of a handler and the first block of a try.
// * No block is the first block of multiple 'try' regions.
// * No block is the last block of multiple EH regions.
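// For example, if a 'try' would otherwise begin at the very block where an enclosing handler begins,
// an empty block is inserted so that the two regions get distinct first blocks.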
void fgNormalizeEH();
bool fgNormalizeEHCase1();
bool fgNormalizeEHCase2();
bool fgNormalizeEHCase3();
void fgCheckForLoopsInHandlers();
#ifdef DEBUG
void dispIncomingEHClause(unsigned num, const CORINFO_EH_CLAUSE& clause);
void dispOutgoingEHClause(unsigned num, const CORINFO_EH_CLAUSE& clause);
void fgVerifyHandlerTab();
void fgDispHandlerTab();
#endif // DEBUG
bool fgNeedToSortEHTable;
void verInitEHTree(unsigned numEHClauses);
void verInsertEhNode(CORINFO_EH_CLAUSE* clause, EHblkDsc* handlerTab);
void verInsertEhNodeInTree(EHNodeDsc** ppRoot, EHNodeDsc* node);
void verInsertEhNodeParent(EHNodeDsc** ppRoot, EHNodeDsc* node);
void verCheckNestingLevel(EHNodeDsc* initRoot);
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX GenTree and BasicBlock XX
XX XX
XX Functions to allocate and display the GenTrees and BasicBlocks XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
// Functions to create nodes
Statement* gtNewStmt(GenTree* expr = nullptr);
Statement* gtNewStmt(GenTree* expr, const DebugInfo& di);
// For unary opers.
GenTree* gtNewOperNode(genTreeOps oper, var_types type, GenTree* op1, bool doSimplifications = TRUE);
// For binary opers.
GenTree* gtNewOperNode(genTreeOps oper, var_types type, GenTree* op1, GenTree* op2);
GenTreeColon* gtNewColonNode(var_types type, GenTree* elseNode, GenTree* thenNode);
GenTreeQmark* gtNewQmarkNode(var_types type, GenTree* cond, GenTreeColon* colon);
GenTree* gtNewLargeOperNode(genTreeOps oper,
var_types type = TYP_I_IMPL,
GenTree* op1 = nullptr,
GenTree* op2 = nullptr);
GenTreeIntCon* gtNewIconNode(ssize_t value, var_types type = TYP_INT);
GenTreeIntCon* gtNewIconNode(unsigned fieldOffset, FieldSeqNode* fieldSeq);
GenTreeIntCon* gtNewNull();
GenTreeIntCon* gtNewTrue();
GenTreeIntCon* gtNewFalse();
GenTree* gtNewPhysRegNode(regNumber reg, var_types type);
GenTree* gtNewJmpTableNode();
GenTree* gtNewIndOfIconHandleNode(var_types indType, size_t value, GenTreeFlags iconFlags, bool isInvariant);
GenTree* gtNewIconHandleNode(size_t value, GenTreeFlags flags, FieldSeqNode* fields = nullptr);
GenTreeFlags gtTokenToIconFlags(unsigned token);
GenTree* gtNewIconEmbHndNode(void* value, void* pValue, GenTreeFlags flags, void* compileTimeHandle);
GenTree* gtNewIconEmbScpHndNode(CORINFO_MODULE_HANDLE scpHnd);
GenTree* gtNewIconEmbClsHndNode(CORINFO_CLASS_HANDLE clsHnd);
GenTree* gtNewIconEmbMethHndNode(CORINFO_METHOD_HANDLE methHnd);
GenTree* gtNewIconEmbFldHndNode(CORINFO_FIELD_HANDLE fldHnd);
GenTree* gtNewStringLiteralNode(InfoAccessType iat, void* pValue);
GenTreeIntCon* gtNewStringLiteralLength(GenTreeStrCon* node);
GenTree* gtNewLconNode(__int64 value);
GenTree* gtNewDconNode(double value, var_types type = TYP_DOUBLE);
GenTree* gtNewSconNode(int CPX, CORINFO_MODULE_HANDLE scpHandle);
GenTree* gtNewZeroConNode(var_types type);
GenTree* gtNewOneConNode(var_types type);
GenTreeLclVar* gtNewStoreLclVar(unsigned dstLclNum, GenTree* src);
#ifdef FEATURE_SIMD
GenTree* gtNewSIMDVectorZero(var_types simdType, CorInfoType simdBaseJitType, unsigned simdSize);
#endif
GenTree* gtNewBlkOpNode(GenTree* dst, GenTree* srcOrFillVal, bool isVolatile, bool isCopyBlock);
GenTree* gtNewPutArgReg(var_types type, GenTree* arg, regNumber argReg);
GenTree* gtNewBitCastNode(var_types type, GenTree* arg);
protected:
void gtBlockOpInit(GenTree* result, GenTree* dst, GenTree* srcOrFillVal, bool isVolatile);
public:
GenTreeObj* gtNewObjNode(CORINFO_CLASS_HANDLE structHnd, GenTree* addr);
void gtSetObjGcInfo(GenTreeObj* objNode);
GenTree* gtNewStructVal(CORINFO_CLASS_HANDLE structHnd, GenTree* addr);
GenTree* gtNewBlockVal(GenTree* addr, unsigned size);
GenTree* gtNewCpObjNode(GenTree* dst, GenTree* src, CORINFO_CLASS_HANDLE structHnd, bool isVolatile);
GenTreeCall::Use* gtNewCallArgs(GenTree* node);
GenTreeCall::Use* gtNewCallArgs(GenTree* node1, GenTree* node2);
GenTreeCall::Use* gtNewCallArgs(GenTree* node1, GenTree* node2, GenTree* node3);
GenTreeCall::Use* gtNewCallArgs(GenTree* node1, GenTree* node2, GenTree* node3, GenTree* node4);
GenTreeCall::Use* gtPrependNewCallArg(GenTree* node, GenTreeCall::Use* args);
GenTreeCall::Use* gtInsertNewCallArgAfter(GenTree* node, GenTreeCall::Use* after);
GenTreeCall* gtNewCallNode(gtCallTypes callType,
CORINFO_METHOD_HANDLE handle,
var_types type,
GenTreeCall::Use* args,
const DebugInfo& di = DebugInfo());
GenTreeCall* gtNewIndCallNode(GenTree* addr,
var_types type,
GenTreeCall::Use* args,
const DebugInfo& di = DebugInfo());
GenTreeCall* gtNewHelperCallNode(unsigned helper, var_types type, GenTreeCall::Use* args = nullptr);
GenTreeCall* gtNewRuntimeLookupHelperCallNode(CORINFO_RUNTIME_LOOKUP* pRuntimeLookup,
GenTree* ctxTree,
void* compileTimeHandle);
GenTreeLclVar* gtNewLclvNode(unsigned lnum, var_types type DEBUGARG(IL_OFFSET offs = BAD_IL_OFFSET));
GenTreeLclVar* gtNewLclLNode(unsigned lnum, var_types type DEBUGARG(IL_OFFSET offs = BAD_IL_OFFSET));
GenTreeLclVar* gtNewLclVarAddrNode(unsigned lclNum, var_types type = TYP_I_IMPL);
GenTreeLclFld* gtNewLclFldAddrNode(unsigned lclNum,
unsigned lclOffs,
FieldSeqNode* fieldSeq,
var_types type = TYP_I_IMPL);
#ifdef FEATURE_SIMD
GenTreeSIMD* gtNewSIMDNode(
var_types type, GenTree* op1, SIMDIntrinsicID simdIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize);
GenTreeSIMD* gtNewSIMDNode(var_types type,
GenTree* op1,
GenTree* op2,
SIMDIntrinsicID simdIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize);
void SetOpLclRelatedToSIMDIntrinsic(GenTree* op);
#endif
#ifdef FEATURE_HW_INTRINSICS
GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic = false);
GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type,
GenTree* op1,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic = false);
GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type,
GenTree* op1,
GenTree* op2,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic = false);
GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type,
GenTree* op1,
GenTree* op2,
GenTree* op3,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic = false);
GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type,
GenTree* op1,
GenTree* op2,
GenTree* op3,
GenTree* op4,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic = false);
GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type,
GenTree** operands,
size_t operandCount,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic = false);
GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type,
IntrinsicNodeBuilder&& nodeBuilder,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic = false);
GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(var_types type,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize)
{
bool isSimdAsHWIntrinsic = true;
return gtNewSimdHWIntrinsicNode(type, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(
var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize)
{
bool isSimdAsHWIntrinsic = true;
return gtNewSimdHWIntrinsicNode(type, op1, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(var_types type,
GenTree* op1,
GenTree* op2,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize)
{
bool isSimdAsHWIntrinsic = true;
return gtNewSimdHWIntrinsicNode(type, op1, op2, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(var_types type,
GenTree* op1,
GenTree* op2,
GenTree* op3,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize)
{
bool isSimdAsHWIntrinsic = true;
return gtNewSimdHWIntrinsicNode(type, op1, op2, op3, hwIntrinsicID, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
GenTree* gtNewSimdAbsNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdBinOpNode(genTreeOps op,
var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdCeilNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdCmpOpNode(genTreeOps op,
var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdCmpOpAllNode(genTreeOps op,
var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdCmpOpAnyNode(genTreeOps op,
var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdCndSelNode(var_types type,
GenTree* op1,
GenTree* op2,
GenTree* op3,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdCreateBroadcastNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdDotProdNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdFloorNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdGetElementNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdMaxNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdMinNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdNarrowNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdSqrtNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdSumNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdUnOpNode(genTreeOps op,
var_types type,
GenTree* op1,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdWidenLowerNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdWidenUpperNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdWithElementNode(var_types type,
GenTree* op1,
GenTree* op2,
GenTree* op3,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic);
GenTree* gtNewSimdZeroNode(var_types type,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic);
GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode(var_types type, NamedIntrinsic hwIntrinsicID);
GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode(var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID);
GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode(var_types type,
GenTree* op1,
GenTree* op2,
NamedIntrinsic hwIntrinsicID);
GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode(
var_types type, GenTree* op1, GenTree* op2, GenTree* op3, NamedIntrinsic hwIntrinsicID);
CORINFO_CLASS_HANDLE gtGetStructHandleForHWSIMD(var_types simdType, CorInfoType simdBaseJitType);
CorInfoType getBaseJitTypeFromArgIfNeeded(NamedIntrinsic intrinsic,
CORINFO_CLASS_HANDLE clsHnd,
CORINFO_SIG_INFO* sig,
CorInfoType simdBaseJitType);
#endif // FEATURE_HW_INTRINSICS
GenTree* gtNewMustThrowException(unsigned helper, var_types type, CORINFO_CLASS_HANDLE clsHnd);
GenTreeLclFld* gtNewLclFldNode(unsigned lnum, var_types type, unsigned offset);
GenTree* gtNewInlineCandidateReturnExpr(GenTree* inlineCandidate, var_types type, BasicBlockFlags bbFlags);
GenTreeField* gtNewFieldRef(var_types type, CORINFO_FIELD_HANDLE fldHnd, GenTree* obj = nullptr, DWORD offset = 0);
GenTree* gtNewIndexRef(var_types typ, GenTree* arrayOp, GenTree* indexOp);
GenTreeArrLen* gtNewArrLen(var_types typ, GenTree* arrayOp, int lenOffset, BasicBlock* block);
GenTreeIndir* gtNewIndir(var_types typ, GenTree* addr);
GenTree* gtNewNullCheck(GenTree* addr, BasicBlock* basicBlock);
var_types gtTypeForNullCheck(GenTree* tree);
void gtChangeOperToNullCheck(GenTree* tree, BasicBlock* block);
static fgArgTabEntry* gtArgEntryByArgNum(GenTreeCall* call, unsigned argNum);
static fgArgTabEntry* gtArgEntryByNode(GenTreeCall* call, GenTree* node);
fgArgTabEntry* gtArgEntryByLateArgIndex(GenTreeCall* call, unsigned lateArgInx);
static GenTree* gtArgNodeByLateArgInx(GenTreeCall* call, unsigned lateArgInx);
GenTreeOp* gtNewAssignNode(GenTree* dst, GenTree* src);
GenTree* gtNewTempAssign(unsigned tmp,
GenTree* val,
Statement** pAfterStmt = nullptr,
const DebugInfo& di = DebugInfo(),
BasicBlock* block = nullptr);
GenTree* gtNewRefCOMfield(GenTree* objPtr,
CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_ACCESS_FLAGS access,
CORINFO_FIELD_INFO* pFieldInfo,
var_types lclTyp,
CORINFO_CLASS_HANDLE structType,
GenTree* assg);
GenTree* gtNewNothingNode();
GenTree* gtNewArgPlaceHolderNode(var_types type, CORINFO_CLASS_HANDLE clsHnd);
GenTree* gtUnusedValNode(GenTree* expr);
GenTree* gtNewKeepAliveNode(GenTree* op);
GenTreeCast* gtNewCastNode(var_types typ, GenTree* op1, bool fromUnsigned, var_types castType);
GenTreeCast* gtNewCastNodeL(var_types typ, GenTree* op1, bool fromUnsigned, var_types castType);
GenTreeAllocObj* gtNewAllocObjNode(
unsigned int helper, bool helperHasSideEffects, CORINFO_CLASS_HANDLE clsHnd, var_types type, GenTree* op1);
GenTreeAllocObj* gtNewAllocObjNode(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool useParent);
GenTree* gtNewRuntimeLookup(CORINFO_GENERIC_HANDLE hnd, CorInfoGenericHandleType hndTyp, GenTree* lookupTree);
GenTreeIndir* gtNewMethodTableLookup(GenTree* obj);
//------------------------------------------------------------------------
// Other GenTree functions
GenTree* gtClone(GenTree* tree, bool complexOK = false);
// If `tree` is a lclVar with lclNum `varNum`, return an IntCns with value `varVal`; otherwise,
// create a copy of `tree`, adding specified flags, replacing uses of lclVar `deepVarNum` with
// IntCnses with value `deepVarVal`.
GenTree* gtCloneExpr(
GenTree* tree, GenTreeFlags addFlags, unsigned varNum, int varVal, unsigned deepVarNum, int deepVarVal);
// Create a copy of `tree`, optionally adding specified flags, and optionally mapping uses of local
// `varNum` to int constants with value `varVal`.
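// For illustration: gtCloneExpr(tree) produces a plain deep copy, while gtCloneExpr(tree, GTF_EMPTY, lclNum, 0)
// additionally substitutes the integer constant 0 for each use of local `lclNum` in the copy.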
GenTree* gtCloneExpr(GenTree* tree,
GenTreeFlags addFlags = GTF_EMPTY,
unsigned varNum = BAD_VAR_NUM,
int varVal = 0)
{
return gtCloneExpr(tree, addFlags, varNum, varVal, varNum, varVal);
}
Statement* gtCloneStmt(Statement* stmt)
{
GenTree* exprClone = gtCloneExpr(stmt->GetRootNode());
return gtNewStmt(exprClone, stmt->GetDebugInfo());
}
// Internal helper for cloning a call
GenTreeCall* gtCloneExprCallHelper(GenTreeCall* call,
GenTreeFlags addFlags = GTF_EMPTY,
unsigned deepVarNum = BAD_VAR_NUM,
int deepVarVal = 0);
// Create copy of an inline or guarded devirtualization candidate tree.
GenTreeCall* gtCloneCandidateCall(GenTreeCall* call);
void gtUpdateSideEffects(Statement* stmt, GenTree* tree);
void gtUpdateTreeAncestorsSideEffects(GenTree* tree);
void gtUpdateStmtSideEffects(Statement* stmt);
void gtUpdateNodeSideEffects(GenTree* tree);
void gtUpdateNodeOperSideEffects(GenTree* tree);
void gtUpdateNodeOperSideEffectsPost(GenTree* tree);
// Returns "true" iff the complexity (not formally defined, but first interpretation
// is #of nodes in subtree) of "tree" is greater than "limit".
// (This is somewhat redundant with the "GetCostEx()/GetCostSz()" fields, but can be used
// before they have been set.)
bool gtComplexityExceeds(GenTree** tree, unsigned limit);
GenTree* gtReverseCond(GenTree* tree);
static bool gtHasRef(GenTree* tree, ssize_t lclNum);
bool gtHasLocalsWithAddrOp(GenTree* tree);
unsigned gtSetCallArgsOrder(const GenTreeCall::UseList& args, bool lateArgs, int* callCostEx, int* callCostSz);
unsigned gtSetMultiOpOrder(GenTreeMultiOp* multiOp);
void gtWalkOp(GenTree** op1, GenTree** op2, GenTree* base, bool constOnly);
#ifdef DEBUG
unsigned gtHashValue(GenTree* tree);
GenTree* gtWalkOpEffectiveVal(GenTree* op);
#endif
void gtPrepareCost(GenTree* tree);
bool gtIsLikelyRegVar(GenTree* tree);
// Returns true iff the secondNode can be swapped with firstNode.
bool gtCanSwapOrder(GenTree* firstNode, GenTree* secondNode);
// Given an address expression, compute its costs and addressing mode opportunities,
// and mark addressing mode candidates as GTF_DONT_CSE.
// TODO-Throughput - Consider actually instantiating these early, to avoid
// having to re-run the algorithm that looks for them (might also improve CQ).
bool gtMarkAddrMode(GenTree* addr, int* costEx, int* costSz, var_types type);
unsigned gtSetEvalOrder(GenTree* tree);
void gtSetStmtInfo(Statement* stmt);
// Returns "true" iff "node" has any of the side effects in "flags".
bool gtNodeHasSideEffects(GenTree* node, GenTreeFlags flags);
// Returns "true" iff "tree" or its (transitive) children have any of the side effects in "flags".
bool gtTreeHasSideEffects(GenTree* tree, GenTreeFlags flags);
// Appends 'expr' in front of 'list'
// 'list' will typically start off as 'nullptr'
// when 'list' is non-null a GT_COMMA node is used to insert 'expr'
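// For example, building from a null list with expressions e1 and then e2 yields COMMA(e2, e1), so the most
// recently appended expression is placed in front and evaluated first.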
GenTree* gtBuildCommaList(GenTree* list, GenTree* expr);
void gtExtractSideEffList(GenTree* expr,
GenTree** pList,
GenTreeFlags GenTreeFlags = GTF_SIDE_EFFECT,
bool ignoreRoot = false);
GenTree* gtGetThisArg(GenTreeCall* call);
// Static fields of struct types (and sometimes the types that those are reduced to) are represented by having the
// static field contain an object pointer to the boxed struct. This simplifies the GC implementation...but
// complicates the JIT somewhat. This predicate returns "true" iff a node with type "fieldNodeType", representing
// the given "fldHnd", is such an object pointer.
bool gtIsStaticFieldPtrToBoxedStruct(var_types fieldNodeType, CORINFO_FIELD_HANDLE fldHnd);
// Return true if call is a recursive call; return false otherwise.
// Note when inlining, this looks for calls back to the root method.
bool gtIsRecursiveCall(GenTreeCall* call)
{
return gtIsRecursiveCall(call->gtCallMethHnd);
}
bool gtIsRecursiveCall(CORINFO_METHOD_HANDLE callMethodHandle)
{
return (callMethodHandle == impInlineRoot()->info.compMethodHnd);
}
//-------------------------------------------------------------------------
GenTree* gtFoldExpr(GenTree* tree);
GenTree* gtFoldExprConst(GenTree* tree);
GenTree* gtFoldExprSpecial(GenTree* tree);
GenTree* gtFoldBoxNullable(GenTree* tree);
GenTree* gtFoldExprCompare(GenTree* tree);
GenTree* gtCreateHandleCompare(genTreeOps oper,
GenTree* op1,
GenTree* op2,
CorInfoInlineTypeCheck typeCheckInliningResult);
GenTree* gtFoldExprCall(GenTreeCall* call);
GenTree* gtFoldTypeCompare(GenTree* tree);
GenTree* gtFoldTypeEqualityCall(bool isEq, GenTree* op1, GenTree* op2);
// Options to control behavior of gtTryRemoveBoxUpstreamEffects
enum BoxRemovalOptions
{
BR_REMOVE_AND_NARROW, // remove effects, minimize remaining work, return possibly narrowed source tree
BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE, // remove effects and minimize remaining work, return type handle tree
BR_REMOVE_BUT_NOT_NARROW, // remove effects, return original source tree
BR_DONT_REMOVE, // check if removal is possible, return copy source tree
BR_DONT_REMOVE_WANT_TYPE_HANDLE, // check if removal is possible, return type handle tree
BR_MAKE_LOCAL_COPY // revise box to copy to temp local and return local's address
};
GenTree* gtTryRemoveBoxUpstreamEffects(GenTree* tree, BoxRemovalOptions options = BR_REMOVE_AND_NARROW);
GenTree* gtOptimizeEnumHasFlag(GenTree* thisOp, GenTree* flagOp);
//-------------------------------------------------------------------------
// Get the handle, if any.
CORINFO_CLASS_HANDLE gtGetStructHandleIfPresent(GenTree* tree);
// Get the handle, and assert if not found.
CORINFO_CLASS_HANDLE gtGetStructHandle(GenTree* tree);
// Get the handle for a ref type.
CORINFO_CLASS_HANDLE gtGetClassHandle(GenTree* tree, bool* pIsExact, bool* pIsNonNull);
// Get the class handle for a helper call
CORINFO_CLASS_HANDLE gtGetHelperCallClassHandle(GenTreeCall* call, bool* pIsExact, bool* pIsNonNull);
// Get the element handle for an array of ref type.
CORINFO_CLASS_HANDLE gtGetArrayElementClassHandle(GenTree* array);
// Get a class handle from a helper call argument
CORINFO_CLASS_HANDLE gtGetHelperArgClassHandle(GenTree* array);
// Get the class handle for a field
CORINFO_CLASS_HANDLE gtGetFieldClassHandle(CORINFO_FIELD_HANDLE fieldHnd, bool* pIsExact, bool* pIsNonNull);
// Check if this tree is a gc static base helper call
bool gtIsStaticGCBaseHelperCall(GenTree* tree);
//-------------------------------------------------------------------------
// Functions to display the trees
#ifdef DEBUG
void gtDispNode(GenTree* tree, IndentStack* indentStack, _In_z_ const char* msg, bool isLIR);
void gtDispConst(GenTree* tree);
void gtDispLeaf(GenTree* tree, IndentStack* indentStack);
void gtDispNodeName(GenTree* tree);
#if FEATURE_MULTIREG_RET
unsigned gtDispMultiRegCount(GenTree* tree);
#endif
void gtDispRegVal(GenTree* tree);
void gtDispZeroFieldSeq(GenTree* tree);
void gtDispVN(GenTree* tree);
void gtDispCommonEndLine(GenTree* tree);
enum IndentInfo
{
IINone,
IIArc,
IIArcTop,
IIArcBottom,
IIEmbedded,
IIError,
IndentInfoCount
};
void gtDispChild(GenTree* child,
IndentStack* indentStack,
IndentInfo arcType,
_In_opt_ const char* msg = nullptr,
bool topOnly = false);
void gtDispTree(GenTree* tree,
IndentStack* indentStack = nullptr,
_In_opt_ const char* msg = nullptr,
bool topOnly = false,
bool isLIR = false);
void gtGetLclVarNameInfo(unsigned lclNum, const char** ilKindOut, const char** ilNameOut, unsigned* ilNumOut);
int gtGetLclVarName(unsigned lclNum, char* buf, unsigned buf_remaining);
char* gtGetLclVarName(unsigned lclNum);
void gtDispLclVar(unsigned lclNum, bool padForBiggestDisp = true);
void gtDispLclVarStructType(unsigned lclNum);
void gtDispClassLayout(ClassLayout* layout, var_types type);
void gtDispILLocation(const ILLocation& loc);
void gtDispStmt(Statement* stmt, const char* msg = nullptr);
void gtDispBlockStmts(BasicBlock* block);
void gtGetArgMsg(GenTreeCall* call, GenTree* arg, unsigned argNum, char* bufp, unsigned bufLength);
void gtGetLateArgMsg(GenTreeCall* call, GenTree* arg, int argNum, char* bufp, unsigned bufLength);
void gtDispArgList(GenTreeCall* call, GenTree* lastCallOperand, IndentStack* indentStack);
void gtDispAnyFieldSeq(FieldSeqNode* fieldSeq);
void gtDispFieldSeq(FieldSeqNode* pfsn);
void gtDispRange(LIR::ReadOnlyRange const& range);
void gtDispTreeRange(LIR::Range& containingRange, GenTree* tree);
void gtDispLIRNode(GenTree* node, const char* prefixMsg = nullptr);
#endif
// For tree walks
enum fgWalkResult
{
WALK_CONTINUE,
WALK_SKIP_SUBTREES,
WALK_ABORT
};
struct fgWalkData;
typedef fgWalkResult(fgWalkPreFn)(GenTree** pTree, fgWalkData* data);
typedef fgWalkResult(fgWalkPostFn)(GenTree** pTree, fgWalkData* data);
static fgWalkPreFn gtMarkColonCond;
static fgWalkPreFn gtClearColonCond;
struct FindLinkData
{
GenTree* nodeToFind;
GenTree** result;
GenTree* parent;
};
FindLinkData gtFindLink(Statement* stmt, GenTree* node);
bool gtHasCatchArg(GenTree* tree);
typedef ArrayStack<GenTree*> GenTreeStack;
static bool gtHasCallOnStack(GenTreeStack* parentStack);
//=========================================================================
// BasicBlock functions
#ifdef DEBUG
// This is a debug flag we will use to assert when creating a block during codegen
// as this interferes with procedure splitting. If you know what you're doing, set
// it to true before creating the block. (DEBUG only)
bool fgSafeBasicBlockCreation;
#endif
BasicBlock* bbNewBasicBlock(BBjumpKinds jumpKind);
void placeLoopAlignInstructions();
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX LclVarsInfo XX
XX XX
XX The variables to be used by the code generator. XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
//
// For both PROMOTION_TYPE_NONE and PROMOTION_TYPE_DEPENDENT the struct will
// be placed in the stack frame and its fields must be laid out sequentially.
//
// For PROMOTION_TYPE_INDEPENDENT each of the struct's fields is replaced by
// a local variable that can be enregistered or placed in the stack frame.
// The fields do not need to be laid out sequentially
//
enum lvaPromotionType
{
PROMOTION_TYPE_NONE, // The struct local is not promoted
PROMOTION_TYPE_INDEPENDENT, // The struct local is promoted,
// and its field locals are independent of its parent struct local.
PROMOTION_TYPE_DEPENDENT // The struct local is promoted,
// but its field locals depend on its parent struct local.
};
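// For example, a local of type struct { int a; int b; } promoted PROMOTION_TYPE_INDEPENDENT is replaced by
// two int locals that can be enregistered separately, while under PROMOTION_TYPE_DEPENDENT the field locals
// stay tied to the parent struct's sequential stack layout.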
/*****************************************************************************/
enum FrameLayoutState
{
NO_FRAME_LAYOUT,
INITIAL_FRAME_LAYOUT,
PRE_REGALLOC_FRAME_LAYOUT,
REGALLOC_FRAME_LAYOUT,
TENTATIVE_FRAME_LAYOUT,
FINAL_FRAME_LAYOUT
};
public:
RefCountState lvaRefCountState; // Current local ref count state
bool lvaLocalVarRefCounted() const
{
return lvaRefCountState == RCS_NORMAL;
}
bool lvaTrackedFixed; // true: We cannot add new 'tracked' variable
unsigned lvaCount; // total number of locals, which includes function arguments,
// special arguments, IL local variables, and JIT temporary variables
LclVarDsc* lvaTable; // variable descriptor table
unsigned lvaTableCnt; // lvaTable size (>= lvaCount)
unsigned lvaTrackedCount; // actual # of locals being tracked
unsigned lvaTrackedCountInSizeTUnits; // min # of size_t's sufficient to hold a bit for all the locals being tracked
#ifdef DEBUG
VARSET_TP lvaTrackedVars; // set of tracked variables
#endif
#ifndef TARGET_64BIT
VARSET_TP lvaLongVars; // set of long (64-bit) variables
#endif
VARSET_TP lvaFloatVars; // set of floating-point (32-bit and 64-bit) variables
unsigned lvaCurEpoch; // VarSets are relative to a specific set of tracked var indices.
// If that changes, this changes. VarSets from different epochs
// cannot be meaningfully combined.
unsigned GetCurLVEpoch()
{
return lvaCurEpoch;
}
// reverse map of tracked number to var number
unsigned lvaTrackedToVarNumSize;
unsigned* lvaTrackedToVarNum;
#if DOUBLE_ALIGN
#ifdef DEBUG
// # of procs compiled with a double-aligned stack
static unsigned s_lvaDoubleAlignedProcsCount;
#endif
#endif
// Getters and setters for address-exposed and do-not-enregister local var properties.
bool lvaVarAddrExposed(unsigned varNum) const;
void lvaSetVarAddrExposed(unsigned varNum DEBUGARG(AddressExposedReason reason));
void lvaSetHiddenBufferStructArg(unsigned varNum);
void lvaSetVarLiveInOutOfHandler(unsigned varNum);
bool lvaVarDoNotEnregister(unsigned varNum);
void lvSetMinOptsDoNotEnreg();
bool lvaEnregEHVars;
bool lvaEnregMultiRegVars;
void lvaSetVarDoNotEnregister(unsigned varNum DEBUGARG(DoNotEnregisterReason reason));
unsigned lvaVarargsHandleArg;
#ifdef TARGET_X86
unsigned lvaVarargsBaseOfStkArgs; // Pointer (computed based on incoming varargs handle) to the start of the stack
// arguments
#endif // TARGET_X86
unsigned lvaInlinedPInvokeFrameVar; // variable representing the InlinedCallFrame
unsigned lvaReversePInvokeFrameVar; // variable representing the reverse PInvoke frame
#if FEATURE_FIXED_OUT_ARGS
unsigned lvaPInvokeFrameRegSaveVar; // variable representing the RegSave for PInvoke inlining.
#endif
unsigned lvaMonAcquired; // boolean variable introduced into synchronized methods
// that tracks whether the lock has been taken
unsigned lvaArg0Var; // The lclNum of arg0. Normally this will be info.compThisArg.
// However, if there is a "ldarga 0" or "starg 0" in the IL,
// we will redirect all "ldarg(a) 0" and "starg 0" to this temp.
unsigned lvaInlineeReturnSpillTemp; // The temp to spill the non-VOID return expression
// in case there are multiple BBJ_RETURN blocks in the inlinee
// or if the inlinee has GC ref locals.
#if FEATURE_FIXED_OUT_ARGS
unsigned lvaOutgoingArgSpaceVar; // dummy TYP_LCLBLK var for fixed outgoing argument space
PhasedVar<unsigned> lvaOutgoingArgSpaceSize; // size of fixed outgoing argument space
#endif // FEATURE_FIXED_OUT_ARGS
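// Rounds the outgoing argument area size up to pointer-size alignment; e.g. with an 8-byte pointer size,
// a 10-byte area is padded to 16 bytes.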
static unsigned GetOutgoingArgByteSize(unsigned sizeWithoutPadding)
{
return roundUp(sizeWithoutPadding, TARGET_POINTER_SIZE);
}
// Variable representing the return address. The helper-based tailcall
// mechanism passes the address of the return address to a runtime helper
// where it is used to detect tail-call chains.
unsigned lvaRetAddrVar;
#if defined(DEBUG) && defined(TARGET_XARCH)
unsigned lvaReturnSpCheck; // Stores SP to confirm it is not corrupted on return.
#endif // defined(DEBUG) && defined(TARGET_XARCH)
#if defined(DEBUG) && defined(TARGET_X86)
unsigned lvaCallSpCheck; // Stores SP to confirm it is not corrupted after every call.
#endif // defined(DEBUG) && defined(TARGET_X86)
bool lvaGenericsContextInUse;
bool lvaKeepAliveAndReportThis(); // Synchronized instance method of a reference type, or
// CORINFO_GENERICS_CTXT_FROM_THIS?
bool lvaReportParamTypeArg(); // Exceptions and CORINFO_GENERICS_CTXT_FROM_PARAMTYPEARG?
//-------------------------------------------------------------------------
// All these frame offsets are inter-related and must be kept in sync
#if !defined(FEATURE_EH_FUNCLETS)
// This is used for the callable handlers
unsigned lvaShadowSPslotsVar; // TYP_BLK variable for all the shadow SP slots
#endif // !FEATURE_EH_FUNCLETS
int lvaCachedGenericContextArgOffs;
int lvaCachedGenericContextArgOffset(); // For CORINFO_CALLCONV_PARAMTYPE and if generic context is passed as
// THIS pointer
#ifdef JIT32_GCENCODER
unsigned lvaLocAllocSPvar; // variable which stores the value of ESP after the last alloca/localloc
#endif // JIT32_GCENCODER
unsigned lvaNewObjArrayArgs; // variable with arguments for new MD array helper
// TODO-Review: Prior to reg predict we reserve 24 bytes for Spill temps.
// after the reg predict we will use a computed maxTmpSize
// which is based upon the number of spill temps predicted by reg predict
// All this is necessary because if we under-estimate the size of the spill
// temps we could fail when encoding instructions that reference stack offsets for ARM.
//
// Pre codegen max spill temp size.
static const unsigned MAX_SPILL_TEMP_SIZE = 24;
//-------------------------------------------------------------------------
unsigned lvaGetMaxSpillTempSize();
#ifdef TARGET_ARM
bool lvaIsPreSpilled(unsigned lclNum, regMaskTP preSpillMask);
#endif // TARGET_ARM
void lvaAssignFrameOffsets(FrameLayoutState curState);
void lvaFixVirtualFrameOffsets();
void lvaUpdateArgWithInitialReg(LclVarDsc* varDsc);
void lvaUpdateArgsWithInitialReg();
void lvaAssignVirtualFrameOffsetsToArgs();
#ifdef UNIX_AMD64_ABI
int lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, unsigned argSize, int argOffs, int* callerArgOffset);
#else // !UNIX_AMD64_ABI
int lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, unsigned argSize, int argOffs);
#endif // !UNIX_AMD64_ABI
void lvaAssignVirtualFrameOffsetsToLocals();
int lvaAllocLocalAndSetVirtualOffset(unsigned lclNum, unsigned size, int stkOffs);
#ifdef TARGET_AMD64
// Returns true if compCalleeRegsPushed (including RBP if used as frame pointer) is even.
bool lvaIsCalleeSavedIntRegCountEven();
#endif
void lvaAlignFrame();
void lvaAssignFrameOffsetsToPromotedStructs();
int lvaAllocateTemps(int stkOffs, bool mustDoubleAlign);
#ifdef DEBUG
void lvaDumpRegLocation(unsigned lclNum);
void lvaDumpFrameLocation(unsigned lclNum);
void lvaDumpEntry(unsigned lclNum, FrameLayoutState curState, size_t refCntWtdWidth = 6);
void lvaTableDump(FrameLayoutState curState = NO_FRAME_LAYOUT); // NO_FRAME_LAYOUT means use the current frame
// layout state defined by lvaDoneFrameLayout
#endif
// Limit frame size to 1GB. The maximum is 2GB in theory - make it intentionally smaller
// to avoid bugs from borderline cases.
#define MAX_FrameSize 0x3FFFFFFF
void lvaIncrementFrameSize(unsigned size);
unsigned lvaFrameSize(FrameLayoutState curState);
// Returns the caller-SP-relative offset for the SP/FP relative offset determined by FP based.
int lvaToCallerSPRelativeOffset(int offs, bool isFpBased, bool forRootFrame = true) const;
// Returns the caller-SP-relative offset for the local variable "varNum."
int lvaGetCallerSPRelativeOffset(unsigned varNum);
// Returns the SP-relative offset for the local variable "varNum". Illegal to ask this for functions with localloc.
int lvaGetSPRelativeOffset(unsigned varNum);
int lvaToInitialSPRelativeOffset(unsigned offset, bool isFpBased);
int lvaGetInitialSPRelativeOffset(unsigned varNum);
// True if this is an OSR compilation and this local is potentially
// located on the original method stack frame.
bool lvaIsOSRLocal(unsigned varNum);
//------------------------ For splitting types ----------------------------
void lvaInitTypeRef();
void lvaInitArgs(InitVarDscInfo* varDscInfo);
void lvaInitThisPtr(InitVarDscInfo* varDscInfo);
void lvaInitRetBuffArg(InitVarDscInfo* varDscInfo, bool useFixedRetBufReg);
void lvaInitUserArgs(InitVarDscInfo* varDscInfo, unsigned skipArgs, unsigned takeArgs);
void lvaInitGenericsCtxt(InitVarDscInfo* varDscInfo);
void lvaInitVarArgsHandle(InitVarDscInfo* varDscInfo);
void lvaInitVarDsc(LclVarDsc* varDsc,
unsigned varNum,
CorInfoType corInfoType,
CORINFO_CLASS_HANDLE typeHnd,
CORINFO_ARG_LIST_HANDLE varList,
CORINFO_SIG_INFO* varSig);
static unsigned lvaTypeRefMask(var_types type);
var_types lvaGetActualType(unsigned lclNum);
var_types lvaGetRealType(unsigned lclNum);
//-------------------------------------------------------------------------
void lvaInit();
LclVarDsc* lvaGetDesc(unsigned lclNum)
{
assert(lclNum < lvaCount);
return &lvaTable[lclNum];
}
LclVarDsc* lvaGetDesc(unsigned lclNum) const
{
assert(lclNum < lvaCount);
return &lvaTable[lclNum];
}
LclVarDsc* lvaGetDesc(const GenTreeLclVarCommon* lclVar)
{
return lvaGetDesc(lclVar->GetLclNum());
}
unsigned lvaTrackedIndexToLclNum(unsigned trackedIndex)
{
assert(trackedIndex < lvaTrackedCount);
unsigned lclNum = lvaTrackedToVarNum[trackedIndex];
assert(lclNum < lvaCount);
return lclNum;
}
LclVarDsc* lvaGetDescByTrackedIndex(unsigned trackedIndex)
{
return lvaGetDesc(lvaTrackedIndexToLclNum(trackedIndex));
}
unsigned lvaGetLclNum(const LclVarDsc* varDsc)
{
assert((lvaTable <= varDsc) && (varDsc < lvaTable + lvaCount)); // varDsc must point within the table
assert(((char*)varDsc - (char*)lvaTable) % sizeof(LclVarDsc) ==
0); // varDsc better not point in the middle of a variable
unsigned varNum = (unsigned)(varDsc - lvaTable);
assert(varDsc == &lvaTable[varNum]);
return varNum;
}
unsigned lvaLclSize(unsigned varNum);
unsigned lvaLclExactSize(unsigned varNum);
bool lvaHaveManyLocals() const;
unsigned lvaGrabTemp(bool shortLifetime DEBUGARG(const char* reason));
unsigned lvaGrabTemps(unsigned cnt DEBUGARG(const char* reason));
unsigned lvaGrabTempWithImplicitUse(bool shortLifetime DEBUGARG(const char* reason));
void lvaSortByRefCount();
void lvaMarkLocalVars(); // Local variable ref-counting
void lvaComputeRefCounts(bool isRecompute, bool setSlotNumbers);
void lvaMarkLocalVars(BasicBlock* block, bool isRecompute);
void lvaAllocOutgoingArgSpaceVar(); // Set up lvaOutgoingArgSpaceVar
VARSET_VALRET_TP lvaStmtLclMask(Statement* stmt);
#ifdef DEBUG
struct lvaStressLclFldArgs
{
Compiler* m_pCompiler;
bool m_bFirstPass;
};
static fgWalkPreFn lvaStressLclFldCB;
void lvaStressLclFld();
void lvaDispVarSet(VARSET_VALARG_TP set, VARSET_VALARG_TP allVars);
void lvaDispVarSet(VARSET_VALARG_TP set);
#endif
#ifdef TARGET_ARM
int lvaFrameAddress(int varNum, bool mustBeFPBased, regNumber* pBaseReg, int addrModeOffset, bool isFloatUsage);
#else
int lvaFrameAddress(int varNum, bool* pFPbased);
#endif
bool lvaIsParameter(unsigned varNum);
bool lvaIsRegArgument(unsigned varNum);
bool lvaIsOriginalThisArg(unsigned varNum); // Is this varNum the original this argument?
bool lvaIsOriginalThisReadOnly(); // return true if there is no place in the code
// that writes to arg0
// For x64 this is 3, 5, 6, 7, >8 byte structs that are passed by reference.
// For ARM64, this is structs larger than 16 bytes that are passed by reference.
bool lvaIsImplicitByRefLocal(unsigned varNum)
{
#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
LclVarDsc* varDsc = lvaGetDesc(varNum);
if (varDsc->lvIsImplicitByRef)
{
assert(varDsc->lvIsParam);
assert(varTypeIsStruct(varDsc) || (varDsc->lvType == TYP_BYREF));
return true;
}
#endif // defined(TARGET_AMD64) || defined(TARGET_ARM64)
return false;
}
// Returns true if this local var is a multireg struct
bool lvaIsMultiregStruct(LclVarDsc* varDsc, bool isVararg);
// If the local is a TYP_STRUCT, get/set a class handle describing it
CORINFO_CLASS_HANDLE lvaGetStruct(unsigned varNum);
void lvaSetStruct(unsigned varNum, CORINFO_CLASS_HANDLE typeHnd, bool unsafeValueClsCheck, bool setTypeInfo = true);
void lvaSetStructUsedAsVarArg(unsigned varNum);
// If the local is TYP_REF, set or update the associated class information.
void lvaSetClass(unsigned varNum, CORINFO_CLASS_HANDLE clsHnd, bool isExact = false);
void lvaSetClass(unsigned varNum, GenTree* tree, CORINFO_CLASS_HANDLE stackHandle = nullptr);
void lvaUpdateClass(unsigned varNum, CORINFO_CLASS_HANDLE clsHnd, bool isExact = false);
void lvaUpdateClass(unsigned varNum, GenTree* tree, CORINFO_CLASS_HANDLE stackHandle = nullptr);
#define MAX_NumOfFieldsInPromotableStruct 4 // Maximum number of fields in promotable struct
// Info about struct type fields.
struct lvaStructFieldInfo
{
CORINFO_FIELD_HANDLE fldHnd;
unsigned char fldOffset;
unsigned char fldOrdinal;
var_types fldType;
unsigned fldSize;
CORINFO_CLASS_HANDLE fldTypeHnd;
lvaStructFieldInfo()
: fldHnd(nullptr), fldOffset(0), fldOrdinal(0), fldType(TYP_UNDEF), fldSize(0), fldTypeHnd(nullptr)
{
}
};
// Info about a struct type, instances of which may be candidates for promotion.
struct lvaStructPromotionInfo
{
CORINFO_CLASS_HANDLE typeHnd;
bool canPromote;
bool containsHoles;
bool customLayout;
bool fieldsSorted;
unsigned char fieldCnt;
lvaStructFieldInfo fields[MAX_NumOfFieldsInPromotableStruct];
lvaStructPromotionInfo(CORINFO_CLASS_HANDLE typeHnd = nullptr)
: typeHnd(typeHnd)
, canPromote(false)
, containsHoles(false)
, customLayout(false)
, fieldsSorted(false)
, fieldCnt(0)
{
}
};
struct lvaFieldOffsetCmp
{
bool operator()(const lvaStructFieldInfo& field1, const lvaStructFieldInfo& field2);
};
// This class is responsible for checking validity and profitability of struct promotion.
// If it is both legal and profitable, then TryPromoteStructVar promotes the struct and initializes
// necessary information for fgMorphStructField to use.
class StructPromotionHelper
{
public:
StructPromotionHelper(Compiler* compiler);
bool CanPromoteStructType(CORINFO_CLASS_HANDLE typeHnd);
bool TryPromoteStructVar(unsigned lclNum);
void Clear()
{
structPromotionInfo.typeHnd = NO_CLASS_HANDLE;
}
#ifdef DEBUG
void CheckRetypedAsScalar(CORINFO_FIELD_HANDLE fieldHnd, var_types requestedType);
#endif // DEBUG
private:
bool CanPromoteStructVar(unsigned lclNum);
bool ShouldPromoteStructVar(unsigned lclNum);
void PromoteStructVar(unsigned lclNum);
void SortStructFields();
lvaStructFieldInfo GetFieldInfo(CORINFO_FIELD_HANDLE fieldHnd, BYTE ordinal);
bool TryPromoteStructField(lvaStructFieldInfo& outerFieldInfo);
private:
Compiler* compiler;
lvaStructPromotionInfo structPromotionInfo;
#ifdef DEBUG
typedef JitHashTable<CORINFO_FIELD_HANDLE, JitPtrKeyFuncs<CORINFO_FIELD_STRUCT_>, var_types>
RetypedAsScalarFieldsMap;
RetypedAsScalarFieldsMap retypedFieldsMap;
#endif // DEBUG
};
StructPromotionHelper* structPromotionHelper;
unsigned lvaGetFieldLocal(const LclVarDsc* varDsc, unsigned int fldOffset);
lvaPromotionType lvaGetPromotionType(const LclVarDsc* varDsc);
lvaPromotionType lvaGetPromotionType(unsigned varNum);
lvaPromotionType lvaGetParentPromotionType(const LclVarDsc* varDsc);
lvaPromotionType lvaGetParentPromotionType(unsigned varNum);
bool lvaIsFieldOfDependentlyPromotedStruct(const LclVarDsc* varDsc);
bool lvaIsGCTracked(const LclVarDsc* varDsc);
#if defined(FEATURE_SIMD)
bool lvaMapSimd12ToSimd16(const LclVarDsc* varDsc)
{
assert(varDsc->lvType == TYP_SIMD12);
assert(varDsc->lvExactSize == 12);
#if defined(TARGET_64BIT)
assert(compMacOsArm64Abi() || varDsc->lvSize() == 16);
#endif // defined(TARGET_64BIT)
// We make local variable SIMD12 types 16 bytes instead of just 12.
// lvSize() will return 16 bytes for SIMD12, even for fields.
// However, we can't do that mapping if the var is a dependently promoted struct field.
// Such a field must remain its exact size within its parent struct unless it is a single
// field *and* it is the only field in a struct of 16 bytes.
if (varDsc->lvSize() != 16)
{
return false;
}
if (lvaIsFieldOfDependentlyPromotedStruct(varDsc))
{
LclVarDsc* parentVarDsc = lvaGetDesc(varDsc->lvParentLcl);
return (parentVarDsc->lvFieldCnt == 1) && (parentVarDsc->lvSize() == 16);
}
return true;
}
#endif // defined(FEATURE_SIMD)
unsigned lvaGSSecurityCookie; // LclVar number
bool lvaTempsHaveLargerOffsetThanVars();
// Returns "true" iff local variable "lclNum" is in SSA form.
bool lvaInSsa(unsigned lclNum)
{
assert(lclNum < lvaCount);
return lvaTable[lclNum].lvInSsa;
}
unsigned lvaStubArgumentVar; // variable representing the secret stub argument coming in EAX
#if defined(FEATURE_EH_FUNCLETS)
unsigned lvaPSPSym; // variable representing the PSPSym
#endif
InlineInfo* impInlineInfo; // Only present for inlinees
InlineStrategy* m_inlineStrategy;
InlineContext* compInlineContext; // Always present
// The Compiler* that is the root of the inlining tree of which "this" is a member.
Compiler* impInlineRoot();
#if defined(DEBUG) || defined(INLINE_DATA)
unsigned __int64 getInlineCycleCount()
{
return m_compCycles;
}
#endif // defined(DEBUG) || defined(INLINE_DATA)
bool fgNoStructPromotion; // Set to TRUE to turn off struct promotion for this method.
bool fgNoStructParamPromotion; // Set to TRUE to turn off struct promotion for parameters of this method.
//=========================================================================
// PROTECTED
//=========================================================================
protected:
//---------------- Local variable ref-counting ----------------------------
void lvaMarkLclRefs(GenTree* tree, BasicBlock* block, Statement* stmt, bool isRecompute);
bool IsDominatedByExceptionalEntry(BasicBlock* block);
void SetVolatileHint(LclVarDsc* varDsc);
// Keeps the mapping from SSA #'s to VN's for the implicit memory variables.
SsaDefArray<SsaMemDef> lvMemoryPerSsaData;
public:
// Returns the address of the per-Ssa data for memory at the given ssaNum (which is required
// not to be the SsaConfig::RESERVED_SSA_NUM, which indicates that the variable is
// not an SSA variable).
SsaMemDef* GetMemoryPerSsaData(unsigned ssaNum)
{
return lvMemoryPerSsaData.GetSsaDef(ssaNum);
}
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Importer XX
XX XX
XX Imports the given method and converts it to semantic trees XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
private:
// For prefixFlags
enum
{
PREFIX_TAILCALL_EXPLICIT = 0x00000001, // call has "tail" IL prefix
PREFIX_TAILCALL_IMPLICIT =
0x00000010, // call is treated as having "tail" prefix even though there is no "tail" IL prefix
PREFIX_TAILCALL_STRESS =
0x00000100, // call doesn't have the "tail" IL prefix but is treated as explicit because of tail call stress
PREFIX_TAILCALL = (PREFIX_TAILCALL_EXPLICIT | PREFIX_TAILCALL_IMPLICIT | PREFIX_TAILCALL_STRESS),
PREFIX_VOLATILE = 0x00001000,
PREFIX_UNALIGNED = 0x00010000,
PREFIX_CONSTRAINED = 0x00100000,
PREFIX_READONLY = 0x01000000
};
static void impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix);
static OPCODE impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp);
static bool impOpcodeIsCallOpcode(OPCODE opcode);
public:
void impInit();
void impImport();
CORINFO_CLASS_HANDLE impGetRefAnyClass();
CORINFO_CLASS_HANDLE impGetRuntimeArgumentHandle();
CORINFO_CLASS_HANDLE impGetTypeHandleClass();
CORINFO_CLASS_HANDLE impGetStringClass();
CORINFO_CLASS_HANDLE impGetObjectClass();
// Returns underlying type of handles returned by ldtoken instruction
var_types GetRuntimeHandleUnderlyingType()
{
// RuntimeTypeHandle is backed by raw pointer on CoreRT and by object reference on other runtimes
return IsTargetAbi(CORINFO_CORERT_ABI) ? TYP_I_IMPL : TYP_REF;
}
void impDevirtualizeCall(GenTreeCall* call,
CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_METHOD_HANDLE* method,
unsigned* methodFlags,
CORINFO_CONTEXT_HANDLE* contextHandle,
CORINFO_CONTEXT_HANDLE* exactContextHandle,
bool isLateDevirtualization,
bool isExplicitTailCall,
IL_OFFSET ilOffset = BAD_IL_OFFSET);
//=========================================================================
// PROTECTED
//=========================================================================
protected:
//-------------------- Stack manipulation ---------------------------------
unsigned impStkSize; // Size of the full stack
#define SMALL_STACK_SIZE 16 // number of elements in impSmallStack
struct SavedStack // used to save/restore stack contents.
{
unsigned ssDepth; // number of values on stack
StackEntry* ssTrees; // saved tree values
};
bool impIsPrimitive(CorInfoType type);
bool impILConsumesAddr(const BYTE* codeAddr);
void impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind);
void impPushOnStack(GenTree* tree, typeInfo ti);
void impPushNullObjRefOnStack();
StackEntry impPopStack();
StackEntry& impStackTop(unsigned n = 0);
unsigned impStackHeight();
void impSaveStackState(SavedStack* savePtr, bool copy);
void impRestoreStackState(SavedStack* savePtr);
GenTree* impImportLdvirtftn(GenTree* thisPtr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo);
int impBoxPatternMatch(CORINFO_RESOLVED_TOKEN* pResolvedToken,
const BYTE* codeAddr,
const BYTE* codeEndp,
bool makeInlineObservation = false);
void impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken);
void impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo);
bool impCanPInvokeInline();
bool impCanPInvokeInlineCallSite(BasicBlock* block);
void impCheckForPInvokeCall(
GenTreeCall* call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block);
GenTreeCall* impImportIndirectCall(CORINFO_SIG_INFO* sig, const DebugInfo& di = DebugInfo());
void impPopArgsForUnmanagedCall(GenTree* call, CORINFO_SIG_INFO* sig);
void impInsertHelperCall(CORINFO_HELPER_DESC* helperCall);
void impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall);
void impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall);
var_types impImportCall(OPCODE opcode,
CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a
// type parameter?
GenTree* newobjThis,
int prefixFlags,
CORINFO_CALL_INFO* callInfo,
IL_OFFSET rawILOffset);
CORINFO_CLASS_HANDLE impGetSpecialIntrinsicExactReturnType(CORINFO_METHOD_HANDLE specialIntrinsicHandle);
bool impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo, CorInfoCallConvExtension callConv);
GenTree* impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HANDLE retClsHnd);
GenTree* impFixupStructReturnType(GenTree* op,
CORINFO_CLASS_HANDLE retClsHnd,
CorInfoCallConvExtension unmgdCallConv);
#ifdef DEBUG
var_types impImportJitTestLabelMark(int numArgs);
#endif // DEBUG
GenTree* impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken);
GenTree* impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp);
GenTree* impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_ACCESS_FLAGS access,
CORINFO_FIELD_INFO* pFieldInfo,
var_types lclTyp);
static void impBashVarAddrsToI(GenTree* tree1, GenTree* tree2 = nullptr);
GenTree* impImplicitIorI4Cast(GenTree* tree, var_types dstTyp);
GenTree* impImplicitR4orR8Cast(GenTree* tree, var_types dstTyp);
void impImportLeave(BasicBlock* block);
void impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr);
GenTree* impTypeIsAssignable(GenTree* typeTo, GenTree* typeFrom);
// Mirrors StringComparison.cs
enum StringComparison
{
Ordinal = 4,
OrdinalIgnoreCase = 5
};
enum StringComparisonJoint
{
Eq, // (d1 == cns1) && (s2 == cns2)
Xor, // (d1 ^ cns1) | (s2 ^ cns2)
};
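// Illustrative note: with the Xor joint, two fixed-width comparisons can be merged into a single branch,
// e.g. ((d1 ^ cns1) | (s2 ^ cns2)) == 0 is equivalent to (d1 == cns1) && (s2 == cns2), which is how the
// constant-string expansions below can avoid a second conditional jump.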
GenTree* impStringEqualsOrStartsWith(bool startsWith, CORINFO_SIG_INFO* sig, unsigned methodFlags);
GenTree* impSpanEqualsOrStartsWith(bool startsWith, CORINFO_SIG_INFO* sig, unsigned methodFlags);
GenTree* impExpandHalfConstEquals(GenTreeLclVar* data,
GenTree* lengthFld,
bool checkForNull,
bool startsWith,
WCHAR* cnsData,
int len,
int dataOffset,
StringComparison cmpMode);
GenTree* impCreateCompareInd(GenTreeLclVar* obj,
var_types type,
ssize_t offset,
ssize_t value,
StringComparison ignoreCase,
StringComparisonJoint joint = Eq);
GenTree* impExpandHalfConstEqualsSWAR(
GenTreeLclVar* data, WCHAR* cns, int len, int dataOffset, StringComparison cmpMode);
GenTree* impExpandHalfConstEqualsSIMD(
GenTreeLclVar* data, WCHAR* cns, int len, int dataOffset, StringComparison cmpMode);
GenTreeStrCon* impGetStrConFromSpan(GenTree* span);
GenTree* impIntrinsic(GenTree* newobjThis,
CORINFO_CLASS_HANDLE clsHnd,
CORINFO_METHOD_HANDLE method,
CORINFO_SIG_INFO* sig,
unsigned methodFlags,
int memberRef,
bool readonlyCall,
bool tailCall,
CORINFO_RESOLVED_TOKEN* pContstrainedResolvedToken,
CORINFO_THIS_TRANSFORM constraintCallThisTransform,
NamedIntrinsic* pIntrinsicName,
bool* isSpecialIntrinsic = nullptr);
GenTree* impMathIntrinsic(CORINFO_METHOD_HANDLE method,
CORINFO_SIG_INFO* sig,
var_types callType,
NamedIntrinsic intrinsicName,
bool tailCall);
NamedIntrinsic lookupNamedIntrinsic(CORINFO_METHOD_HANDLE method);
GenTree* impUnsupportedNamedIntrinsic(unsigned helper,
CORINFO_METHOD_HANDLE method,
CORINFO_SIG_INFO* sig,
bool mustExpand);
#ifdef FEATURE_HW_INTRINSICS
GenTree* impHWIntrinsic(NamedIntrinsic intrinsic,
CORINFO_CLASS_HANDLE clsHnd,
CORINFO_METHOD_HANDLE method,
CORINFO_SIG_INFO* sig,
bool mustExpand);
GenTree* impSimdAsHWIntrinsic(NamedIntrinsic intrinsic,
CORINFO_CLASS_HANDLE clsHnd,
CORINFO_METHOD_HANDLE method,
CORINFO_SIG_INFO* sig,
GenTree* newobjThis);
protected:
bool compSupportsHWIntrinsic(CORINFO_InstructionSet isa);
GenTree* impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic,
CORINFO_CLASS_HANDLE clsHnd,
CORINFO_SIG_INFO* sig,
var_types retType,
CorInfoType simdBaseJitType,
unsigned simdSize,
GenTree* newobjThis);
GenTree* impSpecialIntrinsic(NamedIntrinsic intrinsic,
CORINFO_CLASS_HANDLE clsHnd,
CORINFO_METHOD_HANDLE method,
CORINFO_SIG_INFO* sig,
CorInfoType simdBaseJitType,
var_types retType,
unsigned simdSize);
GenTree* getArgForHWIntrinsic(var_types argType,
CORINFO_CLASS_HANDLE argClass,
bool expectAddr = false,
GenTree* newobjThis = nullptr);
GenTree* impNonConstFallback(NamedIntrinsic intrinsic, var_types simdType, CorInfoType simdBaseJitType);
GenTree* addRangeCheckIfNeeded(
NamedIntrinsic intrinsic, GenTree* immOp, bool mustExpand, int immLowerBound, int immUpperBound);
GenTree* addRangeCheckForHWIntrinsic(GenTree* immOp, int immLowerBound, int immUpperBound);
#ifdef TARGET_XARCH
GenTree* impBaseIntrinsic(NamedIntrinsic intrinsic,
CORINFO_CLASS_HANDLE clsHnd,
CORINFO_METHOD_HANDLE method,
CORINFO_SIG_INFO* sig,
CorInfoType simdBaseJitType,
var_types retType,
unsigned simdSize);
GenTree* impSSEIntrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig);
GenTree* impSSE2Intrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig);
GenTree* impAvxOrAvx2Intrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig);
GenTree* impBMI1OrBMI2Intrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig);
#endif // TARGET_XARCH
#endif // FEATURE_HW_INTRINSICS
GenTree* impArrayAccessIntrinsic(CORINFO_CLASS_HANDLE clsHnd,
CORINFO_SIG_INFO* sig,
int memberRef,
bool readonlyCall,
NamedIntrinsic intrinsicName);
GenTree* impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig);
GenTree* impCreateSpanIntrinsic(CORINFO_SIG_INFO* sig);
GenTree* impKeepAliveIntrinsic(GenTree* objToKeepAlive);
GenTree* impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo);
GenTree* impTransformThis(GenTree* thisPtr,
CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
CORINFO_THIS_TRANSFORM transform);
//----------------- Manipulating the trees and stmts ----------------------
Statement* impStmtList; // Statements for the BB being imported.
Statement* impLastStmt; // The last statement for the current BB.
public:
enum
{
CHECK_SPILL_ALL = -1,
CHECK_SPILL_NONE = -2
};
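// Roughly: passing CHECK_SPILL_ALL as the check level asks the append/assign helpers below to spill any
// evaluation-stack entries the new tree could interfere with, while CHECK_SPILL_NONE means the caller
// guarantees there is no such interference, so nothing needs to be spilled.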
void impBeginTreeList();
void impEndTreeList(BasicBlock* block, Statement* firstStmt, Statement* lastStmt);
void impEndTreeList(BasicBlock* block);
void impAppendStmtCheck(Statement* stmt, unsigned chkLevel);
void impAppendStmt(Statement* stmt, unsigned chkLevel, bool checkConsumedDebugInfo = true);
void impAppendStmt(Statement* stmt);
void impInsertStmtBefore(Statement* stmt, Statement* stmtBefore);
Statement* impAppendTree(GenTree* tree, unsigned chkLevel, const DebugInfo& di, bool checkConsumedDebugInfo = true);
void impInsertTreeBefore(GenTree* tree, const DebugInfo& di, Statement* stmtBefore);
void impAssignTempGen(unsigned tmp,
GenTree* val,
unsigned curLevel = (unsigned)CHECK_SPILL_NONE,
Statement** pAfterStmt = nullptr,
const DebugInfo& di = DebugInfo(),
BasicBlock* block = nullptr);
void impAssignTempGen(unsigned tmpNum,
GenTree* val,
CORINFO_CLASS_HANDLE structHnd,
unsigned curLevel,
Statement** pAfterStmt = nullptr,
const DebugInfo& di = DebugInfo(),
BasicBlock* block = nullptr);
Statement* impExtractLastStmt();
GenTree* impCloneExpr(GenTree* tree,
GenTree** clone,
CORINFO_CLASS_HANDLE structHnd,
unsigned curLevel,
Statement** pAfterStmt DEBUGARG(const char* reason));
GenTree* impAssignStruct(GenTree* dest,
GenTree* src,
CORINFO_CLASS_HANDLE structHnd,
unsigned curLevel,
Statement** pAfterStmt = nullptr,
const DebugInfo& di = DebugInfo(),
BasicBlock* block = nullptr);
GenTree* impAssignStructPtr(GenTree* dest,
GenTree* src,
CORINFO_CLASS_HANDLE structHnd,
unsigned curLevel,
Statement** pAfterStmt = nullptr,
const DebugInfo& di = DebugInfo(),
BasicBlock* block = nullptr);
GenTree* impGetStructAddr(GenTree* structVal, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, bool willDeref);
var_types impNormStructType(CORINFO_CLASS_HANDLE structHnd, CorInfoType* simdBaseJitType = nullptr);
GenTree* impNormStructVal(GenTree* structVal,
CORINFO_CLASS_HANDLE structHnd,
unsigned curLevel,
bool forceNormalization = false);
GenTree* impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken,
bool* pRuntimeLookup = nullptr,
bool mustRestoreHandle = false,
bool importParent = false);
GenTree* impParentClassTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken,
bool* pRuntimeLookup = nullptr,
bool mustRestoreHandle = false)
{
return impTokenToHandle(pResolvedToken, pRuntimeLookup, mustRestoreHandle, true);
}
GenTree* impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_LOOKUP* pLookup,
GenTreeFlags flags,
void* compileTimeHandle);
GenTree* getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind);
GenTree* impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_LOOKUP* pLookup,
void* compileTimeHandle);
GenTree* impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup, GenTreeFlags flags, void* compileTimeHandle);
GenTreeCall* impReadyToRunHelperToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
CorInfoHelpFunc helper,
var_types type,
GenTreeCall::Use* args = nullptr,
CORINFO_LOOKUP_KIND* pGenericLookupKind = nullptr);
bool impIsCastHelperEligibleForClassProbe(GenTree* tree);
bool impIsCastHelperMayHaveProfileData(CorInfoHelpFunc helper);
GenTree* impCastClassOrIsInstToTree(
GenTree* op1, GenTree* op2, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass, IL_OFFSET ilOffset);
GenTree* impOptimizeCastClassOrIsInst(GenTree* op1, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass);
bool VarTypeIsMultiByteAndCanEnreg(var_types type,
CORINFO_CLASS_HANDLE typeClass,
unsigned* typeSize,
bool forReturn,
bool isVarArg,
CorInfoCallConvExtension callConv);
bool IsIntrinsicImplementedByUserCall(NamedIntrinsic intrinsicName);
bool IsTargetIntrinsic(NamedIntrinsic intrinsicName);
bool IsMathIntrinsic(NamedIntrinsic intrinsicName);
bool IsMathIntrinsic(GenTree* tree);
private:
//----------------- Importing the method ----------------------------------
CORINFO_CONTEXT_HANDLE impTokenLookupContextHandle; // The context used for looking up tokens.
#ifdef DEBUG
unsigned impCurOpcOffs;
const char* impCurOpcName;
bool impNestedStackSpill;
// For displaying instrs with generated native code (-n:B)
Statement* impLastILoffsStmt; // oldest stmt added for which we did not call SetLastILOffset().
void impNoteLastILoffs();
#endif
// Debug info of current statement being imported. It gets set to contain
// no IL location (!impCurStmtDI.GetLocation().IsValid) after it has been
// set in the appended trees. Then it gets updated at IL instructions for
// which we have to report mapping info.
// It will always contain the current inline context.
DebugInfo impCurStmtDI;
DebugInfo impCreateDIWithCurrentStackInfo(IL_OFFSET offs, bool isCall);
void impCurStmtOffsSet(IL_OFFSET offs);
void impNoteBranchOffs();
unsigned impInitBlockLineInfo();
bool impIsThis(GenTree* obj);
bool impIsLDFTN_TOKEN(const BYTE* delegateCreateStart, const BYTE* newobjCodeAddr);
bool impIsDUP_LDVIRTFTN_TOKEN(const BYTE* delegateCreateStart, const BYTE* newobjCodeAddr);
bool impIsAnySTLOC(OPCODE opcode)
{
return ((opcode == CEE_STLOC) || (opcode == CEE_STLOC_S) ||
((opcode >= CEE_STLOC_0) && (opcode <= CEE_STLOC_3)));
}
GenTreeCall::Use* impPopCallArgs(unsigned count, CORINFO_SIG_INFO* sig, GenTreeCall::Use* prefixArgs = nullptr);
bool impCheckImplicitArgumentCoercion(var_types sigType, var_types nodeType) const;
GenTreeCall::Use* impPopReverseCallArgs(unsigned count, CORINFO_SIG_INFO* sig, unsigned skipReverseCount = 0);
//---------------- Spilling the importer stack ----------------------------
// The maximum number of bytes of IL processed without clean stack state.
// It allows us to limit the maximum tree size and depth.
static const unsigned MAX_TREE_SIZE = 200;
bool impCanSpillNow(OPCODE prevOpcode);
struct PendingDsc
{
PendingDsc* pdNext;
BasicBlock* pdBB;
SavedStack pdSavedStack;
ThisInitState pdThisPtrInit;
};
PendingDsc* impPendingList; // list of BBs currently waiting to be imported.
PendingDsc* impPendingFree; // Freed up dscs that can be reused
// We keep a byte-per-block map (dynamically extended) in the top-level Compiler object of a compilation.
JitExpandArray<BYTE> impPendingBlockMembers;
// Return the byte for "b" (allocating/extending impPendingBlockMembers if necessary.)
// Operates on the map in the top-level ancestor.
BYTE impGetPendingBlockMember(BasicBlock* blk)
{
return impInlineRoot()->impPendingBlockMembers.Get(blk->bbInd());
}
// Set the byte for "b" to "val" (allocating/extending impPendingBlockMembers if necessary.)
// Operates on the map in the top-level ancestor.
void impSetPendingBlockMember(BasicBlock* blk, BYTE val)
{
impInlineRoot()->impPendingBlockMembers.Set(blk->bbInd(), val);
}
bool impCanReimport;
bool impSpillStackEntry(unsigned level,
unsigned varNum
#ifdef DEBUG
,
bool bAssertOnRecursion,
const char* reason
#endif
);
void impSpillStackEnsure(bool spillLeaves = false);
void impEvalSideEffects();
void impSpillSpecialSideEff();
void impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason));
void impSpillValueClasses();
void impSpillEvalStack();
static fgWalkPreFn impFindValueClasses;
void impSpillLclRefs(ssize_t lclNum);
BasicBlock* impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd, bool isSingleBlockFilter);
bool impBlockIsInALoop(BasicBlock* block);
void impImportBlockCode(BasicBlock* block);
void impReimportMarkBlock(BasicBlock* block);
void impReimportMarkSuccessors(BasicBlock* block);
void impVerifyEHBlock(BasicBlock* block, bool isTryStart);
void impImportBlockPending(BasicBlock* block);
// Similar to impImportBlockPending, but assumes that block has already been imported once and is being
// reimported for some reason. It specifically does *not* look at verCurrentState to set the EntryState
// for the block, but instead, just re-uses the block's existing EntryState.
void impReimportBlockPending(BasicBlock* block);
var_types impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTree** pOp1, GenTree** pOp2);
void impImportBlock(BasicBlock* block);
// Assumes that "block" is a basic block that completes with a non-empty stack. We will assign the values
// on the stack to local variables (the "spill temp" variables). The successor blocks will assume that
// their incoming stack contents are in those locals. This requires "block" and its successors to agree on
// the variables that will be used -- and for all the predecessors of those successors, and the
// successors of those predecessors, etc. Call such a set of blocks closed under alternating
// successor/predecessor edges a "spill clique." A block is a "predecessor" or "successor" member of the
// clique (or, conceivably, both). Each block has a specified sequence of incoming and outgoing spill
// temps. If "block" already has its outgoing spill temps assigned (they are always a contiguous series
// of local variable numbers, so we represent them with the base local variable number), returns that.
// Otherwise, picks a set of spill temps, and propagates this choice to all blocks in the spill clique of
// which "block" is a member (asserting, in debug mode, that no block in this clique had its spill temps
// chosen already. More precisely, that the incoming or outgoing spill temps are not chosen, depending
// on which kind of member of the clique the block is).
unsigned impGetSpillTmpBase(BasicBlock* block);
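// Illustrative example: if B1 and B2 both branch to B3 with one value left on the stack, then B1 and B2 are
// predecessor members and B3 is a successor member of the same spill clique, and all three must use the
// spill temp base returned here.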
// Assumes that "block" is a basic block that completes with a non-empty stack. We have previously
// assigned the values on the stack to local variables (the "spill temp" variables). The successor blocks
// will assume that their incoming stack contents are in those locals. This requires "block" and its
// successors to agree on the variables and their types that will be used. The CLI spec allows implicit
// conversions between 'int' and 'native int' or 'float' and 'double' stack types. So one predecessor can
// push an int and another can push a native int. For 64-bit we have chosen to implement this by typing
// the "spill temp" as native int, and then importing (or re-importing as needed) so that all the
// predecessors in the "spill clique" push a native int (sign-extending if needed), and all the
// successors receive a native int. Similarly float and double are unified to double.
// This routine is called after a type-mismatch is detected, and it will walk the spill clique to mark
// blocks for re-importation as appropriate (both successors, so they get the right incoming type, and
// predecessors, so they insert an upcast if needed).
void impReimportSpillClique(BasicBlock* block);
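// e.g. on 64-bit targets, if one predecessor pushes a TYP_INT constant and another pushes a TYP_I_IMPL local
// for the same stack slot, the shared spill temp is typed as native int and the int-pushing predecessor is
// re-imported so that it sign-extends (an illustrative case of the unification described above).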
// When we compute a "spill clique" (see above) these byte-maps are allocated to have a byte per basic
// block, and represent the predecessor and successor members of the clique currently being computed.
// *** Access to these will need to be locked in a parallel compiler.
JitExpandArray<BYTE> impSpillCliquePredMembers;
JitExpandArray<BYTE> impSpillCliqueSuccMembers;
enum SpillCliqueDir
{
SpillCliquePred,
SpillCliqueSucc
};
// Abstract class for receiving a callback while walking a spill clique
class SpillCliqueWalker
{
public:
virtual void Visit(SpillCliqueDir predOrSucc, BasicBlock* blk) = 0;
};
// This class is used for setting the bbStkTempsIn and bbStkTempsOut on the blocks within a spill clique
class SetSpillTempsBase : public SpillCliqueWalker
{
unsigned m_baseTmp;
public:
SetSpillTempsBase(unsigned baseTmp) : m_baseTmp(baseTmp)
{
}
virtual void Visit(SpillCliqueDir predOrSucc, BasicBlock* blk);
};
// This class is used for implementing impReimportSpillClique part on each block within the spill clique
class ReimportSpillClique : public SpillCliqueWalker
{
Compiler* m_pComp;
public:
ReimportSpillClique(Compiler* pComp) : m_pComp(pComp)
{
}
virtual void Visit(SpillCliqueDir predOrSucc, BasicBlock* blk);
};
// This is the heart of the algorithm for walking spill cliques. It invokes callback->Visit for each
// predecessor or successor within the spill clique
void impWalkSpillCliqueFromPred(BasicBlock* pred, SpillCliqueWalker* callback);
// For a BasicBlock that has already been imported, the EntryState has an array of GenTrees for the
// incoming locals. This walks that list and resets the types of the GenTrees to match the types of
// the VarDscs. They get out of sync when we have int/native int issues (see impReimportSpillClique).
void impRetypeEntryStateTemps(BasicBlock* blk);
BYTE impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk);
void impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val);
void impPushVar(GenTree* op, typeInfo tiRetVal);
GenTreeLclVar* impCreateLocalNode(unsigned lclNum DEBUGARG(IL_OFFSET offset));
void impLoadVar(unsigned lclNum, IL_OFFSET offset, const typeInfo& tiRetVal);
void impLoadVar(unsigned lclNum, IL_OFFSET offset)
{
impLoadVar(lclNum, offset, lvaGetDesc(lclNum)->lvVerTypeInfo);
}
void impLoadArg(unsigned ilArgNum, IL_OFFSET offset);
void impLoadLoc(unsigned ilLclNum, IL_OFFSET offset);
bool impReturnInstruction(int prefixFlags, OPCODE& opcode);
#ifdef TARGET_ARM
void impMarkLclDstNotPromotable(unsigned tmpNum, GenTree* op, CORINFO_CLASS_HANDLE hClass);
#endif
// A free list of linked list nodes used to represent to-do stacks of basic blocks.
struct BlockListNode
{
BasicBlock* m_blk;
BlockListNode* m_next;
BlockListNode(BasicBlock* blk, BlockListNode* next = nullptr) : m_blk(blk), m_next(next)
{
}
void* operator new(size_t sz, Compiler* comp);
};
BlockListNode* impBlockListNodeFreeList;
void FreeBlockListNode(BlockListNode* node);
bool impIsValueType(typeInfo* pTypeInfo);
var_types mangleVarArgsType(var_types type);
regNumber getCallArgIntRegister(regNumber floatReg);
regNumber getCallArgFloatRegister(regNumber intReg);
#if defined(DEBUG)
static unsigned jitTotalMethodCompiled;
#endif
#ifdef DEBUG
static LONG jitNestingLevel;
#endif // DEBUG
static bool impIsAddressInLocal(const GenTree* tree, GenTree** lclVarTreeOut = nullptr);
void impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult);
// STATIC inlining decision based on the IL code.
void impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle,
CORINFO_METHOD_INFO* methInfo,
bool forceInline,
InlineResult* inlineResult);
void impCheckCanInline(GenTreeCall* call,
CORINFO_METHOD_HANDLE fncHandle,
unsigned methAttr,
CORINFO_CONTEXT_HANDLE exactContextHnd,
InlineCandidateInfo** ppInlineCandidateInfo,
InlineResult* inlineResult);
void impInlineRecordArgInfo(InlineInfo* pInlineInfo,
GenTree* curArgVal,
unsigned argNum,
InlineResult* inlineResult);
void impInlineInitVars(InlineInfo* pInlineInfo);
unsigned impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason));
GenTree* impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclTypeInfo);
bool impInlineIsThis(GenTree* tree, InlArgInfo* inlArgInfo);
bool impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTree* additionalTree,
GenTreeCall::Use* additionalCallArgs,
GenTree* dereferencedAddress,
InlArgInfo* inlArgInfo);
void impMarkInlineCandidate(GenTree* call,
CORINFO_CONTEXT_HANDLE exactContextHnd,
bool exactContextNeedsRuntimeLookup,
CORINFO_CALL_INFO* callInfo);
void impMarkInlineCandidateHelper(GenTreeCall* call,
CORINFO_CONTEXT_HANDLE exactContextHnd,
bool exactContextNeedsRuntimeLookup,
CORINFO_CALL_INFO* callInfo);
bool impTailCallRetTypeCompatible(bool allowWidening,
var_types callerRetType,
CORINFO_CLASS_HANDLE callerRetTypeClass,
CorInfoCallConvExtension callerCallConv,
var_types calleeRetType,
CORINFO_CLASS_HANDLE calleeRetTypeClass,
CorInfoCallConvExtension calleeCallConv);
bool impIsTailCallILPattern(
bool tailPrefixed, OPCODE curOpcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, bool isRecursive);
bool impIsImplicitTailCallCandidate(
OPCODE curOpcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive);
bool impIsClassExact(CORINFO_CLASS_HANDLE classHnd);
bool impCanSkipCovariantStoreCheck(GenTree* value, GenTree* array);
methodPointerInfo* impAllocateMethodPointerInfo(const CORINFO_RESOLVED_TOKEN& token, mdToken tokenConstrained);
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX FlowGraph XX
XX XX
XX Info about the basic-blocks, their contents and the flow analysis XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
public:
BasicBlock* fgFirstBB; // Beginning of the basic block list
BasicBlock* fgLastBB; // End of the basic block list
BasicBlock* fgFirstColdBlock; // First block to be placed in the cold section
BasicBlock* fgEntryBB; // For OSR, the original method's entry point
BasicBlock* fgOSREntryBB; // For OSR, the logical entry point (~ patchpoint)
#if defined(FEATURE_EH_FUNCLETS)
BasicBlock* fgFirstFuncletBB; // First block of outlined funclets (to allow block insertion before the funclets)
#endif
BasicBlock* fgFirstBBScratch; // Block inserted for initialization stuff. Is nullptr if no such block has been
// created.
BasicBlockList* fgReturnBlocks; // list of BBJ_RETURN blocks
unsigned fgEdgeCount; // # of control flow edges between the BBs
unsigned fgBBcount; // # of BBs in the method
#ifdef DEBUG
unsigned fgBBcountAtCodegen; // # of BBs in the method at the start of codegen
#endif
unsigned fgBBNumMax; // The max bbNum that has been assigned to basic blocks
unsigned fgDomBBcount; // # of BBs for which we have dominator and reachability information
BasicBlock** fgBBInvPostOrder; // The flow graph stored in an array sorted in topological order, needed to compute
// dominance. Indexed by block number. Size: fgBBNumMax + 1.
// After the dominance tree is computed, we cache a DFS preorder number and DFS postorder number to compute
// dominance queries in O(1). fgDomTreePreOrder and fgDomTreePostOrder are arrays giving the block's preorder and
// postorder number, respectively. The arrays are indexed by basic block number. (Note that blocks are numbered
// starting from one. Thus, we always waste element zero. This makes debugging easier and makes the code less likely
// to suffer from bugs stemming from forgetting to add or subtract one from the block number to form an array
// index). The arrays are of size fgBBNumMax + 1.
unsigned* fgDomTreePreOrder;
unsigned* fgDomTreePostOrder;
// Dominator tree used by SSA construction and copy propagation (the two are expected to use the same tree
// in order to avoid the need for SSA reconstruction and an "out of SSA" phase).
DomTreeNode* fgSsaDomTree;
bool fgBBVarSetsInited;
// Allocate array like T* a = new T[fgBBNumMax + 1];
// Using a helper so we don't keep forgetting the +1.
template <typename T>
T* fgAllocateTypeForEachBlk(CompMemKind cmk = CMK_Unknown)
{
return getAllocator(cmk).allocate<T>(fgBBNumMax + 1);
}
// BlockSets are relative to a specific set of BasicBlock numbers. If that changes
// (if the blocks are renumbered), this changes. BlockSets from different epochs
// cannot be meaningfully combined. Note that new blocks can be created with higher
// block numbers without changing the basic block epoch. These blocks *cannot*
// participate in a block set until the blocks are all renumbered, causing the epoch
// to change. This is useful if continuing to use previous block sets is valuable.
// If the epoch is zero, then it is uninitialized, and block sets can't be used.
unsigned fgCurBBEpoch;
unsigned GetCurBasicBlockEpoch()
{
return fgCurBBEpoch;
}
// The number of basic blocks in the current epoch. When the blocks are renumbered,
// this is fgBBcount. As blocks are added, fgBBcount increases, fgCurBBEpochSize remains
// the same, until a new BasicBlock epoch is created, such as when the blocks are all renumbered.
unsigned fgCurBBEpochSize;
// The number of "size_t" elements required to hold a bitset large enough for fgCurBBEpochSize
// bits. This is precomputed to avoid doing math every time BasicBlockBitSetTraits::GetArrSize() is called.
unsigned fgBBSetCountInSizeTUnits;
void NewBasicBlockEpoch()
{
INDEBUG(unsigned oldEpochArrSize = fgBBSetCountInSizeTUnits);
// We have a new epoch. Compute and cache the size needed for new BlockSets.
fgCurBBEpoch++;
fgCurBBEpochSize = fgBBNumMax + 1;
fgBBSetCountInSizeTUnits =
roundUp(fgCurBBEpochSize, (unsigned)(sizeof(size_t) * 8)) / unsigned(sizeof(size_t) * 8);
#ifdef DEBUG
// All BlockSet objects are now invalid!
fgReachabilitySetsValid = false; // the bbReach sets are now invalid!
fgEnterBlksSetValid = false; // the fgEnterBlks set is now invalid!
if (verbose)
{
unsigned epochArrSize = BasicBlockBitSetTraits::GetArrSize(this, sizeof(size_t));
printf("\nNew BlockSet epoch %d, # of blocks (including unused BB00): %u, bitset array size: %u (%s)",
fgCurBBEpoch, fgCurBBEpochSize, epochArrSize, (epochArrSize <= 1) ? "short" : "long");
if ((fgCurBBEpoch != 1) && ((oldEpochArrSize <= 1) != (epochArrSize <= 1)))
{
// If we're not just establishing the first epoch, and the epoch array size has changed such that we're
// going to change our bitset representation from short (just a size_t bitset) to long (a pointer to an
// array of size_t bitsets), then print that out.
printf("; NOTE: BlockSet size was previously %s!", (oldEpochArrSize <= 1) ? "short" : "long");
}
printf("\n");
}
#endif // DEBUG
}
void EnsureBasicBlockEpoch()
{
if (fgCurBBEpochSize != fgBBNumMax + 1)
{
NewBasicBlockEpoch();
}
}
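// Typical usage (a sketch): after renumbering blocks, call NewBasicBlockEpoch() (or EnsureBasicBlockEpoch())
// before allocating fresh block sets, e.g. with BlockSetOps::MakeEmpty(this); block sets created under the
// previous epoch must not be reused.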
BasicBlock* fgNewBasicBlock(BBjumpKinds jumpKind);
void fgEnsureFirstBBisScratch();
bool fgFirstBBisScratch();
bool fgBBisScratch(BasicBlock* block);
void fgExtendEHRegionBefore(BasicBlock* block);
void fgExtendEHRegionAfter(BasicBlock* block);
BasicBlock* fgNewBBbefore(BBjumpKinds jumpKind, BasicBlock* block, bool extendRegion);
BasicBlock* fgNewBBafter(BBjumpKinds jumpKind, BasicBlock* block, bool extendRegion);
BasicBlock* fgNewBBinRegion(BBjumpKinds jumpKind,
unsigned tryIndex,
unsigned hndIndex,
BasicBlock* nearBlk,
bool putInFilter = false,
bool runRarely = false,
bool insertAtEnd = false);
BasicBlock* fgNewBBinRegion(BBjumpKinds jumpKind,
BasicBlock* srcBlk,
bool runRarely = false,
bool insertAtEnd = false);
BasicBlock* fgNewBBinRegion(BBjumpKinds jumpKind);
BasicBlock* fgNewBBinRegionWorker(BBjumpKinds jumpKind,
BasicBlock* afterBlk,
unsigned xcptnIndex,
bool putInTryRegion);
void fgInsertBBbefore(BasicBlock* insertBeforeBlk, BasicBlock* newBlk);
void fgInsertBBafter(BasicBlock* insertAfterBlk, BasicBlock* newBlk);
void fgUnlinkBlock(BasicBlock* block);
#ifdef FEATURE_JIT_METHOD_PERF
unsigned fgMeasureIR();
#endif // FEATURE_JIT_METHOD_PERF
bool fgModified; // True if the flow graph has been modified recently
bool fgComputePredsDone; // Have we computed the bbPreds list
bool fgCheapPredsValid; // Is the bbCheapPreds list valid?
bool fgDomsComputed; // Have we computed the dominator sets?
bool fgReturnBlocksComputed; // Have we computed the return blocks list?
bool fgOptimizedFinally; // Did we optimize any try-finallys?
bool fgHasSwitch; // any BBJ_SWITCH jumps?
BlockSet fgEnterBlks; // Set of blocks which have a special transfer of control; the "entry" blocks plus EH handler
// begin blocks.
#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
BlockSet fgAlwaysBlks; // Set of blocks which are BBJ_ALWAYS part of BBJ_CALLFINALLY/BBJ_ALWAYS pair that should
// never be removed due to a requirement to use the BBJ_ALWAYS for generating code and
// not have "retless" blocks.
#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
#ifdef DEBUG
bool fgReachabilitySetsValid; // Are the bbReach sets valid?
bool fgEnterBlksSetValid; // Is the fgEnterBlks set valid?
#endif // DEBUG
bool fgRemoveRestOfBlock; // true if we know that we will throw
bool fgStmtRemoved; // true if we remove statements -> need new DFA
// There are two modes for ordering of the trees.
// - In FGOrderTree, the dominant ordering is the tree order, and the nodes contained in
// each tree and sub-tree are contiguous, and can be traversed (in gtNext/gtPrev order)
// by traversing the tree according to the order of the operands.
// - In FGOrderLinear, the dominant ordering is the linear order.
enum FlowGraphOrder
{
FGOrderTree,
FGOrderLinear
};
FlowGraphOrder fgOrder;
// The following are boolean flags that keep track of the state of internal data structures
bool fgStmtListThreaded; // true if the node list is now threaded
bool fgCanRelocateEHRegions; // true if we are allowed to relocate the EH regions
bool fgEdgeWeightsComputed; // true after we have called fgComputeEdgeWeights
bool fgHaveValidEdgeWeights; // true if we were successful in computing all of the edge weights
bool fgSlopUsedInEdgeWeights; // true if there was some slop used when computing the edge weights
bool fgRangeUsedInEdgeWeights; // true if some of the edgeWeight are expressed in Min..Max form
weight_t fgCalledCount; // count of the number of times this method was called
// This is derived from the profile data
// or is BB_UNITY_WEIGHT when we don't have profile data
#if defined(FEATURE_EH_FUNCLETS)
bool fgFuncletsCreated; // true if the funclet creation phase has been run
#endif // FEATURE_EH_FUNCLETS
bool fgGlobalMorph; // indicates if we are in the global morphing phase
// since fgMorphTree can be called from several places
bool impBoxTempInUse; // the temp below is valid and available
unsigned impBoxTemp; // a temporary that is used for boxing
#ifdef DEBUG
bool jitFallbackCompile; // Are we doing a fallback compile? That is, have we executed a NO_WAY assert,
// and we are trying to compile again in a "safer", minopts mode?
#endif
#if defined(DEBUG)
unsigned impInlinedCodeSize;
bool fgPrintInlinedMethods;
#endif
jitstd::vector<flowList*>* fgPredListSortVector;
//-------------------------------------------------------------------------
void fgInit();
PhaseStatus fgImport();
PhaseStatus fgTransformIndirectCalls();
PhaseStatus fgTransformPatchpoints();
PhaseStatus fgInline();
PhaseStatus fgRemoveEmptyTry();
PhaseStatus fgRemoveEmptyFinally();
PhaseStatus fgMergeFinallyChains();
PhaseStatus fgCloneFinally();
void fgCleanupContinuation(BasicBlock* continuation);
#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
PhaseStatus fgUpdateFinallyTargetFlags();
void fgClearAllFinallyTargetBits();
void fgAddFinallyTargetFlags();
#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
PhaseStatus fgTailMergeThrows();
void fgTailMergeThrowsFallThroughHelper(BasicBlock* predBlock,
BasicBlock* nonCanonicalBlock,
BasicBlock* canonicalBlock,
flowList* predEdge);
void fgTailMergeThrowsJumpToHelper(BasicBlock* predBlock,
BasicBlock* nonCanonicalBlock,
BasicBlock* canonicalBlock,
flowList* predEdge);
GenTree* fgCheckCallArgUpdate(GenTree* parent, GenTree* child, var_types origType);
#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
// Sometimes we need to defer updating the BBF_FINALLY_TARGET bit. fgNeedToAddFinallyTargetBits signals
// when this is necessary.
bool fgNeedToAddFinallyTargetBits;
#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
bool fgRetargetBranchesToCanonicalCallFinally(BasicBlock* block,
BasicBlock* handler,
BlockToBlockMap& continuationMap);
GenTree* fgGetCritSectOfStaticMethod();
#if defined(FEATURE_EH_FUNCLETS)
void fgAddSyncMethodEnterExit();
GenTree* fgCreateMonitorTree(unsigned lvaMonitorBool, unsigned lvaThisVar, BasicBlock* block, bool enter);
void fgConvertSyncReturnToLeave(BasicBlock* block);
#endif // FEATURE_EH_FUNCLETS
void fgAddReversePInvokeEnterExit();
bool fgMoreThanOneReturnBlock();
// The number of separate return points in the method.
unsigned fgReturnCount;
void fgAddInternal();
enum class FoldResult
{
FOLD_DID_NOTHING,
FOLD_CHANGED_CONTROL_FLOW,
FOLD_REMOVED_LAST_STMT,
FOLD_ALTERED_LAST_STMT,
};
FoldResult fgFoldConditional(BasicBlock* block);
void fgMorphStmts(BasicBlock* block);
void fgMorphBlocks();
void fgMergeBlockReturn(BasicBlock* block);
bool fgMorphBlockStmt(BasicBlock* block, Statement* stmt DEBUGARG(const char* msg));
void fgSetOptions();
#ifdef DEBUG
static fgWalkPreFn fgAssertNoQmark;
void fgPreExpandQmarkChecks(GenTree* expr);
void fgPostExpandQmarkChecks();
static void fgCheckQmarkAllowedForm(GenTree* tree);
#endif
IL_OFFSET fgFindBlockILOffset(BasicBlock* block);
void fgFixEntryFlowForOSR();
BasicBlock* fgSplitBlockAtBeginning(BasicBlock* curr);
BasicBlock* fgSplitBlockAtEnd(BasicBlock* curr);
BasicBlock* fgSplitBlockAfterStatement(BasicBlock* curr, Statement* stmt);
BasicBlock* fgSplitBlockAfterNode(BasicBlock* curr, GenTree* node); // for LIR
BasicBlock* fgSplitEdge(BasicBlock* curr, BasicBlock* succ);
Statement* fgNewStmtFromTree(GenTree* tree, BasicBlock* block, const DebugInfo& di);
Statement* fgNewStmtFromTree(GenTree* tree);
Statement* fgNewStmtFromTree(GenTree* tree, BasicBlock* block);
Statement* fgNewStmtFromTree(GenTree* tree, const DebugInfo& di);
GenTree* fgGetTopLevelQmark(GenTree* expr, GenTree** ppDst = nullptr);
void fgExpandQmarkForCastInstOf(BasicBlock* block, Statement* stmt);
void fgExpandQmarkStmt(BasicBlock* block, Statement* stmt);
void fgExpandQmarkNodes();
// Do "simple lowering." This functionality is (conceptually) part of "general"
// lowering that is distributed between fgMorph and the lowering phase of LSRA.
void fgSimpleLowering();
GenTree* fgInitThisClass();
GenTreeCall* fgGetStaticsCCtorHelper(CORINFO_CLASS_HANDLE cls, CorInfoHelpFunc helper);
GenTreeCall* fgGetSharedCCtor(CORINFO_CLASS_HANDLE cls);
bool backendRequiresLocalVarLifetimes()
{
return !opts.MinOpts() || m_pLinearScan->willEnregisterLocalVars();
}
void fgLocalVarLiveness();
void fgLocalVarLivenessInit();
void fgPerNodeLocalVarLiveness(GenTree* node);
void fgPerBlockLocalVarLiveness();
VARSET_VALRET_TP fgGetHandlerLiveVars(BasicBlock* block);
void fgLiveVarAnalysis(bool updateInternalOnly = false);
void fgComputeLifeCall(VARSET_TP& life, GenTreeCall* call);
void fgComputeLifeTrackedLocalUse(VARSET_TP& life, LclVarDsc& varDsc, GenTreeLclVarCommon* node);
bool fgComputeLifeTrackedLocalDef(VARSET_TP& life,
VARSET_VALARG_TP keepAliveVars,
LclVarDsc& varDsc,
GenTreeLclVarCommon* node);
bool fgComputeLifeUntrackedLocal(VARSET_TP& life,
VARSET_VALARG_TP keepAliveVars,
LclVarDsc& varDsc,
GenTreeLclVarCommon* lclVarNode);
bool fgComputeLifeLocal(VARSET_TP& life, VARSET_VALARG_TP keepAliveVars, GenTree* lclVarNode);
void fgComputeLife(VARSET_TP& life,
GenTree* startNode,
GenTree* endNode,
VARSET_VALARG_TP volatileVars,
bool* pStmtInfoDirty DEBUGARG(bool* treeModf));
void fgComputeLifeLIR(VARSET_TP& life, BasicBlock* block, VARSET_VALARG_TP volatileVars);
bool fgTryRemoveNonLocal(GenTree* node, LIR::Range* blockRange);
void fgRemoveDeadStoreLIR(GenTree* store, BasicBlock* block);
bool fgRemoveDeadStore(GenTree** pTree,
LclVarDsc* varDsc,
VARSET_VALARG_TP life,
bool* doAgain,
bool* pStmtInfoDirty,
bool* pStoreRemoved DEBUGARG(bool* treeModf));
void fgInterBlockLocalVarLiveness();
// Blocks: convenience methods for enabling range-based `for` iteration over the function's blocks, e.g.:
// 1. for (BasicBlock* const block : compiler->Blocks()) ...
// 2. for (BasicBlock* const block : compiler->Blocks(startBlock)) ...
// 3. for (BasicBlock* const block : compiler->Blocks(startBlock, endBlock)) ...
// In case (1), the block list can be empty. In case (2), `startBlock` can be nullptr. In case (3),
// both `startBlock` and `endBlock` must be non-null.
//
BasicBlockSimpleList Blocks() const
{
return BasicBlockSimpleList(fgFirstBB);
}
BasicBlockSimpleList Blocks(BasicBlock* startBlock) const
{
return BasicBlockSimpleList(startBlock);
}
BasicBlockRangeList Blocks(BasicBlock* startBlock, BasicBlock* endBlock) const
{
return BasicBlockRangeList(startBlock, endBlock);
}
// The presence of a partial definition presents some difficulties for SSA: this is both a use of some SSA name
// of "x", and a def of a new SSA name for "x". The tree only has one local variable for "x", so it has to choose
// whether to treat that as the use or def. It chooses the "use", and thus the old SSA name. This map allows us
// to record/recover the "def" SSA number, given the lcl var node for "x" in such a tree.
typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, unsigned> NodeToUnsignedMap;
NodeToUnsignedMap* m_opAsgnVarDefSsaNums;
NodeToUnsignedMap* GetOpAsgnVarDefSsaNums()
{
if (m_opAsgnVarDefSsaNums == nullptr)
{
m_opAsgnVarDefSsaNums = new (getAllocator()) NodeToUnsignedMap(getAllocator());
}
return m_opAsgnVarDefSsaNums;
}
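// e.g. an assignment whose destination is a GT_LCL_FLD covering only part of local V01 is a partial def: the
// local node is marked GTF_VAR_USEASG, its SSA number field holds the "use" SSA number, and the corresponding
// "def" SSA number is recorded in this map (an illustrative case of the partial-definition situation above).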
// This map tracks nodes whose value numbers explicitly or implicitly depend on memory states.
// The map provides the entry block of the most closely enclosing loop that
// defines the memory region accessed when defining the node's VN.
//
// This information should be consulted when considering hoisting node out of a loop, as the VN
// for the node will only be valid within the indicated loop.
//
// It is not fine-grained enough to track memory dependence within loops, so cannot be used
// for more general code motion.
//
// If a node does not have an entry in the map we currently assume the VN is not memory dependent
// and so memory does not constrain hoisting.
//
typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, BasicBlock*> NodeToLoopMemoryBlockMap;
NodeToLoopMemoryBlockMap* m_nodeToLoopMemoryBlockMap;
NodeToLoopMemoryBlockMap* GetNodeToLoopMemoryBlockMap()
{
if (m_nodeToLoopMemoryBlockMap == nullptr)
{
m_nodeToLoopMemoryBlockMap = new (getAllocator()) NodeToLoopMemoryBlockMap(getAllocator());
}
return m_nodeToLoopMemoryBlockMap;
}
void optRecordLoopMemoryDependence(GenTree* tree, BasicBlock* block, ValueNum memoryVN);
void optCopyLoopMemoryDependence(GenTree* fromTree, GenTree* toTree);
// Requires value numbering phase to have completed. Returns the value number ("gtVN") of the
// "tree," EXCEPT in the case of GTF_VAR_USEASG, because the tree node's gtVN member is the
// "use" VN. Performs a lookup into the map of (use asg tree -> def VN.) to return the "def's"
// VN.
inline ValueNum GetUseAsgDefVNOrTreeVN(GenTree* tree);
// Requires that "lcl" has the GTF_VAR_DEF flag set. Returns the SSA number of "lcl".
// Except: assumes that lcl is a def, and if it is
// a partial def (GTF_VAR_USEASG), looks up and returns the SSA number for the "def",
// rather than the "use" SSA number recorded in the tree "lcl".
inline unsigned GetSsaNumForLocalVarDef(GenTree* lcl);
inline bool PreciseRefCountsRequired();
// Performs SSA conversion.
void fgSsaBuild();
// Reset any data structures to the state expected by "fgSsaBuild", so it can be run again.
void fgResetForSsa();
unsigned fgSsaPassesCompleted; // Number of times fgSsaBuild has been run.
// Returns "true" if this is a special variable that is never zero initialized in the prolog.
inline bool fgVarIsNeverZeroInitializedInProlog(unsigned varNum);
// Returns "true" if the variable needs explicit zero initialization.
inline bool fgVarNeedsExplicitZeroInit(unsigned varNum, bool bbInALoop, bool bbIsReturn);
// The value numbers for this compilation.
ValueNumStore* vnStore;
public:
ValueNumStore* GetValueNumStore()
{
return vnStore;
}
// Do value numbering (assign a value number to each
// tree node).
void fgValueNumber();
// Computes new GcHeap VN via the assignment H[elemTypeEq][arrVN][inx][fldSeq] = rhsVN.
// Assumes that "elemTypeEq" is the (equivalence class rep) of the array element type.
// The 'indType' is the indirection type of the lhs of the assignment and will typically
// match the element type of the array or fldSeq. When this type doesn't match
// or if the fldSeq is 'NotAField' we invalidate the array contents H[elemTypeEq][arrVN]
//
ValueNum fgValueNumberArrIndexAssign(CORINFO_CLASS_HANDLE elemTypeEq,
ValueNum arrVN,
ValueNum inxVN,
FieldSeqNode* fldSeq,
ValueNum rhsVN,
var_types indType);
// Requires that "tree" is a GT_IND marked as an array index, and that its address argument
// has been parsed to yield the other input arguments. If evaluation of the address
// can raise exceptions, those should be captured in the exception set "addrXvnp".
// Assumes that "elemTypeEq" is the (equivalence class rep) of the array element type.
// Marks "tree" with the VN for H[elemTypeEq][arrVN][inx][fldSeq] (for the liberal VN; a new unique
// VN for the conservative VN.) Also marks the tree's argument as the address of an array element.
// The type tree->TypeGet() will typically match the element type of the array or fldSeq.
// When this type doesn't match or if the fldSeq is 'NotAField' we return a new unique VN
//
ValueNum fgValueNumberArrIndexVal(GenTree* tree,
CORINFO_CLASS_HANDLE elemTypeEq,
ValueNum arrVN,
ValueNum inxVN,
ValueNumPair addrXvnp,
FieldSeqNode* fldSeq);
// Requires "funcApp" to be a VNF_PtrToArrElem, and "addrXvnp" to represent the exception set thrown
// by evaluating the array index expression "tree". Returns the value number resulting from
// dereferencing the array in the current GcHeap state. If "tree" is non-null, it must be the
// "GT_IND" that does the dereference, and it is given the returned value number.
ValueNum fgValueNumberArrIndexVal(GenTree* tree, VNFuncApp* funcApp, ValueNumPair addrXvnp);
// Compute the value number for a byref-exposed load of the given type via the given pointerVN.
ValueNum fgValueNumberByrefExposedLoad(var_types type, ValueNum pointerVN);
unsigned fgVNPassesCompleted; // Number of times fgValueNumber has been run.
// Utility functions for fgValueNumber.
// Perform value-numbering for the trees in "blk".
void fgValueNumberBlock(BasicBlock* blk);
// Requires that "entryBlock" is the entry block of loop "loopNum", and that "loopNum" is the
// innermost loop of which "entryBlock" is the entry. Returns the value number that should be
// assumed for the memoryKind at the start of "entryBlk".
ValueNum fgMemoryVNForLoopSideEffects(MemoryKind memoryKind, BasicBlock* entryBlock, unsigned loopNum);
// Called when an operation (performed by "tree", described by "msg") may cause the GcHeap to be mutated.
// As GcHeap is a subset of ByrefExposed, this will also annotate the ByrefExposed mutation.
void fgMutateGcHeap(GenTree* tree DEBUGARG(const char* msg));
// Called when an operation (performed by "tree", described by "msg") may cause an address-exposed local to be
// mutated.
void fgMutateAddressExposedLocal(GenTree* tree DEBUGARG(const char* msg));
// For a GC heap store at curTree, record the new curMemoryVN's and update curTree's MemorySsaMap.
// As GcHeap is a subset of ByrefExposed, this will also record the ByrefExposed store.
void recordGcHeapStore(GenTree* curTree, ValueNum gcHeapVN DEBUGARG(const char* msg));
// For a store to an address-exposed local at curTree, record the new curMemoryVN and update curTree's MemorySsaMap.
void recordAddressExposedLocalStore(GenTree* curTree, ValueNum memoryVN DEBUGARG(const char* msg));
void fgSetCurrentMemoryVN(MemoryKind memoryKind, ValueNum newMemoryVN);
// Tree caused an update in the current memory VN. If "tree" has an associated heap SSA #, record that
// value in that SSA #.
void fgValueNumberRecordMemorySsa(MemoryKind memoryKind, GenTree* tree);
// The input 'tree' is a leaf node that is a constant
// Assign the proper value number to the tree
void fgValueNumberTreeConst(GenTree* tree);
// If the VN store has been initialized, reassign the
// proper value number to the constant tree.
void fgUpdateConstTreeValueNumber(GenTree* tree);
// Assumes that all inputs to "tree" have had value numbers assigned; assigns a VN to tree.
// (With some exceptions: the VN of the lhs of an assignment is assigned as part of the
// assignment.)
void fgValueNumberTree(GenTree* tree);
void fgValueNumberAssignment(GenTreeOp* tree);
// Does value-numbering for a block assignment.
void fgValueNumberBlockAssignment(GenTree* tree);
bool fgValueNumberBlockAssignmentTypeCheck(LclVarDsc* dstVarDsc, FieldSeqNode* dstFldSeq, GenTree* src);
// Does value-numbering for a cast tree.
void fgValueNumberCastTree(GenTree* tree);
// Does value-numbering for an intrinsic tree.
void fgValueNumberIntrinsic(GenTree* tree);
void fgValueNumberArrIndexAddr(GenTreeArrAddr* arrAddr);
#ifdef FEATURE_SIMD
// Does value-numbering for a GT_SIMD tree
void fgValueNumberSimd(GenTreeSIMD* tree);
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
// Does value-numbering for a GT_HWINTRINSIC tree
void fgValueNumberHWIntrinsic(GenTreeHWIntrinsic* tree);
#endif // FEATURE_HW_INTRINSICS
// Does value-numbering for a call. We interpret some helper calls.
void fgValueNumberCall(GenTreeCall* call);
// Does value-numbering for a helper representing a cast operation.
void fgValueNumberCastHelper(GenTreeCall* call);
// Does value-numbering for a helper "call" that has a VN function symbol "vnf".
void fgValueNumberHelperCallFunc(GenTreeCall* call, VNFunc vnf, ValueNumPair vnpExc);
// Requires "helpCall" to be a helper call. Assigns it a value number;
// we understand the semantics of some of the calls. Returns "true" if
// the call may modify the heap (we assume arbitrary memory side effects if so).
bool fgValueNumberHelperCall(GenTreeCall* helpCall);
// Requires that "helpFunc" is one of the pure Jit Helper methods.
// Returns the corresponding VNFunc to use for value numbering
VNFunc fgValueNumberJitHelperMethodVNFunc(CorInfoHelpFunc helpFunc);
// Adds the exception set for the current tree node which has a memory indirection operation
void fgValueNumberAddExceptionSetForIndirection(GenTree* tree, GenTree* baseAddr);
// Adds the exception sets for the current tree node which is performing a division or modulus operation
void fgValueNumberAddExceptionSetForDivision(GenTree* tree);
// Adds the exception set for the current tree node which is performing an overflow checking operation
void fgValueNumberAddExceptionSetForOverflow(GenTree* tree);
// Adds the exception set for the current tree node which is performing a bounds check operation
void fgValueNumberAddExceptionSetForBoundsCheck(GenTree* tree);
// Adds the exception set for the current tree node which is performing a ckfinite operation
void fgValueNumberAddExceptionSetForCkFinite(GenTree* tree);
// Adds the exception sets for the current tree node
void fgValueNumberAddExceptionSet(GenTree* tree);
#ifdef DEBUG
void fgDebugCheckExceptionSets();
void fgDebugCheckValueNumberedTree(GenTree* tree);
#endif
// These are the current value number for the memory implicit variables while
// doing value numbering. These are the value numbers under the "liberal" interpretation
// of memory values; the "conservative" interpretation needs no VN, since every access of
// memory yields an unknown value.
ValueNum fgCurMemoryVN[MemoryKindCount];
// Return a "pseudo"-class handle for an array element type. If "elemType" is TYP_STRUCT,
// requires "elemStructType" to be non-null (and to have a zero low-order bit). Otherwise, the low-order bit
// is 1, and the rest is an encoding of "elemTyp".
static CORINFO_CLASS_HANDLE EncodeElemType(var_types elemTyp, CORINFO_CLASS_HANDLE elemStructType)
{
if (elemStructType != nullptr)
{
assert(varTypeIsStruct(elemTyp) || elemTyp == TYP_REF || elemTyp == TYP_BYREF ||
varTypeIsIntegral(elemTyp));
assert((size_t(elemStructType) & 0x1) == 0x0); // Make sure the encoding below is valid.
return elemStructType;
}
else
{
assert(elemTyp != TYP_STRUCT);
elemTyp = varTypeToSigned(elemTyp);
return CORINFO_CLASS_HANDLE(size_t(elemTyp) << 1 | 0x1);
}
}
// If "clsHnd" is a pseudo-handle produced by "EncodeElemType" for a primitive type, returns the
// var_types it encodes. Otherwise, returns TYP_STRUCT (on the assumption that "clsHnd" is
// the struct type of the element).
static var_types DecodeElemType(CORINFO_CLASS_HANDLE clsHnd)
{
size_t clsHndVal = size_t(clsHnd);
if (clsHndVal & 0x1)
{
return var_types(clsHndVal >> 1);
}
else
{
return TYP_STRUCT;
}
}
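// Worked example: for elemTyp == TYP_INT with no struct handle, EncodeElemType returns
// CORINFO_CLASS_HANDLE((size_t(TYP_INT) << 1) | 0x1), and DecodeElemType recovers TYP_INT by shifting right.
// A real struct handle has a clear low bit, so it round-trips unchanged and decodes to TYP_STRUCT.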
// Convert a BYTE which represents the VM's CorInfoGCtype to the JIT's var_types
var_types getJitGCType(BYTE gcType);
// Returns true if the provided type should be treated as a primitive type
// for the unmanaged calling conventions.
bool isNativePrimitiveStructType(CORINFO_CLASS_HANDLE clsHnd);
enum structPassingKind
{
SPK_Unknown, // Invalid value, never returned
SPK_PrimitiveType, // The struct is passed/returned using a primitive type.
SPK_EnclosingType, // Like SPK_PrimitiveType, but used for return types that
// require a primitive type temp that is larger than the struct size.
// Currently used for structs of size 3, 5, 6, or 7 bytes.
SPK_ByValue, // The struct is passed/returned by value (using the ABI rules)
// for ARM64 and UNIX_X64 in multiple registers. (when all of the
// parameter registers are used, then the stack will be used)
// for X86 passed on the stack, for ARM32 passed in registers
// or the stack or split between registers and the stack.
SPK_ByValueAsHfa, // The struct is passed/returned as an HFA in multiple registers.
SPK_ByReference // The struct is passed/returned by reference to a copy/buffer.
};
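// Illustrative examples (not exhaustive): on most 64-bit targets an 8-byte struct with no GC refs is
// passed/returned as SPK_PrimitiveType (via a TYP_LONG), while an ARM64 struct of four doubles is an HFA
// and is SPK_ByValueAsHfa.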
// Get the "primitive" type that is used when we are given a struct of size 'structSize'.
// For pointer sized structs the 'clsHnd' is used to determine if the struct contains GC ref.
// A "primitive" type is one of the scalar types: byte, short, int, long, ref, float, double
// If we can't or shouldn't use a "primitive" type then TYP_UNKNOWN is returned.
//
// isVarArg is passed for use on Windows Arm64 to change the decision returned regarding
// hfa types.
//
var_types getPrimitiveTypeForStruct(unsigned structSize, CORINFO_CLASS_HANDLE clsHnd, bool isVarArg);
// Get the type that is used to pass values of the given struct type.
// isVarArg is passed for use on Windows Arm64 to change the decision returned regarding
// hfa types.
//
var_types getArgTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
structPassingKind* wbPassStruct,
bool isVarArg,
unsigned structSize);
// Get the type that is used to return values of the given struct type.
// If the size is unknown, pass 0 and it will be determined from 'clsHnd'.
var_types getReturnTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
CorInfoCallConvExtension callConv,
structPassingKind* wbPassStruct = nullptr,
unsigned structSize = 0);
#ifdef DEBUG
// Print a representation of "vnp" or "vn" on standard output.
// If "level" is non-zero, we also print out a partial expansion of the value.
void vnpPrint(ValueNumPair vnp, unsigned level);
void vnPrint(ValueNum vn, unsigned level);
#endif
bool fgDominate(BasicBlock* b1, BasicBlock* b2); // Return true if b1 dominates b2
// Dominator computation member functions
// Not exposed outside Compiler
protected:
bool fgReachable(BasicBlock* b1, BasicBlock* b2); // Returns true if block b1 can reach block b2
// Compute immediate dominators, the dominator tree, and its pre/post-order traversal numbers.
void fgComputeDoms();
void fgCompDominatedByExceptionalEntryBlocks();
BlockSet_ValRet_T fgGetDominatorSet(BasicBlock* block); // Returns a set of blocks that dominate the given block.
// Note: this is relatively slow compared to calling fgDominate(),
// especially for a single block-versus-block check.
void fgComputeReachabilitySets(); // Compute bbReach sets. (Also sets BBF_GC_SAFE_POINT flag on blocks.)
void fgComputeReturnBlocks(); // Initialize fgReturnBlocks to a list of BBJ_RETURN blocks.
void fgComputeEnterBlocksSet(); // Compute the set of entry blocks, 'fgEnterBlks'.
// Remove blocks determined to be unreachable, as decided by the 'canRemoveBlock' predicate.
template <typename CanRemoveBlockBody>
bool fgRemoveUnreachableBlocks(CanRemoveBlockBody canRemoveBlock);
void fgComputeReachability(); // Perform flow graph node reachability analysis.
void fgRemoveDeadBlocks(); // Identify and remove dead blocks.
BasicBlock* fgIntersectDom(BasicBlock* a, BasicBlock* b); // Intersect two immediate dominator sets.
void fgDfsInvPostOrder(); // In order to compute dominance using fgIntersectDom, the flow graph nodes must be
// processed in topological order; this function takes care of that.
void fgDfsInvPostOrderHelper(BasicBlock* block, BlockSet& visited, unsigned* count);
BlockSet_ValRet_T fgDomFindStartNodes(); // Computes which basic blocks don't have incoming edges in the flow graph.
// Returns this as a set.
INDEBUG(void fgDispDomTree(DomTreeNode* domTree);) // Helper that prints out the Dominator Tree in debug builds.
DomTreeNode* fgBuildDomTree(); // Once we compute all the immediate dominator sets for each node in the flow graph
// (performed by fgComputeDoms), this procedure builds the dominance tree, represented as
// adjacency lists.
// In order to speed up queries of the form 'Does A dominate B', we perform a DFS preorder and postorder
// traversal of the dominance tree; the dominance query then becomes: A dominates B iff preOrder(A) <= preOrder(B)
// && postOrder(A) >= postOrder(B), making the computation O(1).
void fgNumberDomTree(DomTreeNode* domTree);
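// A sketch of the O(1) query described above, using the cached arrays:
//   bool dominates = (fgDomTreePreOrder[b1->bbNum] <= fgDomTreePreOrder[b2->bbNum]) &&
//                    (fgDomTreePostOrder[b1->bbNum] >= fgDomTreePostOrder[b2->bbNum]);
// (illustrative only; fgDominate() is the real entry point for this check).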
// When the flow graph changes, we need to update the block numbers, predecessor lists, reachability sets,
// dominators, and possibly loops.
void fgUpdateChangedFlowGraph(const bool computePreds = true,
const bool computeDoms = true,
const bool computeReturnBlocks = false,
const bool computeLoops = false);
public:
// Compute the predecessors of the blocks in the control flow graph.
void fgComputePreds();
// Remove all predecessor information.
void fgRemovePreds();
// Compute the cheap flow graph predecessors lists. This is used in some early phases
// before the full predecessors lists are computed.
void fgComputeCheapPreds();
private:
void fgAddCheapPred(BasicBlock* block, BasicBlock* blockPred);
void fgRemoveCheapPred(BasicBlock* block, BasicBlock* blockPred);
public:
enum GCPollType
{
GCPOLL_NONE,
GCPOLL_CALL,
GCPOLL_INLINE
};
// Initialize the per-block variable sets (used for liveness analysis).
void fgInitBlockVarSets();
PhaseStatus fgInsertGCPolls();
BasicBlock* fgCreateGCPoll(GCPollType pollType, BasicBlock* block);
// Requires that "block" is a block that returns from
// a finally. Returns the number of successors (jump targets of
    // blocks in the covered "try" that did a "LEAVE".)
unsigned fgNSuccsOfFinallyRet(BasicBlock* block);
// Requires that "block" is a block that returns (in the sense of BBJ_EHFINALLYRET) from
// a finally. Returns its "i"th successor (jump targets of
    // blocks in the covered "try" that did a "LEAVE".)
// Requires that "i" < fgNSuccsOfFinallyRet(block).
BasicBlock* fgSuccOfFinallyRet(BasicBlock* block, unsigned i);
private:
// Factor out common portions of the impls of the methods above.
void fgSuccOfFinallyRetWork(BasicBlock* block, unsigned i, BasicBlock** bres, unsigned* nres);
public:
// For many purposes, it is desirable to be able to enumerate the *distinct* targets of a switch statement,
// skipping duplicate targets. (E.g., in flow analyses that are only interested in the set of possible targets.)
// SwitchUniqueSuccSet contains the non-duplicated switch targets.
// (Code that modifies the jump table of a switch has an obligation to call Compiler::UpdateSwitchTableTarget,
// which in turn will call the "UpdateTarget" method of this type if a SwitchUniqueSuccSet has already
// been computed for the switch block. If a switch block is deleted or is transformed into a non-switch,
// we leave the entry associated with the block, but it will no longer be accessed.)
struct SwitchUniqueSuccSet
{
unsigned numDistinctSuccs; // Number of distinct targets of the switch.
BasicBlock** nonDuplicates; // Array of "numDistinctSuccs", containing all the distinct switch target
// successors.
// The switch block "switchBlk" just had an entry with value "from" modified to the value "to".
// Update "this" as necessary: if "from" is no longer an element of the jump table of "switchBlk",
// remove it from "this", and ensure that "to" is a member. Use "alloc" to do any required allocation.
void UpdateTarget(CompAllocator alloc, BasicBlock* switchBlk, BasicBlock* from, BasicBlock* to);
};
typedef JitHashTable<BasicBlock*, JitPtrKeyFuncs<BasicBlock>, SwitchUniqueSuccSet> BlockToSwitchDescMap;
private:
// Maps BasicBlock*'s that end in switch statements to SwitchUniqueSuccSets that allow
// iteration over only the distinct successors.
BlockToSwitchDescMap* m_switchDescMap;
public:
BlockToSwitchDescMap* GetSwitchDescMap(bool createIfNull = true)
{
if ((m_switchDescMap == nullptr) && createIfNull)
{
m_switchDescMap = new (getAllocator()) BlockToSwitchDescMap(getAllocator());
}
return m_switchDescMap;
}
// Invalidate the map of unique switch block successors. For example, since the hash key of the map
// depends on block numbers, we must invalidate the map when the blocks are renumbered, to ensure that
// we don't accidentally look up and return the wrong switch data.
void InvalidateUniqueSwitchSuccMap()
{
m_switchDescMap = nullptr;
}
// Requires "switchBlock" to be a block that ends in a switch. Returns
// the corresponding SwitchUniqueSuccSet.
SwitchUniqueSuccSet GetDescriptorForSwitch(BasicBlock* switchBlk);
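    // Illustrative sketch of enumerating only the distinct successors of a switch block via the
    // descriptor above (a usage sketch, not a prescribed pattern):
    //
    //     SwitchUniqueSuccSet sd = comp->GetDescriptorForSwitch(switchBlk);
    //     for (unsigned i = 0; i < sd.numDistinctSuccs; i++)
    //     {
    //         BasicBlock* const uniqueSucc = sd.nonDuplicates[i];
    //         // ... process uniqueSucc ...
    //     }
    //
    // Here 'comp' and 'switchBlk' stand for a Compiler instance and a BBJ_SWITCH block.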
// The switch block "switchBlk" just had an entry with value "from" modified to the value "to".
// Update "this" as necessary: if "from" is no longer an element of the jump table of "switchBlk",
// remove it from "this", and ensure that "to" is a member.
void UpdateSwitchTableTarget(BasicBlock* switchBlk, BasicBlock* from, BasicBlock* to);
// Remove the "SwitchUniqueSuccSet" of "switchBlk" in the BlockToSwitchDescMap.
void fgInvalidateSwitchDescMapEntry(BasicBlock* switchBlk);
BasicBlock* fgFirstBlockOfHandler(BasicBlock* block);
bool fgIsFirstBlockOfFilterOrHandler(BasicBlock* block);
flowList* fgGetPredForBlock(BasicBlock* block, BasicBlock* blockPred);
flowList* fgGetPredForBlock(BasicBlock* block, BasicBlock* blockPred, flowList*** ptrToPred);
flowList* fgRemoveRefPred(BasicBlock* block, BasicBlock* blockPred);
flowList* fgRemoveAllRefPreds(BasicBlock* block, BasicBlock* blockPred);
void fgRemoveBlockAsPred(BasicBlock* block);
void fgChangeSwitchBlock(BasicBlock* oldSwitchBlock, BasicBlock* newSwitchBlock);
void fgReplaceSwitchJumpTarget(BasicBlock* blockSwitch, BasicBlock* newTarget, BasicBlock* oldTarget);
void fgReplaceJumpTarget(BasicBlock* block, BasicBlock* newTarget, BasicBlock* oldTarget);
void fgReplacePred(BasicBlock* block, BasicBlock* oldPred, BasicBlock* newPred);
flowList* fgAddRefPred(BasicBlock* block,
BasicBlock* blockPred,
flowList* oldEdge = nullptr,
bool initializingPreds = false); // Only set to 'true' when we are computing preds in
// fgComputePreds()
void fgFindBasicBlocks();
bool fgIsBetterFallThrough(BasicBlock* bCur, BasicBlock* bAlt);
bool fgCheckEHCanInsertAfterBlock(BasicBlock* blk, unsigned regionIndex, bool putInTryRegion);
BasicBlock* fgFindInsertPoint(unsigned regionIndex,
bool putInTryRegion,
BasicBlock* startBlk,
BasicBlock* endBlk,
BasicBlock* nearBlk,
BasicBlock* jumpBlk,
bool runRarely);
unsigned fgGetNestingLevel(BasicBlock* block, unsigned* pFinallyNesting = nullptr);
void fgPostImportationCleanup();
void fgRemoveStmt(BasicBlock* block, Statement* stmt DEBUGARG(bool isUnlink = false));
void fgUnlinkStmt(BasicBlock* block, Statement* stmt);
bool fgCheckRemoveStmt(BasicBlock* block, Statement* stmt);
void fgCreateLoopPreHeader(unsigned lnum);
void fgUnreachableBlock(BasicBlock* block);
void fgRemoveConditionalJump(BasicBlock* block);
BasicBlock* fgLastBBInMainFunction();
BasicBlock* fgEndBBAfterMainFunction();
void fgUnlinkRange(BasicBlock* bBeg, BasicBlock* bEnd);
void fgRemoveBlock(BasicBlock* block, bool unreachable);
bool fgCanCompactBlocks(BasicBlock* block, BasicBlock* bNext);
void fgCompactBlocks(BasicBlock* block, BasicBlock* bNext);
void fgUpdateLoopsAfterCompacting(BasicBlock* block, BasicBlock* bNext);
BasicBlock* fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst);
bool fgRenumberBlocks();
bool fgExpandRarelyRunBlocks();
bool fgEhAllowsMoveBlock(BasicBlock* bBefore, BasicBlock* bAfter);
void fgMoveBlocksAfter(BasicBlock* bStart, BasicBlock* bEnd, BasicBlock* insertAfterBlk);
enum FG_RELOCATE_TYPE
{
FG_RELOCATE_TRY, // relocate the 'try' region
FG_RELOCATE_HANDLER // relocate the handler region (including the filter if necessary)
};
BasicBlock* fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE relocateType);
#if defined(FEATURE_EH_FUNCLETS)
#if defined(TARGET_ARM)
void fgClearFinallyTargetBit(BasicBlock* block);
#endif // defined(TARGET_ARM)
bool fgIsIntraHandlerPred(BasicBlock* predBlock, BasicBlock* block);
bool fgAnyIntraHandlerPreds(BasicBlock* block);
void fgInsertFuncletPrologBlock(BasicBlock* block);
void fgCreateFuncletPrologBlocks();
void fgCreateFunclets();
#else // !FEATURE_EH_FUNCLETS
bool fgRelocateEHRegions();
#endif // !FEATURE_EH_FUNCLETS
bool fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock* target);
bool fgBlockEndFavorsTailDuplication(BasicBlock* block, unsigned lclNum);
bool fgBlockIsGoodTailDuplicationCandidate(BasicBlock* block, unsigned* lclNum);
bool fgOptimizeEmptyBlock(BasicBlock* block);
bool fgOptimizeBranchToEmptyUnconditional(BasicBlock* block, BasicBlock* bDest);
bool fgOptimizeBranch(BasicBlock* bJump);
bool fgOptimizeSwitchBranches(BasicBlock* block);
bool fgOptimizeBranchToNext(BasicBlock* block, BasicBlock* bNext, BasicBlock* bPrev);
bool fgOptimizeSwitchJumps();
#ifdef DEBUG
void fgPrintEdgeWeights();
#endif
void fgComputeBlockAndEdgeWeights();
weight_t fgComputeMissingBlockWeights();
void fgComputeCalledCount(weight_t returnWeight);
void fgComputeEdgeWeights();
bool fgReorderBlocks();
PhaseStatus fgDetermineFirstColdBlock();
bool fgIsForwardBranch(BasicBlock* bJump, BasicBlock* bSrc = nullptr);
bool fgUpdateFlowGraph(bool doTailDup = false);
void fgFindOperOrder();
    // method that returns whether you should split here
typedef bool(fgSplitPredicate)(GenTree* tree, GenTree* parent, fgWalkData* data);
void fgSetBlockOrder();
void fgRemoveReturnBlock(BasicBlock* block);
/* Helper code that has been factored out */
inline void fgConvertBBToThrowBB(BasicBlock* block);
bool fgCastNeeded(GenTree* tree, var_types toType);
GenTree* fgDoNormalizeOnStore(GenTree* tree);
GenTree* fgMakeTmpArgNode(fgArgTabEntry* curArgTabEntry);
// The following check for loops that don't execute calls
bool fgLoopCallMarked;
void fgLoopCallTest(BasicBlock* srcBB, BasicBlock* dstBB);
void fgLoopCallMark();
void fgMarkLoopHead(BasicBlock* block);
unsigned fgGetCodeEstimate(BasicBlock* block);
#if DUMP_FLOWGRAPHS
enum class PhasePosition
{
PrePhase,
PostPhase
};
const char* fgProcessEscapes(const char* nameIn, escapeMapping_t* map);
static void fgDumpTree(FILE* fgxFile, GenTree* const tree);
FILE* fgOpenFlowGraphFile(bool* wbDontClose, Phases phase, PhasePosition pos, LPCWSTR type);
bool fgDumpFlowGraph(Phases phase, PhasePosition pos);
#endif // DUMP_FLOWGRAPHS
#ifdef DEBUG
void fgDispDoms();
void fgDispReach();
void fgDispBBLiveness(BasicBlock* block);
void fgDispBBLiveness();
void fgTableDispBasicBlock(BasicBlock* block, int ibcColWidth = 0);
void fgDispBasicBlocks(BasicBlock* firstBlock, BasicBlock* lastBlock, bool dumpTrees);
void fgDispBasicBlocks(bool dumpTrees = false);
void fgDumpStmtTree(Statement* stmt, unsigned bbNum);
void fgDumpBlock(BasicBlock* block);
void fgDumpTrees(BasicBlock* firstBlock, BasicBlock* lastBlock);
static fgWalkPreFn fgStress64RsltMulCB;
void fgStress64RsltMul();
void fgDebugCheckUpdate();
void fgDebugCheckBBNumIncreasing();
void fgDebugCheckBBlist(bool checkBBNum = false, bool checkBBRefs = true);
void fgDebugCheckBlockLinks();
void fgDebugCheckLinks(bool morphTrees = false);
void fgDebugCheckStmtsList(BasicBlock* block, bool morphTrees);
void fgDebugCheckNodeLinks(BasicBlock* block, Statement* stmt);
void fgDebugCheckNodesUniqueness();
void fgDebugCheckLoopTable();
void fgDebugCheckFlags(GenTree* tree);
void fgDebugCheckDispFlags(GenTree* tree, GenTreeFlags dispFlags, GenTreeDebugFlags debugFlags);
void fgDebugCheckFlagsHelper(GenTree* tree, GenTreeFlags actualFlags, GenTreeFlags expectedFlags);
void fgDebugCheckTryFinallyExits();
void fgDebugCheckProfileData();
bool fgDebugCheckIncomingProfileData(BasicBlock* block);
bool fgDebugCheckOutgoingProfileData(BasicBlock* block);
#endif // DEBUG
static bool fgProfileWeightsEqual(weight_t weight1, weight_t weight2);
static bool fgProfileWeightsConsistent(weight_t weight1, weight_t weight2);
static GenTree* fgGetFirstNode(GenTree* tree);
//--------------------- Walking the trees in the IR -----------------------
struct fgWalkData
{
Compiler* compiler;
fgWalkPreFn* wtprVisitorFn;
fgWalkPostFn* wtpoVisitorFn;
void* pCallbackData; // user-provided data
GenTree* parent; // parent of current node, provided to callback
GenTreeStack* parentStack; // stack of parent nodes, if asked for
bool wtprLclsOnly; // whether to only visit lclvar nodes
#ifdef DEBUG
bool printModified; // callback can use this
#endif
};
fgWalkResult fgWalkTreePre(GenTree** pTree,
fgWalkPreFn* visitor,
void* pCallBackData = nullptr,
bool lclVarsOnly = false,
bool computeStack = false);
fgWalkResult fgWalkTree(GenTree** pTree,
fgWalkPreFn* preVisitor,
fgWalkPostFn* postVisitor,
void* pCallBackData = nullptr);
void fgWalkAllTreesPre(fgWalkPreFn* visitor, void* pCallBackData);
//----- Postorder
fgWalkResult fgWalkTreePost(GenTree** pTree,
fgWalkPostFn* visitor,
void* pCallBackData = nullptr,
bool computeStack = false);
// An fgWalkPreFn that looks for expressions that have inline throws in
    // minopts mode. Basically it looks for trees with gtOverflowEx() or
    // GTF_IND_RNGCHK. It returns WALK_ABORT if one is found. It
    // returns WALK_SKIP_SUBTREES if GTF_EXCEPT is not set (assuming flags are
    // properly propagated to parent trees). It returns WALK_CONTINUE
// otherwise.
static fgWalkResult fgChkThrowCB(GenTree** pTree, Compiler::fgWalkData* data);
static fgWalkResult fgChkLocAllocCB(GenTree** pTree, Compiler::fgWalkData* data);
static fgWalkResult fgChkQmarkCB(GenTree** pTree, Compiler::fgWalkData* data);
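    // Illustrative sketch of a pre-order walker callback with the same shape as the callbacks
    // above (a hypothetical example, not an existing member):
    //
    //     static fgWalkResult FindCallCB(GenTree** pTree, Compiler::fgWalkData* data)
    //     {
    //         return (*pTree)->IsCall() ? WALK_ABORT : WALK_CONTINUE;
    //     }
    //
    //     // invoked as: fgWalkTreePre(&tree, FindCallCB);
    //
    // 'FindCallCB' is a made-up name; GenTree::IsCall and the WALK_* results are the existing ones.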
/**************************************************************************
* PROTECTED
*************************************************************************/
protected:
friend class SsaBuilder;
friend struct ValueNumberState;
//--------------------- Detect the basic blocks ---------------------------
BasicBlock** fgBBs; // Table of pointers to the BBs
void fgInitBBLookup();
BasicBlock* fgLookupBB(unsigned addr);
bool fgCanSwitchToOptimized();
void fgSwitchToOptimized(const char* reason);
bool fgMayExplicitTailCall();
void fgFindJumpTargets(const BYTE* codeAddr, IL_OFFSET codeSize, FixedBitVect* jumpTarget);
void fgMarkBackwardJump(BasicBlock* startBlock, BasicBlock* endBlock);
void fgLinkBasicBlocks();
unsigned fgMakeBasicBlocks(const BYTE* codeAddr, IL_OFFSET codeSize, FixedBitVect* jumpTarget);
void fgCheckBasicBlockControlFlow();
void fgControlFlowPermitted(BasicBlock* blkSrc,
BasicBlock* blkDest,
bool IsLeave = false /* is the src a leave block */);
bool fgFlowToFirstBlockOfInnerTry(BasicBlock* blkSrc, BasicBlock* blkDest, bool sibling);
void fgObserveInlineConstants(OPCODE opcode, const FgStack& stack, bool isInlining);
void fgAdjustForAddressExposedOrWrittenThis();
unsigned fgStressBBProf()
{
#ifdef DEBUG
unsigned result = JitConfig.JitStressBBProf();
if (result == 0)
{
if (compStressCompile(STRESS_BB_PROFILE, 15))
{
result = 1;
}
}
return result;
#else
return 0;
#endif
}
bool fgHaveProfileData();
bool fgGetProfileWeightForBasicBlock(IL_OFFSET offset, weight_t* weight);
Instrumentor* fgCountInstrumentor;
Instrumentor* fgClassInstrumentor;
PhaseStatus fgPrepareToInstrumentMethod();
PhaseStatus fgInstrumentMethod();
PhaseStatus fgIncorporateProfileData();
void fgIncorporateBlockCounts();
void fgIncorporateEdgeCounts();
CORINFO_CLASS_HANDLE getRandomClass(ICorJitInfo::PgoInstrumentationSchema* schema,
UINT32 countSchemaItems,
BYTE* pInstrumentationData,
int32_t ilOffset,
CLRRandom* random);
public:
const char* fgPgoFailReason;
bool fgPgoDisabled;
ICorJitInfo::PgoSource fgPgoSource;
ICorJitInfo::PgoInstrumentationSchema* fgPgoSchema;
BYTE* fgPgoData;
UINT32 fgPgoSchemaCount;
HRESULT fgPgoQueryResult;
UINT32 fgNumProfileRuns;
UINT32 fgPgoBlockCounts;
UINT32 fgPgoEdgeCounts;
UINT32 fgPgoClassProfiles;
unsigned fgPgoInlineePgo;
unsigned fgPgoInlineeNoPgo;
unsigned fgPgoInlineeNoPgoSingleBlock;
void WalkSpanningTree(SpanningTreeVisitor* visitor);
void fgSetProfileWeight(BasicBlock* block, weight_t weight);
void fgApplyProfileScale();
bool fgHaveSufficientProfileData();
bool fgHaveTrustedProfileData();
// fgIsUsingProfileWeights - returns true if we have real profile data for this method
// or if we have some fake profile data for the stress mode
bool fgIsUsingProfileWeights()
{
return (fgHaveProfileData() || fgStressBBProf());
}
// fgProfileRunsCount - returns total number of scenario runs for the profile data
// or BB_UNITY_WEIGHT_UNSIGNED when we aren't using profile data.
unsigned fgProfileRunsCount()
{
return fgIsUsingProfileWeights() ? fgNumProfileRuns : BB_UNITY_WEIGHT_UNSIGNED;
}
//-------- Insert a statement at the start or end of a basic block --------
#ifdef DEBUG
public:
static bool fgBlockContainsStatementBounded(BasicBlock* block, Statement* stmt, bool answerOnBoundExceeded = true);
#endif
public:
Statement* fgNewStmtAtBeg(BasicBlock* block, GenTree* tree, const DebugInfo& di = DebugInfo());
void fgInsertStmtAtEnd(BasicBlock* block, Statement* stmt);
Statement* fgNewStmtAtEnd(BasicBlock* block, GenTree* tree, const DebugInfo& di = DebugInfo());
Statement* fgNewStmtNearEnd(BasicBlock* block, GenTree* tree, const DebugInfo& di = DebugInfo());
private:
void fgInsertStmtNearEnd(BasicBlock* block, Statement* stmt);
void fgInsertStmtAtBeg(BasicBlock* block, Statement* stmt);
void fgInsertStmtAfter(BasicBlock* block, Statement* insertionPoint, Statement* stmt);
public:
void fgInsertStmtBefore(BasicBlock* block, Statement* insertionPoint, Statement* stmt);
private:
Statement* fgInsertStmtListAfter(BasicBlock* block, Statement* stmtAfter, Statement* stmtList);
// Create a new temporary variable to hold the result of *ppTree,
// and transform the graph accordingly.
GenTree* fgInsertCommaFormTemp(GenTree** ppTree, CORINFO_CLASS_HANDLE structType = nullptr);
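    // A conceptual sketch of the transformation (the exact IR produced is determined by the
    // implementation): the tree at *ppTree is replaced by a comma that assigns it to a fresh temp,
    // and a separate use of that temp is returned to the caller, roughly:
    //
    //     before:  *ppTree == <expr>
    //     after:   *ppTree == COMMA(ASG(tmpN, <expr>), tmpN)   and the call returns another use of tmpN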
GenTree* fgMakeMultiUse(GenTree** ppTree);
private:
// Recognize a bitwise rotation pattern and convert into a GT_ROL or a GT_ROR node.
GenTree* fgRecognizeAndMorphBitwiseRotation(GenTree* tree);
bool fgOperIsBitwiseRotationRoot(genTreeOps oper);
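    // Illustrative example of the kind of source pattern this targets (a sketch only; the exact
    // set of shapes recognized lives in the implementation): for a 32-bit value 'x' and a
    // constant 'c' in [1..31],
    //
    //     (x << c) | (x >> (32 - c))
    //
    // is a left rotation of 'x' by 'c' and can be represented by a single GT_ROL node.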
#if !defined(TARGET_64BIT)
// Recognize and morph a long multiplication with 32 bit operands.
GenTreeOp* fgRecognizeAndMorphLongMul(GenTreeOp* mul);
GenTreeOp* fgMorphLongMul(GenTreeOp* mul);
#endif
//-------- Determine the order in which the trees will be evaluated -------
unsigned fgTreeSeqNum;
GenTree* fgTreeSeqLst;
GenTree* fgTreeSeqBeg;
GenTree* fgSetTreeSeq(GenTree* tree, GenTree* prev = nullptr, bool isLIR = false);
void fgSetTreeSeqHelper(GenTree* tree, bool isLIR);
void fgSetTreeSeqFinish(GenTree* tree, bool isLIR);
void fgSetStmtSeq(Statement* stmt);
void fgSetBlockOrder(BasicBlock* block);
//------------------------- Morphing --------------------------------------
unsigned fgPtrArgCntMax;
public:
//------------------------------------------------------------------------
// fgGetPtrArgCntMax: Return the maximum number of pointer-sized stack arguments that calls inside this method
// can push on the stack. This value is calculated during morph.
//
// Return Value:
    //    Returns fgPtrArgCntMax, which is a private field.
//
unsigned fgGetPtrArgCntMax() const
{
return fgPtrArgCntMax;
}
//------------------------------------------------------------------------
// fgSetPtrArgCntMax: Set the maximum number of pointer-sized stack arguments that calls inside this method
// can push on the stack. This function is used during StackLevelSetter to fix incorrect morph calculations.
//
void fgSetPtrArgCntMax(unsigned argCntMax)
{
fgPtrArgCntMax = argCntMax;
}
bool compCanEncodePtrArgCntMax();
private:
hashBv* fgOutgoingArgTemps;
hashBv* fgCurrentlyInUseArgTemps;
void fgSetRngChkTarget(GenTree* tree, bool delay = true);
BasicBlock* fgSetRngChkTargetInner(SpecialCodeKind kind, bool delay);
#if REARRANGE_ADDS
void fgMoveOpsLeft(GenTree* tree);
#endif
bool fgIsCommaThrow(GenTree* tree, bool forFolding = false);
bool fgIsThrow(GenTree* tree);
bool fgInDifferentRegions(BasicBlock* blk1, BasicBlock* blk2);
bool fgIsBlockCold(BasicBlock* block);
GenTree* fgMorphCastIntoHelper(GenTree* tree, int helper, GenTree* oper);
GenTree* fgMorphIntoHelperCall(GenTree* tree, int helper, GenTreeCall::Use* args, bool morphArgs = true);
GenTree* fgMorphStackArgForVarArgs(unsigned lclNum, var_types varType, unsigned lclOffs);
// A "MorphAddrContext" carries information from the surrounding context. If we are evaluating a byref address,
// it is useful to know whether the address will be immediately dereferenced, or whether the address value will
// be used, perhaps by passing it as an argument to a called method. This affects how null checking is done:
// for sufficiently small offsets, we can rely on OS page protection to implicitly null-check addresses that we
// know will be dereferenced. To know that reliance on implicit null checking is sound, we must further know that
// all offsets between the top-level indirection and the bottom are constant, and that their sum is sufficiently
// small; hence the other fields of MorphAddrContext.
enum MorphAddrContextKind
{
MACK_Ind,
MACK_Addr,
};
struct MorphAddrContext
{
MorphAddrContextKind m_kind;
bool m_allConstantOffsets; // Valid only for "m_kind == MACK_Ind". True iff all offsets between
// top-level indirection and here have been constants.
size_t m_totalOffset; // Valid only for "m_kind == MACK_Ind", and if "m_allConstantOffsets" is true.
// In that case, is the sum of those constant offsets.
MorphAddrContext(MorphAddrContextKind kind) : m_kind(kind), m_allConstantOffsets(true), m_totalOffset(0)
{
}
};
// A MACK_CopyBlock context is immutable, so we can just make one of these and share it.
static MorphAddrContext s_CopyBlockMAC;
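    // Illustrative sketch of how such a context is typically threaded through morph (a sketch,
    // not the exact code): when morphing the address operand of an indirection, a MACK_Ind
    // context is created so that callees such as fgMorphField know the address will be
    // dereferenced immediately and can rely on implicit null checks for small offsets:
    //
    //     MorphAddrContext indMac(MACK_Ind);
    //     addr = fgMorphTree(addr, &indMac);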
#ifdef FEATURE_SIMD
GenTree* getSIMDStructFromField(GenTree* tree,
CorInfoType* simdBaseJitTypeOut,
unsigned* indexOut,
unsigned* simdSizeOut,
bool ignoreUsedInSIMDIntrinsic = false);
GenTree* fgMorphFieldAssignToSimdSetElement(GenTree* tree);
GenTree* fgMorphFieldToSimdGetElement(GenTree* tree);
bool fgMorphCombineSIMDFieldAssignments(BasicBlock* block, Statement* stmt);
void impMarkContiguousSIMDFieldAssignments(Statement* stmt);
    // fgPreviousCandidateSIMDFieldAsgStmt is only used for tracking the previous simd field assignment
    // in function: Compiler::impMarkContiguousSIMDFieldAssignments.
Statement* fgPreviousCandidateSIMDFieldAsgStmt;
#endif // FEATURE_SIMD
GenTree* fgMorphArrayIndex(GenTree* tree);
GenTree* fgMorphExpandCast(GenTreeCast* tree);
GenTreeFieldList* fgMorphLclArgToFieldlist(GenTreeLclVarCommon* lcl);
void fgInitArgInfo(GenTreeCall* call);
GenTreeCall* fgMorphArgs(GenTreeCall* call);
void fgMakeOutgoingStructArgCopy(GenTreeCall* call, GenTreeCall::Use* args, CORINFO_CLASS_HANDLE copyBlkClass);
GenTree* fgMorphLocalVar(GenTree* tree, bool forceRemorph);
public:
bool fgAddrCouldBeNull(GenTree* addr);
private:
GenTree* fgMorphField(GenTree* tree, MorphAddrContext* mac);
bool fgCanFastTailCall(GenTreeCall* call, const char** failReason);
#if FEATURE_FASTTAILCALL
bool fgCallHasMustCopyByrefParameter(GenTreeCall* callee);
#endif
bool fgCheckStmtAfterTailCall();
GenTree* fgMorphTailCallViaHelpers(GenTreeCall* call, CORINFO_TAILCALL_HELPERS& help);
bool fgCanTailCallViaJitHelper();
void fgMorphTailCallViaJitHelper(GenTreeCall* call);
GenTree* fgCreateCallDispatcherAndGetResult(GenTreeCall* origCall,
CORINFO_METHOD_HANDLE callTargetStubHnd,
CORINFO_METHOD_HANDLE dispatcherHnd);
GenTree* getLookupTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_LOOKUP* pLookup,
GenTreeFlags handleFlags,
void* compileTimeHandle);
GenTree* getRuntimeLookupTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_LOOKUP* pLookup,
void* compileTimeHandle);
GenTree* getVirtMethodPointerTree(GenTree* thisPtr,
CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_CALL_INFO* pCallInfo);
GenTree* getTokenHandleTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool parent);
GenTree* fgMorphPotentialTailCall(GenTreeCall* call);
GenTree* fgGetStubAddrArg(GenTreeCall* call);
unsigned fgGetArgTabEntryParameterLclNum(GenTreeCall* call, fgArgTabEntry* argTabEntry);
void fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCall* recursiveTailCall);
Statement* fgAssignRecursiveCallArgToCallerParam(GenTree* arg,
fgArgTabEntry* argTabEntry,
unsigned lclParamNum,
BasicBlock* block,
const DebugInfo& callDI,
Statement* tmpAssignmentInsertionPoint,
Statement* paramAssignmentInsertionPoint);
GenTree* fgMorphCall(GenTreeCall* call);
GenTree* fgExpandVirtualVtableCallTarget(GenTreeCall* call);
void fgMorphCallInline(GenTreeCall* call, InlineResult* result);
void fgMorphCallInlineHelper(GenTreeCall* call, InlineResult* result, InlineContext** createdContext);
#if DEBUG
void fgNoteNonInlineCandidate(Statement* stmt, GenTreeCall* call);
static fgWalkPreFn fgFindNonInlineCandidate;
#endif
GenTree* fgOptimizeDelegateConstructor(GenTreeCall* call,
CORINFO_CONTEXT_HANDLE* ExactContextHnd,
methodPointerInfo* ldftnToken);
GenTree* fgMorphLeaf(GenTree* tree);
void fgAssignSetVarDef(GenTree* tree);
GenTree* fgMorphOneAsgBlockOp(GenTree* tree);
GenTree* fgMorphInitBlock(GenTree* tree);
GenTree* fgMorphPromoteLocalInitBlock(GenTreeLclVar* destLclNode, GenTree* initVal, unsigned blockSize);
GenTree* fgMorphGetStructAddr(GenTree** pTree, CORINFO_CLASS_HANDLE clsHnd, bool isRValue = false);
GenTree* fgMorphBlockOperand(GenTree* tree, var_types asgType, unsigned blockWidth, bool isBlkReqd);
GenTree* fgMorphCopyBlock(GenTree* tree);
GenTree* fgMorphStoreDynBlock(GenTreeStoreDynBlk* tree);
GenTree* fgMorphForRegisterFP(GenTree* tree);
GenTree* fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac = nullptr);
GenTree* fgOptimizeCast(GenTreeCast* cast);
GenTree* fgOptimizeEqualityComparisonWithConst(GenTreeOp* cmp);
GenTree* fgOptimizeRelationalComparisonWithConst(GenTreeOp* cmp);
#ifdef FEATURE_HW_INTRINSICS
GenTree* fgOptimizeHWIntrinsic(GenTreeHWIntrinsic* node);
#endif
GenTree* fgOptimizeCommutativeArithmetic(GenTreeOp* tree);
GenTree* fgOptimizeRelationalComparisonWithCasts(GenTreeOp* cmp);
GenTree* fgOptimizeAddition(GenTreeOp* add);
GenTree* fgOptimizeMultiply(GenTreeOp* mul);
GenTree* fgOptimizeBitwiseAnd(GenTreeOp* andOp);
GenTree* fgOptimizeBitwiseXor(GenTreeOp* xorOp);
GenTree* fgPropagateCommaThrow(GenTree* parent, GenTreeOp* commaThrow, GenTreeFlags precedingSideEffects);
GenTree* fgMorphRetInd(GenTreeUnOp* tree);
GenTree* fgMorphModToSubMulDiv(GenTreeOp* tree);
GenTree* fgMorphUModToAndSub(GenTreeOp* tree);
GenTree* fgMorphSmpOpOptional(GenTreeOp* tree);
GenTree* fgMorphMultiOp(GenTreeMultiOp* multiOp);
GenTree* fgMorphConst(GenTree* tree);
bool fgMorphCanUseLclFldForCopy(unsigned lclNum1, unsigned lclNum2);
GenTreeLclVar* fgMorphTryFoldObjAsLclVar(GenTreeObj* obj, bool destroyNodes = true);
GenTreeOp* fgMorphCommutative(GenTreeOp* tree);
GenTree* fgMorphCastedBitwiseOp(GenTreeOp* tree);
GenTree* fgMorphReduceAddOps(GenTree* tree);
public:
GenTree* fgMorphTree(GenTree* tree, MorphAddrContext* mac = nullptr);
private:
void fgKillDependentAssertionsSingle(unsigned lclNum DEBUGARG(GenTree* tree));
void fgKillDependentAssertions(unsigned lclNum DEBUGARG(GenTree* tree));
void fgMorphTreeDone(GenTree* tree, GenTree* oldTree = nullptr DEBUGARG(int morphNum = 0));
Statement* fgMorphStmt;
unsigned fgGetBigOffsetMorphingTemp(var_types type); // We cache one temp per type to be
// used when morphing big offset.
//----------------------- Liveness analysis -------------------------------
VARSET_TP fgCurUseSet; // vars used by block (before an assignment)
VARSET_TP fgCurDefSet; // vars assigned by block (before a use)
    MemoryKindSet fgCurMemoryUse;   // The set of memory kinds used by the current basic block.
    MemoryKindSet fgCurMemoryDef;   // The set of memory kinds modified by the current basic block.
    MemoryKindSet fgCurMemoryHavoc; // The set of memory kinds the current basic block is known to set to a "havoc" value.
bool byrefStatesMatchGcHeapStates; // True iff GcHeap and ByrefExposed memory have all the same def points.
void fgMarkUseDef(GenTreeLclVarCommon* tree);
void fgBeginScopeLife(VARSET_TP* inScope, VarScopeDsc* var);
void fgEndScopeLife(VARSET_TP* inScope, VarScopeDsc* var);
void fgMarkInScope(BasicBlock* block, VARSET_VALARG_TP inScope);
void fgUnmarkInScope(BasicBlock* block, VARSET_VALARG_TP unmarkScope);
void fgExtendDbgScopes();
void fgExtendDbgLifetimes();
#ifdef DEBUG
void fgDispDebugScopes();
#endif // DEBUG
//-------------------------------------------------------------------------
//
// The following keeps track of any code we've added for things like array
// range checking or explicit calls to enable GC, and so on.
//
public:
struct AddCodeDsc
{
AddCodeDsc* acdNext;
BasicBlock* acdDstBlk; // block to which we jump
unsigned acdData;
SpecialCodeKind acdKind; // what kind of a special block is this?
#if !FEATURE_FIXED_OUT_ARGS
        bool        acdStkLvlInit; // has the acdStkLvl value already been set?
unsigned acdStkLvl; // stack level in stack slots.
#endif // !FEATURE_FIXED_OUT_ARGS
};
private:
static unsigned acdHelper(SpecialCodeKind codeKind);
AddCodeDsc* fgAddCodeList;
bool fgAddCodeModf;
bool fgRngChkThrowAdded;
AddCodeDsc* fgExcptnTargetCache[SCK_COUNT];
BasicBlock* fgRngChkTarget(BasicBlock* block, SpecialCodeKind kind);
BasicBlock* fgAddCodeRef(BasicBlock* srcBlk, unsigned refData, SpecialCodeKind kind);
public:
AddCodeDsc* fgFindExcptnTarget(SpecialCodeKind kind, unsigned refData);
bool fgUseThrowHelperBlocks();
AddCodeDsc* fgGetAdditionalCodeDescriptors()
{
return fgAddCodeList;
}
private:
bool fgIsCodeAdded();
bool fgIsThrowHlpBlk(BasicBlock* block);
#if !FEATURE_FIXED_OUT_ARGS
unsigned fgThrowHlpBlkStkLevel(BasicBlock* block);
#endif // !FEATURE_FIXED_OUT_ARGS
unsigned fgBigOffsetMorphingTemps[TYP_COUNT];
unsigned fgCheckInlineDepthAndRecursion(InlineInfo* inlineInfo);
void fgInvokeInlineeCompiler(GenTreeCall* call, InlineResult* result, InlineContext** createdContext);
void fgInsertInlineeBlocks(InlineInfo* pInlineInfo);
Statement* fgInlinePrependStatements(InlineInfo* inlineInfo);
void fgInlineAppendStatements(InlineInfo* inlineInfo, BasicBlock* block, Statement* stmt);
#if FEATURE_MULTIREG_RET
GenTree* fgGetStructAsStructPtr(GenTree* tree);
GenTree* fgAssignStructInlineeToVar(GenTree* child, CORINFO_CLASS_HANDLE retClsHnd);
void fgAttachStructInlineeToAsg(GenTree* tree, GenTree* child, CORINFO_CLASS_HANDLE retClsHnd);
#endif // FEATURE_MULTIREG_RET
static fgWalkPreFn fgUpdateInlineReturnExpressionPlaceHolder;
static fgWalkPostFn fgLateDevirtualization;
#ifdef DEBUG
static fgWalkPreFn fgDebugCheckInlineCandidates;
void CheckNoTransformableIndirectCallsRemain();
static fgWalkPreFn fgDebugCheckForTransformableIndirectCalls;
#endif
void fgPromoteStructs();
void fgMorphStructField(GenTree* tree, GenTree* parent);
void fgMorphLocalField(GenTree* tree, GenTree* parent);
// Reset the refCount for implicit byrefs.
void fgResetImplicitByRefRefCount();
// Change implicit byrefs' types from struct to pointer, and for any that were
// promoted, create new promoted struct temps.
void fgRetypeImplicitByRefArgs();
// Rewrite appearances of implicit byrefs (manifest the implied additional level of indirection).
bool fgMorphImplicitByRefArgs(GenTree* tree);
GenTree* fgMorphImplicitByRefArgs(GenTree* tree, bool isAddr);
// Clear up annotations for any struct promotion temps created for implicit byrefs.
void fgMarkDemotedImplicitByRefArgs();
void fgMarkAddressExposedLocals();
void fgMarkAddressExposedLocals(Statement* stmt);
PhaseStatus fgForwardSub();
bool fgForwardSubBlock(BasicBlock* block);
bool fgForwardSubStatement(Statement* statement);
static fgWalkPreFn fgUpdateSideEffectsPre;
static fgWalkPostFn fgUpdateSideEffectsPost;
// The given local variable, required to be a struct variable, is being assigned via
// a "lclField", to make it masquerade as an integral type in the ABI. Make sure that
// the variable is not enregistered, and is therefore not promoted independently.
void fgLclFldAssign(unsigned lclNum);
static fgWalkPreFn gtHasLocalsWithAddrOpCB;
enum TypeProducerKind
{
TPK_Unknown = 0, // May not be a RuntimeType
TPK_Handle = 1, // RuntimeType via handle
TPK_GetType = 2, // RuntimeType via Object.get_Type()
TPK_Null = 3, // Tree value is null
TPK_Other = 4 // RuntimeType via other means
};
TypeProducerKind gtGetTypeProducerKind(GenTree* tree);
bool gtIsTypeHandleToRuntimeTypeHelper(GenTreeCall* call);
bool gtIsTypeHandleToRuntimeTypeHandleHelper(GenTreeCall* call, CorInfoHelpFunc* pHelper = nullptr);
bool gtIsActiveCSE_Candidate(GenTree* tree);
bool fgIsBigOffset(size_t offset);
bool fgNeedReturnSpillTemp();
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Optimizer XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
public:
void optInit();
GenTree* optRemoveRangeCheck(GenTreeBoundsChk* check, GenTree* comma, Statement* stmt);
GenTree* optRemoveStandaloneRangeCheck(GenTreeBoundsChk* check, Statement* stmt);
void optRemoveCommaBasedRangeCheck(GenTree* comma, Statement* stmt);
protected:
// Do hoisting for all loops.
void optHoistLoopCode();
// To represent sets of VN's that have already been hoisted in outer loops.
typedef JitHashTable<ValueNum, JitSmallPrimitiveKeyFuncs<ValueNum>, bool> VNSet;
struct LoopHoistContext
{
private:
// The set of variables hoisted in the current loop (or nullptr if there are none).
VNSet* m_pHoistedInCurLoop;
public:
// Value numbers of expressions that have been hoisted in parent loops in the loop nest.
VNSet m_hoistedInParentLoops;
// Value numbers of expressions that have been hoisted in the current (or most recent) loop in the nest.
// Previous decisions on loop-invariance of value numbers in the current loop.
VNSet m_curLoopVnInvariantCache;
VNSet* GetHoistedInCurLoop(Compiler* comp)
{
if (m_pHoistedInCurLoop == nullptr)
{
m_pHoistedInCurLoop = new (comp->getAllocatorLoopHoist()) VNSet(comp->getAllocatorLoopHoist());
}
return m_pHoistedInCurLoop;
}
VNSet* ExtractHoistedInCurLoop()
{
VNSet* res = m_pHoistedInCurLoop;
m_pHoistedInCurLoop = nullptr;
return res;
}
LoopHoistContext(Compiler* comp)
: m_pHoistedInCurLoop(nullptr)
, m_hoistedInParentLoops(comp->getAllocatorLoopHoist())
, m_curLoopVnInvariantCache(comp->getAllocatorLoopHoist())
{
}
};
// Do hoisting for loop "lnum" (an index into the optLoopTable), and all loops nested within it.
// Tracks the expressions that have been hoisted by containing loops by temporarily recording their
// value numbers in "m_hoistedInParentLoops". This set is not modified by the call.
void optHoistLoopNest(unsigned lnum, LoopHoistContext* hoistCtxt);
// Do hoisting for a particular loop ("lnum" is an index into the optLoopTable.)
// Assumes that expressions have been hoisted in containing loops if their value numbers are in
// "m_hoistedInParentLoops".
//
void optHoistThisLoop(unsigned lnum, LoopHoistContext* hoistCtxt);
// Hoist all expressions in "blocks" that are invariant in loop "loopNum" (an index into the optLoopTable)
// outside of that loop. Exempt expressions whose value number is in "m_hoistedInParentLoops"; add VN's of hoisted
// expressions to "hoistInLoop".
void optHoistLoopBlocks(unsigned loopNum, ArrayStack<BasicBlock*>* blocks, LoopHoistContext* hoistContext);
// Return true if the tree looks profitable to hoist out of loop 'lnum'.
bool optIsProfitableToHoistTree(GenTree* tree, unsigned lnum);
    // Performs the hoisting of 'tree' into the PreHeader for loop 'lnum'
void optHoistCandidate(GenTree* tree, BasicBlock* treeBb, unsigned lnum, LoopHoistContext* hoistCtxt);
// Returns true iff the ValueNum "vn" represents a value that is loop-invariant in "lnum".
// Constants and init values are always loop invariant.
// VNPhi's connect VN's to the SSA definition, so we can know if the SSA def occurs in the loop.
bool optVNIsLoopInvariant(ValueNum vn, unsigned lnum, VNSet* recordedVNs);
// If "blk" is the entry block of a natural loop, returns true and sets "*pLnum" to the index of the loop
// in the loop table.
bool optBlockIsLoopEntry(BasicBlock* blk, unsigned* pLnum);
// Records the set of "side effects" of all loops: fields (object instance and static)
// written to, and SZ-array element type equivalence classes updated.
void optComputeLoopSideEffects();
#ifdef DEBUG
bool optAnyChildNotRemoved(unsigned loopNum);
#endif // DEBUG
// Mark a loop as removed.
void optMarkLoopRemoved(unsigned loopNum);
private:
// Requires "lnum" to be the index of an outermost loop in the loop table. Traverses the body of that loop,
// including all nested loops, and records the set of "side effects" of the loop: fields (object instance and
// static) written to, and SZ-array element type equivalence classes updated.
void optComputeLoopNestSideEffects(unsigned lnum);
// Given a loop number 'lnum' mark it and any nested loops as having 'memoryHavoc'
void optRecordLoopNestsMemoryHavoc(unsigned lnum, MemoryKindSet memoryHavoc);
// Add the side effects of "blk" (which is required to be within a loop) to all loops of which it is a part.
// Returns false if we encounter a block that is not marked as being inside a loop.
//
bool optComputeLoopSideEffectsOfBlock(BasicBlock* blk);
// Hoist the expression "expr" out of loop "lnum".
void optPerformHoistExpr(GenTree* expr, BasicBlock* exprBb, unsigned lnum);
public:
void optOptimizeBools();
public:
PhaseStatus optInvertLoops(); // Invert loops so they're entered at top and tested at bottom.
PhaseStatus optOptimizeLayout(); // Optimize the BasicBlock layout of the method
PhaseStatus optSetBlockWeights();
PhaseStatus optFindLoopsPhase(); // Finds loops and records them in the loop table
void optFindLoops();
PhaseStatus optCloneLoops();
void optCloneLoop(unsigned loopInd, LoopCloneContext* context);
void optEnsureUniqueHead(unsigned loopInd, weight_t ambientWeight);
PhaseStatus optUnrollLoops(); // Unrolls loops (needs to have cost info)
void optRemoveRedundantZeroInits();
protected:
// This enumeration describes what is killed by a call.
enum callInterf
{
CALLINT_NONE, // no interference (most helpers)
CALLINT_REF_INDIRS, // kills GC ref indirections (SETFIELD OBJ)
CALLINT_SCL_INDIRS, // kills non GC ref indirections (SETFIELD non-OBJ)
CALLINT_ALL_INDIRS, // kills both GC ref and non GC ref indirections (SETFIELD STRUCT)
CALLINT_ALL, // kills everything (normal method call)
};
enum class FieldKindForVN
{
SimpleStatic,
WithBaseAddr
};
public:
// A "LoopDsc" describes a ("natural") loop. We (currently) require the body of a loop to be a contiguous (in
// bbNext order) sequence of basic blocks. (At times, we may require the blocks in a loop to be "properly numbered"
// in bbNext order; we use comparisons on the bbNum to decide order.)
// The blocks that define the body are
// top <= entry <= bottom
// The "head" of the loop is a block outside the loop that has "entry" as a successor. We only support loops with a
// single 'head' block. The meanings of these blocks are given in the definitions below. Also see the picture at
// Compiler::optFindNaturalLoops().
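    // A worked restatement of those constraints in bbNum terms (see also LoopDsc::lpWellFormed() below):
    //
    //     lpTop <= lpEntry <= lpBottom               (the loop body, in bbNext/bbNum order)
    //     lpHead < lpTop  ||  lpHead > lpBottom      (the head lies outside the [lpTop, lpBottom] range)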
struct LoopDsc
{
        BasicBlock* lpHead; // HEAD of the loop (not part of the loop itself) -- has ENTRY as a successor.
BasicBlock* lpTop; // loop TOP (the back edge from lpBottom reaches here). Lexically first block (in bbNext
// order) reachable in this loop.
BasicBlock* lpEntry; // the ENTRY in the loop (in most cases TOP or BOTTOM)
BasicBlock* lpBottom; // loop BOTTOM (from here we have a back edge to the TOP)
BasicBlock* lpExit; // if a single exit loop this is the EXIT (in most cases BOTTOM)
callInterf lpAsgCall; // "callInterf" for calls in the loop
ALLVARSET_TP lpAsgVars; // set of vars assigned within the loop (all vars, not just tracked)
varRefKinds lpAsgInds : 8; // set of inds modified within the loop
LoopFlags lpFlags;
unsigned char lpExitCnt; // number of exits from the loop
unsigned char lpParent; // The index of the most-nested loop that completely contains this one,
// or else BasicBlock::NOT_IN_LOOP if no such loop exists.
unsigned char lpChild; // The index of a nested loop, or else BasicBlock::NOT_IN_LOOP if no child exists.
// (Actually, an "immediately" nested loop --
// no other child of this loop is a parent of lpChild.)
unsigned char lpSibling; // The index of another loop that is an immediate child of lpParent,
// or else BasicBlock::NOT_IN_LOOP. One can enumerate all the children of a loop
// by following "lpChild" then "lpSibling" links.
bool lpLoopHasMemoryHavoc[MemoryKindCount]; // The loop contains an operation that we assume has arbitrary
// memory side effects. If this is set, the fields below
// may not be accurate (since they become irrelevant.)
VARSET_TP lpVarInOut; // The set of variables that are IN or OUT during the execution of this loop
VARSET_TP lpVarUseDef; // The set of variables that are USE or DEF during the execution of this loop
// The following counts are used for hoisting profitability checks.
int lpHoistedExprCount; // The register count for the non-FP expressions from inside this loop that have been
// hoisted
int lpLoopVarCount; // The register count for the non-FP LclVars that are read/written inside this loop
int lpVarInOutCount; // The register count for the non-FP LclVars that are alive inside or across this loop
int lpHoistedFPExprCount; // The register count for the FP expressions from inside this loop that have been
// hoisted
int lpLoopVarFPCount; // The register count for the FP LclVars that are read/written inside this loop
int lpVarInOutFPCount; // The register count for the FP LclVars that are alive inside or across this loop
typedef JitHashTable<CORINFO_FIELD_HANDLE, JitPtrKeyFuncs<struct CORINFO_FIELD_STRUCT_>, FieldKindForVN>
FieldHandleSet;
FieldHandleSet* lpFieldsModified; // This has entries for all static field and object instance fields modified
// in the loop.
typedef JitHashTable<CORINFO_CLASS_HANDLE, JitPtrKeyFuncs<struct CORINFO_CLASS_STRUCT_>, bool> ClassHandleSet;
ClassHandleSet* lpArrayElemTypesModified; // Bits set indicate the set of sz array element types such that
// arrays of that type are modified
// in the loop.
// Adds the variable liveness information for 'blk' to 'this' LoopDsc
void AddVariableLiveness(Compiler* comp, BasicBlock* blk);
inline void AddModifiedField(Compiler* comp, CORINFO_FIELD_HANDLE fldHnd, FieldKindForVN fieldKind);
// This doesn't *always* take a class handle -- it can also take primitive types, encoded as class handles
// (shifted left, with a low-order bit set to distinguish.)
// Use the {Encode/Decode}ElemType methods to construct/destruct these.
inline void AddModifiedElemType(Compiler* comp, CORINFO_CLASS_HANDLE structHnd);
/* The following values are set only for iterator loops, i.e. has the flag LPFLG_ITER set */
GenTree* lpIterTree; // The "i = i <op> const" tree
unsigned lpIterVar() const; // iterator variable #
int lpIterConst() const; // the constant with which the iterator is incremented
genTreeOps lpIterOper() const; // the type of the operation on the iterator (ASG_ADD, ASG_SUB, etc.)
void VERIFY_lpIterTree() const;
var_types lpIterOperType() const; // For overflow instructions
// Set to the block where we found the initialization for LPFLG_CONST_INIT loops.
// Initially, this will be 'head', but 'head' might change if we insert a loop pre-header block.
BasicBlock* lpInitBlock;
int lpConstInit; // initial constant value of iterator : Valid if LPFLG_CONST_INIT
// The following is for LPFLG_ITER loops only (i.e. the loop condition is "i RELOP const or var")
GenTree* lpTestTree; // pointer to the node containing the loop test
genTreeOps lpTestOper() const; // the type of the comparison between the iterator and the limit (GT_LE, GT_GE,
// etc.)
void VERIFY_lpTestTree() const;
bool lpIsReversed() const; // true if the iterator node is the second operand in the loop condition
GenTree* lpIterator() const; // the iterator node in the loop test
GenTree* lpLimit() const; // the limit node in the loop test
// Limit constant value of iterator - loop condition is "i RELOP const"
// : Valid if LPFLG_CONST_LIMIT
int lpConstLimit() const;
// The lclVar # in the loop condition ( "i RELOP lclVar" )
// : Valid if LPFLG_VAR_LIMIT
unsigned lpVarLimit() const;
// The array length in the loop condition ( "i RELOP arr.len" or "i RELOP arr[i][j].len" )
// : Valid if LPFLG_ARRLEN_LIMIT
bool lpArrLenLimit(Compiler* comp, ArrIndex* index) const;
// Returns "true" iff this is a "top entry" loop.
bool lpIsTopEntry() const
{
if (lpHead->bbNext == lpEntry)
{
assert(lpHead->bbFallsThrough());
assert(lpTop == lpEntry);
return true;
}
else
{
return false;
}
}
// Returns "true" iff "*this" contains the blk.
bool lpContains(BasicBlock* blk) const
{
return lpTop->bbNum <= blk->bbNum && blk->bbNum <= lpBottom->bbNum;
}
// Returns "true" iff "*this" (properly) contains the range [top, bottom] (allowing tops
// to be equal, but requiring bottoms to be different.)
bool lpContains(BasicBlock* top, BasicBlock* bottom) const
{
return lpTop->bbNum <= top->bbNum && bottom->bbNum < lpBottom->bbNum;
}
// Returns "true" iff "*this" (properly) contains "lp2" (allowing tops to be equal, but requiring
// bottoms to be different.)
bool lpContains(const LoopDsc& lp2) const
{
return lpContains(lp2.lpTop, lp2.lpBottom);
}
// Returns "true" iff "*this" is (properly) contained by the range [top, bottom]
// (allowing tops to be equal, but requiring bottoms to be different.)
bool lpContainedBy(BasicBlock* top, BasicBlock* bottom) const
{
return top->bbNum <= lpTop->bbNum && lpBottom->bbNum < bottom->bbNum;
}
// Returns "true" iff "*this" is (properly) contained by "lp2"
// (allowing tops to be equal, but requiring bottoms to be different.)
bool lpContainedBy(const LoopDsc& lp2) const
{
return lpContainedBy(lp2.lpTop, lp2.lpBottom);
}
// Returns "true" iff "*this" is disjoint from the range [top, bottom].
bool lpDisjoint(BasicBlock* top, BasicBlock* bottom) const
{
return bottom->bbNum < lpTop->bbNum || lpBottom->bbNum < top->bbNum;
}
// Returns "true" iff "*this" is disjoint from "lp2".
bool lpDisjoint(const LoopDsc& lp2) const
{
return lpDisjoint(lp2.lpTop, lp2.lpBottom);
}
// Returns "true" iff the loop is well-formed (see code for defn).
bool lpWellFormed() const
{
return lpTop->bbNum <= lpEntry->bbNum && lpEntry->bbNum <= lpBottom->bbNum &&
(lpHead->bbNum < lpTop->bbNum || lpHead->bbNum > lpBottom->bbNum);
}
#ifdef DEBUG
void lpValidatePreHeader() const
{
// If this is called, we expect there to be a pre-header.
assert(lpFlags & LPFLG_HAS_PREHEAD);
// The pre-header must unconditionally enter the loop.
assert(lpHead->GetUniqueSucc() == lpEntry);
// The loop block must be marked as a pre-header.
assert(lpHead->bbFlags & BBF_LOOP_PREHEADER);
// The loop entry must have a single non-loop predecessor, which is the pre-header.
// We can't assume here that the bbNum are properly ordered, so we can't do a simple lpContained()
// check. So, we defer this check, which will be done by `fgDebugCheckLoopTable()`.
}
#endif // DEBUG
// LoopBlocks: convenience method for enabling range-based `for` iteration over all the
// blocks in a loop, e.g.:
// for (BasicBlock* const block : loop->LoopBlocks()) ...
// Currently, the loop blocks are expected to be in linear, lexical, `bbNext` order
// from `lpTop` through `lpBottom`, inclusive. All blocks in this range are considered
// to be part of the loop.
//
BasicBlockRangeList LoopBlocks() const
{
return BasicBlockRangeList(lpTop, lpBottom);
}
};
protected:
bool fgMightHaveLoop(); // returns true if there are any back edges
bool fgHasLoops; // True if this method has any loops, set in fgComputeReachability
public:
LoopDsc* optLoopTable; // loop descriptor table
unsigned char optLoopCount; // number of tracked loops
unsigned char loopAlignCandidates; // number of loops identified for alignment
// Every time we rebuild the loop table, we increase the global "loop epoch". Any loop indices or
// loop table pointers from the previous epoch are invalid.
// TODO: validate this in some way?
unsigned optCurLoopEpoch;
void NewLoopEpoch()
{
++optCurLoopEpoch;
JITDUMP("New loop epoch %d\n", optCurLoopEpoch);
}
#ifdef DEBUG
unsigned char loopsAligned; // number of loops actually aligned
#endif // DEBUG
bool optRecordLoop(BasicBlock* head,
BasicBlock* top,
BasicBlock* entry,
BasicBlock* bottom,
BasicBlock* exit,
unsigned char exitCnt);
void optClearLoopIterInfo();
#ifdef DEBUG
void optPrintLoopInfo(unsigned lnum, bool printVerbose = false);
void optPrintLoopInfo(const LoopDsc* loop, bool printVerbose = false);
void optPrintLoopTable();
#endif
protected:
unsigned optCallCount; // number of calls made in the method
unsigned optIndirectCallCount; // number of virtual, interface and indirect calls made in the method
unsigned optNativeCallCount; // number of Pinvoke/Native calls made in the method
unsigned optLoopsCloned; // number of loops cloned in the current method.
#ifdef DEBUG
void optCheckPreds();
#endif
void optResetLoopInfo();
void optFindAndScaleGeneralLoopBlocks();
// Determine if there are any potential loops, and set BBF_LOOP_HEAD on potential loop heads.
void optMarkLoopHeads();
void optScaleLoopBlocks(BasicBlock* begBlk, BasicBlock* endBlk);
void optUnmarkLoopBlocks(BasicBlock* begBlk, BasicBlock* endBlk);
void optUpdateLoopsBeforeRemoveBlock(BasicBlock* block, bool skipUnmarkLoop = false);
bool optIsLoopTestEvalIntoTemp(Statement* testStmt, Statement** newTestStmt);
unsigned optIsLoopIncrTree(GenTree* incr);
bool optCheckIterInLoopTest(unsigned loopInd, GenTree* test, BasicBlock* from, BasicBlock* to, unsigned iterVar);
bool optComputeIterInfo(GenTree* incr, BasicBlock* from, BasicBlock* to, unsigned* pIterVar);
bool optPopulateInitInfo(unsigned loopInd, BasicBlock* initBlock, GenTree* init, unsigned iterVar);
bool optExtractInitTestIncr(
BasicBlock* head, BasicBlock* bottom, BasicBlock* exit, GenTree** ppInit, GenTree** ppTest, GenTree** ppIncr);
void optFindNaturalLoops();
void optIdentifyLoopsForAlignment();
// Ensures that all the loops in the loop nest rooted at "loopInd" (an index into the loop table) are 'canonical' --
// each loop has a unique "top." Returns "true" iff the flowgraph has been modified.
bool optCanonicalizeLoopNest(unsigned char loopInd);
// Ensures that the loop "loopInd" (an index into the loop table) is 'canonical' -- it has a unique "top,"
// unshared with any other loop. Returns "true" iff the flowgraph has been modified
bool optCanonicalizeLoop(unsigned char loopInd);
// Requires "l1" to be a valid loop table index, and not "BasicBlock::NOT_IN_LOOP".
// Requires "l2" to be a valid loop table index, or else "BasicBlock::NOT_IN_LOOP".
// Returns true iff "l2" is not NOT_IN_LOOP, and "l1" contains "l2".
// A loop contains itself.
bool optLoopContains(unsigned l1, unsigned l2) const;
// Updates the loop table by changing loop "loopInd", whose head is required
// to be "from", to be "to". Also performs this transformation for any
// loop nested in "loopInd" that shares the same head as "loopInd".
void optUpdateLoopHead(unsigned loopInd, BasicBlock* from, BasicBlock* to);
void optRedirectBlock(BasicBlock* blk, BlockToBlockMap* redirectMap, const bool updatePreds = false);
// Marks the containsCall information to "lnum" and any parent loops.
void AddContainsCallAllContainingLoops(unsigned lnum);
// Adds the variable liveness information from 'blk' to "lnum" and any parent loops.
void AddVariableLivenessAllContainingLoops(unsigned lnum, BasicBlock* blk);
// Adds "fldHnd" to the set of modified fields of "lnum" and any parent loops.
void AddModifiedFieldAllContainingLoops(unsigned lnum, CORINFO_FIELD_HANDLE fldHnd, FieldKindForVN fieldKind);
// Adds "elemType" to the set of modified array element types of "lnum" and any parent loops.
void AddModifiedElemTypeAllContainingLoops(unsigned lnum, CORINFO_CLASS_HANDLE elemType);
// Requires that "from" and "to" have the same "bbJumpKind" (perhaps because "to" is a clone
// of "from".) Copies the jump destination from "from" to "to".
void optCopyBlkDest(BasicBlock* from, BasicBlock* to);
// Returns true if 'block' is an entry block for any loop in 'optLoopTable'
bool optIsLoopEntry(BasicBlock* block) const;
// The depth of the loop described by "lnum" (an index into the loop table.) (0 == top level)
unsigned optLoopDepth(unsigned lnum)
{
assert(lnum < optLoopCount);
unsigned depth = 0;
while ((lnum = optLoopTable[lnum].lpParent) != BasicBlock::NOT_IN_LOOP)
{
++depth;
}
return depth;
}
// Struct used in optInvertWhileLoop to count interesting constructs to boost the profitability score.
struct OptInvertCountTreeInfoType
{
int sharedStaticHelperCount;
int arrayLengthCount;
};
static fgWalkResult optInvertCountTreeInfo(GenTree** pTree, fgWalkData* data);
bool optInvertWhileLoop(BasicBlock* block);
private:
static bool optIterSmallOverflow(int iterAtExit, var_types incrType);
static bool optIterSmallUnderflow(int iterAtExit, var_types decrType);
bool optComputeLoopRep(int constInit,
int constLimit,
int iterInc,
genTreeOps iterOper,
var_types iterType,
genTreeOps testOper,
bool unsignedTest,
bool dupCond,
unsigned* iterCount);
static fgWalkPreFn optIsVarAssgCB;
protected:
bool optIsVarAssigned(BasicBlock* beg, BasicBlock* end, GenTree* skip, unsigned var);
bool optIsVarAssgLoop(unsigned lnum, unsigned var);
int optIsSetAssgLoop(unsigned lnum, ALLVARSET_VALARG_TP vars, varRefKinds inds = VR_NONE);
bool optNarrowTree(GenTree* tree, var_types srct, var_types dstt, ValueNumPair vnpNarrow, bool doit);
protected:
// The following is the upper limit on how many expressions we'll keep track
// of for the CSE analysis.
//
static const unsigned MAX_CSE_CNT = EXPSET_SZ;
static const int MIN_CSE_COST = 2;
// BitVec trait information only used by the optCSE_canSwap() method, for the CSE_defMask and CSE_useMask.
// This BitVec uses one bit per CSE candidate
BitVecTraits* cseMaskTraits; // one bit per CSE candidate
// BitVec trait information for computing CSE availability using the CSE_DataFlow algorithm.
// Two bits are allocated per CSE candidate to compute CSE availability
// plus an extra bit to handle the initial unvisited case.
// (See CSE_DataFlow::EndMerge for an explanation of why this is necessary.)
//
// The two bits per CSE candidate have the following meanings:
// 11 - The CSE is available, and is also available when considering calls as killing availability.
// 10 - The CSE is available, but is not available when considering calls as killing availability.
// 00 - The CSE is not available
// 01 - An illegal combination
//
BitVecTraits* cseLivenessTraits;
//-----------------------------------------------------------------------------------------------------------------
// getCSEnum2bit: Return the normalized index to use in the EXPSET_TP for the CSE with the given CSE index.
// Each GenTree has a `gtCSEnum` field. Zero is reserved to mean this node is not a CSE, positive values indicate
// CSE uses, and negative values indicate CSE defs. The caller must pass a non-zero positive value, as from
// GET_CSE_INDEX().
//
static unsigned genCSEnum2bit(unsigned CSEnum)
{
assert((CSEnum > 0) && (CSEnum <= MAX_CSE_CNT));
return CSEnum - 1;
}
//-----------------------------------------------------------------------------------------------------------------
// getCSEAvailBit: Return the bit used by CSE dataflow sets (bbCseGen, etc.) for the availability bit for a CSE.
//
static unsigned getCSEAvailBit(unsigned CSEnum)
{
return genCSEnum2bit(CSEnum) * 2;
}
//-----------------------------------------------------------------------------------------------------------------
// getCSEAvailCrossCallBit: Return the bit used by CSE dataflow sets (bbCseGen, etc.) for the availability bit
// for a CSE considering calls as killing availability bit (see description above).
//
static unsigned getCSEAvailCrossCallBit(unsigned CSEnum)
{
return getCSEAvailBit(CSEnum) + 1;
}
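    // Worked example using the helpers above: for CSE #3, genCSEnum2bit(3) == 2, so
    // getCSEAvailBit(3) == 4 and getCSEAvailCrossCallBit(3) == 5; i.e. bits 4 and 5 of a CSE
    // dataflow set track "available" and "available across calls" for CSE #3.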
void optPrintCSEDataFlowSet(EXPSET_VALARG_TP cseDataFlowSet, bool includeBits = true);
EXPSET_TP cseCallKillsMask; // Computed once - A mask that is used to kill available CSEs at callsites
/* Generic list of nodes - used by the CSE logic */
struct treeLst
{
treeLst* tlNext;
GenTree* tlTree;
};
struct treeStmtLst
{
treeStmtLst* tslNext;
GenTree* tslTree; // tree node
Statement* tslStmt; // statement containing the tree
BasicBlock* tslBlock; // block containing the statement
};
// The following logic keeps track of expressions via a simple hash table.
struct CSEdsc
{
CSEdsc* csdNextInBucket; // used by the hash table
        size_t csdHashKey;        // the original hash key
ssize_t csdConstDefValue; // When we CSE similar constants, this is the value that we use as the def
ValueNum csdConstDefVN; // When we CSE similar constants, this is the ValueNumber that we use for the LclVar
// assignment
unsigned csdIndex; // 1..optCSECandidateCount
bool csdIsSharedConst; // true if this CSE is a shared const
bool csdLiveAcrossCall;
unsigned short csdDefCount; // definition count
unsigned short csdUseCount; // use count (excluding the implicit uses at defs)
weight_t csdDefWtCnt; // weighted def count
weight_t csdUseWtCnt; // weighted use count (excluding the implicit uses at defs)
GenTree* csdTree; // treenode containing the 1st occurrence
Statement* csdStmt; // stmt containing the 1st occurrence
BasicBlock* csdBlock; // block containing the 1st occurrence
treeStmtLst* csdTreeList; // list of matching tree nodes: head
treeStmtLst* csdTreeLast; // list of matching tree nodes: tail
// ToDo: This can be removed when gtGetStructHandleIfPresent stops guessing
        // and GT_IND nodes always have a valid struct handle.
//
CORINFO_CLASS_HANDLE csdStructHnd; // The class handle, currently needed to create a SIMD LclVar in PerformCSE
bool csdStructHndMismatch;
ValueNum defExcSetPromise; // The exception set that is now required for all defs of this CSE.
// This will be set to NoVN if we decide to abandon this CSE
ValueNum defExcSetCurrent; // The set of exceptions we currently can use for CSE uses.
ValueNum defConservNormVN; // if all def occurrences share the same conservative normal value
// number, this will reflect it; otherwise, NoVN.
// not used for shared const CSE's
};
static const size_t s_optCSEhashSizeInitial;
static const size_t s_optCSEhashGrowthFactor;
static const size_t s_optCSEhashBucketSize;
size_t optCSEhashSize; // The current size of hashtable
size_t optCSEhashCount; // Number of entries in hashtable
size_t optCSEhashMaxCountBeforeResize; // Number of entries before resize
CSEdsc** optCSEhash;
CSEdsc** optCSEtab;
typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, GenTree*> NodeToNodeMap;
NodeToNodeMap* optCseCheckedBoundMap; // Maps bound nodes to ancestor compares that should be
// re-numbered with the bound to improve range check elimination
// Given a compare, look for a cse candidate checked bound feeding it and add a map entry if found.
void optCseUpdateCheckedBoundMap(GenTree* compare);
void optCSEstop();
CSEdsc* optCSEfindDsc(unsigned index);
bool optUnmarkCSE(GenTree* tree);
// user defined callback data for the tree walk function optCSE_MaskHelper()
struct optCSE_MaskData
{
EXPSET_TP CSE_defMask;
EXPSET_TP CSE_useMask;
};
// Treewalk helper for optCSE_DefMask and optCSE_UseMask
static fgWalkPreFn optCSE_MaskHelper;
// This function walks all the nodes of a given tree
// and returns the mask of CSE definitions and uses for the tree
//
void optCSE_GetMaskData(GenTree* tree, optCSE_MaskData* pMaskData);
// Given a binary tree node return true if it is safe to swap the order of evaluation for op1 and op2.
bool optCSE_canSwap(GenTree* firstNode, GenTree* secondNode);
struct optCSEcostCmpEx
{
bool operator()(const CSEdsc* op1, const CSEdsc* op2);
};
struct optCSEcostCmpSz
{
bool operator()(const CSEdsc* op1, const CSEdsc* op2);
};
void optCleanupCSEs();
#ifdef DEBUG
void optEnsureClearCSEInfo();
#endif // DEBUG
static bool Is_Shared_Const_CSE(size_t key)
{
return ((key & TARGET_SIGN_BIT) != 0);
}
// returns the encoded key
static size_t Encode_Shared_Const_CSE_Value(size_t key)
{
return TARGET_SIGN_BIT | (key >> CSE_CONST_SHARED_LOW_BITS);
}
// returns the original key
static size_t Decode_Shared_Const_CSE_Value(size_t enckey)
{
assert(Is_Shared_Const_CSE(enckey));
return (enckey & ~TARGET_SIGN_BIT) << CSE_CONST_SHARED_LOW_BITS;
}
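// Illustrative round-trip for the helpers above (the exact shift amount and sign bit are target-dependent):
//
//   size_t enc = Encode_Shared_Const_CSE_Value(key); // sign bit set, low CSE_CONST_SHARED_LOW_BITS bits dropped
//   size_t dec = Decode_Shared_Const_CSE_Value(enc); // == key with its low CSE_CONST_SHARED_LOW_BITS bits cleared
//
// Consequently, two constants that differ only in those low bits encode to the same shared key.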
/**************************************************************************
* Value Number based CSEs
*************************************************************************/
// String to use for formatting CSE numbers. Note that this is the positive number, e.g., from GET_CSE_INDEX().
#define FMT_CSE "CSE #%02u"
public:
void optOptimizeValnumCSEs();
protected:
void optValnumCSE_Init();
unsigned optValnumCSE_Index(GenTree* tree, Statement* stmt);
bool optValnumCSE_Locate();
void optValnumCSE_InitDataFlow();
void optValnumCSE_DataFlow();
void optValnumCSE_Availablity();
void optValnumCSE_Heuristic();
bool optDoCSE; // True when we have found a duplicate CSE tree
bool optValnumCSE_phase; // True when we are executing the optOptimizeValnumCSEs() phase
unsigned optCSECandidateCount; // Count of CSE candidates
unsigned optCSEstart; // The first local variable number that is a CSE
unsigned optCSEcount; // The total count of CSEs introduced.
weight_t optCSEweight; // The weight of the current block when we are doing PerformCSE
bool optIsCSEcandidate(GenTree* tree);
// lclNumIsTrueCSE returns true if the LclVar was introduced by the CSE phase of the compiler
//
bool lclNumIsTrueCSE(unsigned lclNum) const
{
return ((optCSEcount > 0) && (lclNum >= optCSEstart) && (lclNum < optCSEstart + optCSEcount));
}
// lclNumIsCSE returns true if the LclVar should be treated like a CSE with regards to constant prop.
//
bool lclNumIsCSE(unsigned lclNum) const
{
return lvaGetDesc(lclNum)->lvIsCSE;
}
#ifdef DEBUG
bool optConfigDisableCSE();
bool optConfigDisableCSE2();
#endif
void optOptimizeCSEs();
struct isVarAssgDsc
{
GenTree* ivaSkip;
ALLVARSET_TP ivaMaskVal; // Set of variables assigned to. This is a set of all vars, not tracked vars.
#ifdef DEBUG
void* ivaSelf;
#endif
unsigned ivaVar; // Variable we are interested in, or -1
varRefKinds ivaMaskInd; // What kind of indirect assignments are there?
callInterf ivaMaskCall; // What kind of calls are there?
bool ivaMaskIncomplete; // Variables not representable in ivaMaskVal were assigned to.
};
static callInterf optCallInterf(GenTreeCall* call);
public:
// VN based copy propagation.
// In DEBUG builds, we'd like to know the tree that the SSA definition was pushed for.
// While for ordinary SSA defs it will be available (as an ASG) in the SSA descriptor,
// for locals which will use "definitions from uses", it will not be, so we store it
// in this class instead.
class CopyPropSsaDef
{
LclSsaVarDsc* m_ssaDef;
#ifdef DEBUG
GenTree* m_defNode;
#endif
public:
CopyPropSsaDef(LclSsaVarDsc* ssaDef, GenTree* defNode)
: m_ssaDef(ssaDef)
#ifdef DEBUG
, m_defNode(defNode)
#endif
{
}
LclSsaVarDsc* GetSsaDef() const
{
return m_ssaDef;
}
#ifdef DEBUG
GenTree* GetDefNode() const
{
return m_defNode;
}
#endif
};
typedef ArrayStack<CopyPropSsaDef> CopyPropSsaDefStack;
typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, CopyPropSsaDefStack*> LclNumToLiveDefsMap;
// Copy propagation functions.
void optCopyProp(Statement* stmt, GenTreeLclVarCommon* tree, unsigned lclNum, LclNumToLiveDefsMap* curSsaName);
void optBlockCopyPropPopStacks(BasicBlock* block, LclNumToLiveDefsMap* curSsaName);
void optBlockCopyProp(BasicBlock* block, LclNumToLiveDefsMap* curSsaName);
void optCopyPropPushDef(GenTree* defNode,
GenTreeLclVarCommon* lclNode,
unsigned lclNum,
LclNumToLiveDefsMap* curSsaName);
unsigned optIsSsaLocal(GenTreeLclVarCommon* lclNode);
int optCopyProp_LclVarScore(const LclVarDsc* lclVarDsc, const LclVarDsc* copyVarDsc, bool preferOp2);
void optVnCopyProp();
INDEBUG(void optDumpCopyPropStack(LclNumToLiveDefsMap* curSsaName));
/**************************************************************************
* Early value propagation
*************************************************************************/
struct SSAName
{
unsigned m_lvNum;
unsigned m_ssaNum;
SSAName(unsigned lvNum, unsigned ssaNum) : m_lvNum(lvNum), m_ssaNum(ssaNum)
{
}
static unsigned GetHashCode(SSAName ssaNm)
{
return (ssaNm.m_lvNum << 16) | (ssaNm.m_ssaNum);
}
static bool Equals(SSAName ssaNm1, SSAName ssaNm2)
{
return (ssaNm1.m_lvNum == ssaNm2.m_lvNum) && (ssaNm1.m_ssaNum == ssaNm2.m_ssaNum);
}
};
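// For example, SSAName(5, 2).GetHashCode() yields 0x00050002: the local number occupies the high
// 16 bits and the SSA number the low 16 bits.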
#define OMF_HAS_NEWARRAY 0x00000001 // Method contains 'new' of an array
#define OMF_HAS_NEWOBJ 0x00000002 // Method contains 'new' of an object type.
#define OMF_HAS_ARRAYREF 0x00000004 // Method contains array element loads or stores.
#define OMF_HAS_NULLCHECK 0x00000008 // Method contains null check.
#define OMF_HAS_FATPOINTER 0x00000010 // Method contains call, that needs fat pointer transformation.
#define OMF_HAS_OBJSTACKALLOC 0x00000020 // Method contains an object allocated on the stack.
#define OMF_HAS_GUARDEDDEVIRT 0x00000040 // Method contains guarded devirtualization candidate
#define OMF_HAS_EXPRUNTIMELOOKUP 0x00000080 // Method contains a runtime lookup to an expandable dictionary.
#define OMF_HAS_PATCHPOINT 0x00000100 // Method contains patchpoints
#define OMF_NEEDS_GCPOLLS 0x00000200 // Method needs GC polls
#define OMF_HAS_FROZEN_STRING 0x00000400 // Method has a frozen string (REF constant int), currently only on CoreRT.
#define OMF_HAS_PARTIAL_COMPILATION_PATCHPOINT 0x00000800 // Method contains partial compilation patchpoints
#define OMF_HAS_TAILCALL_SUCCESSOR 0x00001000 // Method has potential tail call in a non BBJ_RETURN block
bool doesMethodHaveFatPointer()
{
return (optMethodFlags & OMF_HAS_FATPOINTER) != 0;
}
void setMethodHasFatPointer()
{
optMethodFlags |= OMF_HAS_FATPOINTER;
}
void clearMethodHasFatPointer()
{
optMethodFlags &= ~OMF_HAS_FATPOINTER;
}
void addFatPointerCandidate(GenTreeCall* call);
bool doesMethodHaveFrozenString() const
{
return (optMethodFlags & OMF_HAS_FROZEN_STRING) != 0;
}
void setMethodHasFrozenString()
{
optMethodFlags |= OMF_HAS_FROZEN_STRING;
}
bool doesMethodHaveGuardedDevirtualization() const
{
return (optMethodFlags & OMF_HAS_GUARDEDDEVIRT) != 0;
}
void setMethodHasGuardedDevirtualization()
{
optMethodFlags |= OMF_HAS_GUARDEDDEVIRT;
}
void clearMethodHasGuardedDevirtualization()
{
optMethodFlags &= ~OMF_HAS_GUARDEDDEVIRT;
}
void considerGuardedDevirtualization(GenTreeCall* call,
IL_OFFSET ilOffset,
bool isInterface,
CORINFO_METHOD_HANDLE baseMethod,
CORINFO_CLASS_HANDLE baseClass,
CORINFO_CONTEXT_HANDLE* pContextHandle DEBUGARG(CORINFO_CLASS_HANDLE objClass)
DEBUGARG(const char* objClassName));
void addGuardedDevirtualizationCandidate(GenTreeCall* call,
CORINFO_METHOD_HANDLE methodHandle,
CORINFO_CLASS_HANDLE classHandle,
unsigned methodAttr,
unsigned classAttr,
unsigned likelihood);
bool doesMethodHaveExpRuntimeLookup()
{
return (optMethodFlags & OMF_HAS_EXPRUNTIMELOOKUP) != 0;
}
void setMethodHasExpRuntimeLookup()
{
optMethodFlags |= OMF_HAS_EXPRUNTIMELOOKUP;
}
void clearMethodHasExpRuntimeLookup()
{
optMethodFlags &= ~OMF_HAS_EXPRUNTIMELOOKUP;
}
void addExpRuntimeLookupCandidate(GenTreeCall* call);
bool doesMethodHavePatchpoints()
{
return (optMethodFlags & OMF_HAS_PATCHPOINT) != 0;
}
void setMethodHasPatchpoint()
{
optMethodFlags |= OMF_HAS_PATCHPOINT;
}
bool doesMethodHavePartialCompilationPatchpoints()
{
return (optMethodFlags & OMF_HAS_PARTIAL_COMPILATION_PATCHPOINT) != 0;
}
void setMethodHasPartialCompilationPatchpoint()
{
optMethodFlags |= OMF_HAS_PARTIAL_COMPILATION_PATCHPOINT;
}
unsigned optMethodFlags;
bool doesMethodHaveNoReturnCalls()
{
return optNoReturnCallCount > 0;
}
void setMethodHasNoReturnCalls()
{
optNoReturnCallCount++;
}
unsigned optNoReturnCallCount;
// Recursion bound controls how far we can walk backwards when tracking an SSA value.
// No throughput difference was found with a backward walk bound between 3 and 8.
static const int optEarlyPropRecurBound = 5;
enum class optPropKind
{
OPK_INVALID,
OPK_ARRAYLEN,
OPK_NULLCHECK
};
typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, GenTree*> LocalNumberToNullCheckTreeMap;
GenTree* getArrayLengthFromAllocation(GenTree* tree DEBUGARG(BasicBlock* block));
GenTree* optPropGetValueRec(unsigned lclNum, unsigned ssaNum, optPropKind valueKind, int walkDepth);
GenTree* optPropGetValue(unsigned lclNum, unsigned ssaNum, optPropKind valueKind);
GenTree* optEarlyPropRewriteTree(GenTree* tree, LocalNumberToNullCheckTreeMap* nullCheckMap);
bool optDoEarlyPropForBlock(BasicBlock* block);
bool optDoEarlyPropForFunc();
void optEarlyProp();
void optFoldNullCheck(GenTree* tree, LocalNumberToNullCheckTreeMap* nullCheckMap);
GenTree* optFindNullCheckToFold(GenTree* tree, LocalNumberToNullCheckTreeMap* nullCheckMap);
bool optIsNullCheckFoldingLegal(GenTree* tree,
GenTree* nullCheckTree,
GenTree** nullCheckParent,
Statement** nullCheckStmt);
bool optCanMoveNullCheckPastTree(GenTree* tree,
unsigned nullCheckLclNum,
bool isInsideTry,
bool checkSideEffectSummary);
#if DEBUG
void optCheckFlagsAreSet(unsigned methodFlag,
const char* methodFlagStr,
unsigned bbFlag,
const char* bbFlagStr,
GenTree* tree,
BasicBlock* basicBlock);
#endif
// Redundant branch opts
//
PhaseStatus optRedundantBranches();
bool optRedundantRelop(BasicBlock* const block);
bool optRedundantBranch(BasicBlock* const block);
bool optJumpThread(BasicBlock* const block, BasicBlock* const domBlock, bool domIsSameRelop);
bool optReachable(BasicBlock* const fromBlock, BasicBlock* const toBlock, BasicBlock* const excludedBlock);
/**************************************************************************
* Value/Assertion propagation
*************************************************************************/
public:
// Data structures for assertion prop
BitVecTraits* apTraits;
ASSERT_TP apFull;
enum optAssertionKind
{
OAK_INVALID,
OAK_EQUAL,
OAK_NOT_EQUAL,
OAK_SUBRANGE,
OAK_NO_THROW,
OAK_COUNT
};
enum optOp1Kind
{
O1K_INVALID,
O1K_LCLVAR,
O1K_ARR_BND,
O1K_BOUND_OPER_BND,
O1K_BOUND_LOOP_BND,
O1K_CONSTANT_LOOP_BND,
O1K_CONSTANT_LOOP_BND_UN,
O1K_EXACT_TYPE,
O1K_SUBTYPE,
O1K_VALUE_NUMBER,
O1K_COUNT
};
enum optOp2Kind
{
O2K_INVALID,
O2K_LCLVAR_COPY,
O2K_IND_CNS_INT,
O2K_CONST_INT,
O2K_CONST_LONG,
O2K_CONST_DOUBLE,
O2K_ZEROOBJ,
O2K_SUBRANGE,
O2K_COUNT
};
struct AssertionDsc
{
optAssertionKind assertionKind;
struct SsaVar
{
unsigned lclNum; // assigned to or property of this local var number
unsigned ssaNum;
};
struct ArrBnd
{
ValueNum vnIdx;
ValueNum vnLen;
};
struct AssertionDscOp1
{
optOp1Kind kind; // a normal LclVar, or Exact-type or Subtype
ValueNum vn;
union {
SsaVar lcl;
ArrBnd bnd;
};
} op1;
struct AssertionDscOp2
{
optOp2Kind kind; // a const or copy assignment
ValueNum vn;
struct IntVal
{
ssize_t iconVal; // integer
#if !defined(HOST_64BIT)
unsigned padding; // unused; ensures iconFlags does not overlap lconVal
#endif
GenTreeFlags iconFlags; // gtFlags
};
union {
struct
{
SsaVar lcl;
FieldSeqNode* zeroOffsetFieldSeq;
};
IntVal u1;
__int64 lconVal;
double dconVal;
IntegralRange u2;
};
} op2;
bool IsCheckedBoundArithBound()
{
return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) && op1.kind == O1K_BOUND_OPER_BND);
}
bool IsCheckedBoundBound()
{
return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) && op1.kind == O1K_BOUND_LOOP_BND);
}
bool IsConstantBound()
{
return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) &&
(op1.kind == O1K_CONSTANT_LOOP_BND));
}
bool IsConstantBoundUnsigned()
{
return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) &&
(op1.kind == O1K_CONSTANT_LOOP_BND_UN));
}
bool IsBoundsCheckNoThrow()
{
return ((assertionKind == OAK_NO_THROW) && (op1.kind == O1K_ARR_BND));
}
bool IsCopyAssertion()
{
return ((assertionKind == OAK_EQUAL) && (op1.kind == O1K_LCLVAR) && (op2.kind == O2K_LCLVAR_COPY));
}
bool IsConstantInt32Assertion()
{
return ((assertionKind == OAK_EQUAL) || (assertionKind == OAK_NOT_EQUAL)) && (op2.kind == O2K_CONST_INT);
}
static bool SameKind(AssertionDsc* a1, AssertionDsc* a2)
{
return a1->assertionKind == a2->assertionKind && a1->op1.kind == a2->op1.kind &&
a1->op2.kind == a2->op2.kind;
}
static bool ComplementaryKind(optAssertionKind kind, optAssertionKind kind2)
{
if (kind == OAK_EQUAL)
{
return kind2 == OAK_NOT_EQUAL;
}
else if (kind == OAK_NOT_EQUAL)
{
return kind2 == OAK_EQUAL;
}
return false;
}
bool HasSameOp1(AssertionDsc* that, bool vnBased)
{
if (op1.kind != that->op1.kind)
{
return false;
}
else if (op1.kind == O1K_ARR_BND)
{
assert(vnBased);
return (op1.bnd.vnIdx == that->op1.bnd.vnIdx) && (op1.bnd.vnLen == that->op1.bnd.vnLen);
}
else
{
return ((vnBased && (op1.vn == that->op1.vn)) ||
(!vnBased && (op1.lcl.lclNum == that->op1.lcl.lclNum)));
}
}
bool HasSameOp2(AssertionDsc* that, bool vnBased)
{
if (op2.kind != that->op2.kind)
{
return false;
}
switch (op2.kind)
{
case O2K_IND_CNS_INT:
case O2K_CONST_INT:
return ((op2.u1.iconVal == that->op2.u1.iconVal) && (op2.u1.iconFlags == that->op2.u1.iconFlags));
case O2K_CONST_LONG:
return (op2.lconVal == that->op2.lconVal);
case O2K_CONST_DOUBLE:
// exact match because of positive and negative zero.
return (memcmp(&op2.dconVal, &that->op2.dconVal, sizeof(double)) == 0);
case O2K_ZEROOBJ:
return true;
case O2K_LCLVAR_COPY:
return (op2.lcl.lclNum == that->op2.lcl.lclNum) &&
(!vnBased || op2.lcl.ssaNum == that->op2.lcl.ssaNum) &&
(op2.zeroOffsetFieldSeq == that->op2.zeroOffsetFieldSeq);
case O2K_SUBRANGE:
return op2.u2.Equals(that->op2.u2);
case O2K_INVALID:
// we will return false
break;
default:
assert(!"Unexpected value for op2.kind in AssertionDsc.");
break;
}
return false;
}
bool Complementary(AssertionDsc* that, bool vnBased)
{
return ComplementaryKind(assertionKind, that->assertionKind) && HasSameOp1(that, vnBased) &&
HasSameOp2(that, vnBased);
}
bool Equals(AssertionDsc* that, bool vnBased)
{
if (assertionKind != that->assertionKind)
{
return false;
}
else if (assertionKind == OAK_NO_THROW)
{
assert(op2.kind == O2K_INVALID);
return HasSameOp1(that, vnBased);
}
else
{
return HasSameOp1(that, vnBased) && HasSameOp2(that, vnBased);
}
}
};
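// Illustrative sketch of how a simple constant assertion might be populated (the local number and
// constant here are hypothetical, not produced by any particular method):
//
//   AssertionDsc dsc;
//   dsc.assertionKind  = OAK_EQUAL;    // "V03 == 7"
//   dsc.op1.kind       = O1K_LCLVAR;
//   dsc.op1.lcl.lclNum = 3;
//   dsc.op2.kind       = O2K_CONST_INT;
//   dsc.op2.u1.iconVal = 7;
//
//   assert(dsc.IsConstantInt32Assertion());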
protected:
static fgWalkPreFn optAddCopiesCallback;
static fgWalkPreFn optVNAssertionPropCurStmtVisitor;
unsigned optAddCopyLclNum;
GenTree* optAddCopyAsgnNode;
bool optLocalAssertionProp; // indicates that we are performing local assertion prop
bool optAssertionPropagated; // set to true if we modified the trees
bool optAssertionPropagatedCurrentStmt;
#ifdef DEBUG
GenTree* optAssertionPropCurrentTree;
#endif
AssertionIndex* optComplementaryAssertionMap;
JitExpandArray<ASSERT_TP>* optAssertionDep; // table that holds dependent assertions (assertions
// using the value of a local var) for each local var
AssertionDsc* optAssertionTabPrivate; // table that holds info about value assignments
AssertionIndex optAssertionCount; // total number of assertions in the assertion table
AssertionIndex optMaxAssertionCount;
public:
void optVnNonNullPropCurStmt(BasicBlock* block, Statement* stmt, GenTree* tree);
fgWalkResult optVNConstantPropCurStmt(BasicBlock* block, Statement* stmt, GenTree* tree);
GenTree* optVNConstantPropOnJTrue(BasicBlock* block, GenTree* test);
GenTree* optVNConstantPropOnTree(BasicBlock* block, GenTree* tree);
GenTree* optExtractSideEffListFromConst(GenTree* tree);
AssertionIndex GetAssertionCount()
{
return optAssertionCount;
}
ASSERT_TP* bbJtrueAssertionOut;
typedef JitHashTable<ValueNum, JitSmallPrimitiveKeyFuncs<ValueNum>, ASSERT_TP> ValueNumToAssertsMap;
ValueNumToAssertsMap* optValueNumToAsserts;
// Assertion prop helpers.
ASSERT_TP& GetAssertionDep(unsigned lclNum);
AssertionDsc* optGetAssertion(AssertionIndex assertIndex);
void optAssertionInit(bool isLocalProp);
void optAssertionTraitsInit(AssertionIndex assertionCount);
void optAssertionReset(AssertionIndex limit);
void optAssertionRemove(AssertionIndex index);
// Assertion prop data flow functions.
void optAssertionPropMain();
Statement* optVNAssertionPropCurStmt(BasicBlock* block, Statement* stmt);
bool optIsTreeKnownIntValue(bool vnBased, GenTree* tree, ssize_t* pConstant, GenTreeFlags* pIconFlags);
ASSERT_TP* optInitAssertionDataflowFlags();
ASSERT_TP* optComputeAssertionGen();
// Assertion Gen functions.
void optAssertionGen(GenTree* tree);
AssertionIndex optAssertionGenCast(GenTreeCast* cast);
AssertionIndex optAssertionGenPhiDefn(GenTree* tree);
AssertionInfo optCreateJTrueBoundsAssertion(GenTree* tree);
AssertionInfo optAssertionGenJtrue(GenTree* tree);
AssertionIndex optCreateJtrueAssertions(GenTree* op1,
GenTree* op2,
Compiler::optAssertionKind assertionKind,
bool helperCallArgs = false);
AssertionIndex optFindComplementary(AssertionIndex assertionIndex);
void optMapComplementary(AssertionIndex assertionIndex, AssertionIndex index);
// Assertion creation functions.
AssertionIndex optCreateAssertion(GenTree* op1,
GenTree* op2,
optAssertionKind assertionKind,
bool helperCallArgs = false);
AssertionIndex optFinalizeCreatingAssertion(AssertionDsc* assertion);
bool optTryExtractSubrangeAssertion(GenTree* source, IntegralRange* pRange);
void optCreateComplementaryAssertion(AssertionIndex assertionIndex,
GenTree* op1,
GenTree* op2,
bool helperCallArgs = false);
bool optAssertionVnInvolvesNan(AssertionDsc* assertion);
AssertionIndex optAddAssertion(AssertionDsc* assertion);
void optAddVnAssertionMapping(ValueNum vn, AssertionIndex index);
#ifdef DEBUG
void optPrintVnAssertionMapping();
#endif
ASSERT_TP optGetVnMappedAssertions(ValueNum vn);
// Used for respective assertion propagations.
AssertionIndex optAssertionIsSubrange(GenTree* tree, IntegralRange range, ASSERT_VALARG_TP assertions);
AssertionIndex optAssertionIsSubtype(GenTree* tree, GenTree* methodTableArg, ASSERT_VALARG_TP assertions);
AssertionIndex optAssertionIsNonNullInternal(GenTree* op, ASSERT_VALARG_TP assertions DEBUGARG(bool* pVnBased));
bool optAssertionIsNonNull(GenTree* op,
ASSERT_VALARG_TP assertions DEBUGARG(bool* pVnBased) DEBUGARG(AssertionIndex* pIndex));
AssertionIndex optGlobalAssertionIsEqualOrNotEqual(ASSERT_VALARG_TP assertions, GenTree* op1, GenTree* op2);
AssertionIndex optGlobalAssertionIsEqualOrNotEqualZero(ASSERT_VALARG_TP assertions, GenTree* op1);
AssertionIndex optLocalAssertionIsEqualOrNotEqual(
optOp1Kind op1Kind, unsigned lclNum, optOp2Kind op2Kind, ssize_t cnsVal, ASSERT_VALARG_TP assertions);
// Assertion prop for lcl var functions.
bool optAssertionProp_LclVarTypeCheck(GenTree* tree, LclVarDsc* lclVarDsc, LclVarDsc* copyVarDsc);
GenTree* optCopyAssertionProp(AssertionDsc* curAssertion,
GenTreeLclVarCommon* tree,
Statement* stmt DEBUGARG(AssertionIndex index));
GenTree* optConstantAssertionProp(AssertionDsc* curAssertion,
GenTreeLclVarCommon* tree,
Statement* stmt DEBUGARG(AssertionIndex index));
bool optZeroObjAssertionProp(GenTree* tree, ASSERT_VALARG_TP assertions);
// Assertion propagation functions.
GenTree* optAssertionProp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt, BasicBlock* block);
GenTree* optAssertionProp_LclVar(ASSERT_VALARG_TP assertions, GenTreeLclVarCommon* tree, Statement* stmt);
GenTree* optAssertionProp_Asg(ASSERT_VALARG_TP assertions, GenTreeOp* asg, Statement* stmt);
GenTree* optAssertionProp_Return(ASSERT_VALARG_TP assertions, GenTreeUnOp* ret, Statement* stmt);
GenTree* optAssertionProp_Ind(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt);
GenTree* optAssertionProp_Cast(ASSERT_VALARG_TP assertions, GenTreeCast* cast, Statement* stmt);
GenTree* optAssertionProp_Call(ASSERT_VALARG_TP assertions, GenTreeCall* call, Statement* stmt);
GenTree* optAssertionProp_RelOp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt);
GenTree* optAssertionProp_Comma(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt);
GenTree* optAssertionProp_BndsChk(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt);
GenTree* optAssertionPropGlobal_RelOp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt);
GenTree* optAssertionPropLocal_RelOp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt);
GenTree* optAssertionProp_Update(GenTree* newTree, GenTree* tree, Statement* stmt);
GenTree* optNonNullAssertionProp_Call(ASSERT_VALARG_TP assertions, GenTreeCall* call);
// Implied assertion functions.
void optImpliedAssertions(AssertionIndex assertionIndex, ASSERT_TP& activeAssertions);
void optImpliedByTypeOfAssertions(ASSERT_TP& activeAssertions);
void optImpliedByCopyAssertion(AssertionDsc* copyAssertion, AssertionDsc* depAssertion, ASSERT_TP& result);
void optImpliedByConstAssertion(AssertionDsc* curAssertion, ASSERT_TP& result);
#ifdef DEBUG
void optPrintAssertion(AssertionDsc* newAssertion, AssertionIndex assertionIndex = 0);
void optPrintAssertionIndex(AssertionIndex index);
void optPrintAssertionIndices(ASSERT_TP assertions);
void optDebugCheckAssertion(AssertionDsc* assertion);
void optDebugCheckAssertions(AssertionIndex AssertionIndex);
#endif
static void optDumpAssertionIndices(const char* header, ASSERT_TP assertions, const char* footer = nullptr);
static void optDumpAssertionIndices(ASSERT_TP assertions, const char* footer = nullptr);
void optAddCopies();
/**************************************************************************
* Range checks
*************************************************************************/
public:
struct LoopCloneVisitorInfo
{
LoopCloneContext* context;
unsigned loopNum;
Statement* stmt;
LoopCloneVisitorInfo(LoopCloneContext* context, unsigned loopNum, Statement* stmt)
: context(context), loopNum(loopNum), stmt(nullptr)
{
}
};
bool optIsStackLocalInvariant(unsigned loopNum, unsigned lclNum);
bool optExtractArrIndex(GenTree* tree, ArrIndex* result, unsigned lhsNum);
bool optReconstructArrIndex(GenTree* tree, ArrIndex* result, unsigned lhsNum);
bool optIdentifyLoopOptInfo(unsigned loopNum, LoopCloneContext* context);
static fgWalkPreFn optCanOptimizeByLoopCloningVisitor;
fgWalkResult optCanOptimizeByLoopCloning(GenTree* tree, LoopCloneVisitorInfo* info);
bool optObtainLoopCloningOpts(LoopCloneContext* context);
bool optIsLoopClonable(unsigned loopInd);
bool optLoopCloningEnabled();
#ifdef DEBUG
void optDebugLogLoopCloning(BasicBlock* block, Statement* insertBefore);
#endif
void optPerformStaticOptimizations(unsigned loopNum, LoopCloneContext* context DEBUGARG(bool fastPath));
bool optComputeDerefConditions(unsigned loopNum, LoopCloneContext* context);
bool optDeriveLoopCloningConditions(unsigned loopNum, LoopCloneContext* context);
BasicBlock* optInsertLoopChoiceConditions(LoopCloneContext* context,
unsigned loopNum,
BasicBlock* slowHead,
BasicBlock* insertAfter);
protected:
ssize_t optGetArrayRefScaleAndIndex(GenTree* mul, GenTree** pIndex DEBUGARG(bool bRngChk));
bool optReachWithoutCall(BasicBlock* srcBB, BasicBlock* dstBB);
protected:
bool optLoopsMarked;
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX RegAlloc XX
XX XX
XX Does the register allocation and puts the remaining lclVars on the stack XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
public:
regNumber raUpdateRegStateForArg(RegState* regState, LclVarDsc* argDsc);
void raMarkStkVars();
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
#if defined(TARGET_AMD64)
static bool varTypeNeedsPartialCalleeSave(var_types type)
{
assert(type != TYP_STRUCT);
return (type == TYP_SIMD32);
}
#elif defined(TARGET_ARM64)
static bool varTypeNeedsPartialCalleeSave(var_types type)
{
assert(type != TYP_STRUCT);
// The ARM64 ABI requires the callee to save only the lower 8 bytes of the FP callee-saved registers.
// For SIMD types longer than 8 bytes, the caller is responsible for saving and restoring the upper bytes.
return ((type == TYP_SIMD16) || (type == TYP_SIMD12));
}
#else // !defined(TARGET_AMD64) && !defined(TARGET_ARM64)
#error("Unknown target architecture for FEATURE_SIMD")
#endif // !defined(TARGET_AMD64) && !defined(TARGET_ARM64)
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE
protected:
// Some things are used by both LSRA and regpredict allocators.
FrameType rpFrameType;
bool rpMustCreateEBPCalled; // Set to true after we have called rpMustCreateEBPFrame once
bool rpMustCreateEBPFrame(INDEBUG(const char** wbReason));
private:
Lowering* m_pLowering; // Lowering; needed to Lower IR that's added or modified after Lowering.
LinearScanInterface* m_pLinearScan; // Linear Scan allocator
/* raIsVarargsStackArg is called by raMarkStkVars and by
lvaComputeRefCounts. It identifies the special case
where a varargs function has a parameter passed on the
stack, other than the special varargs handle. Such parameters
require special treatment, because they cannot be tracked
by the GC (their offsets in the stack are not known
at compile time).
*/
bool raIsVarargsStackArg(unsigned lclNum)
{
#ifdef TARGET_X86
LclVarDsc* varDsc = lvaGetDesc(lclNum);
assert(varDsc->lvIsParam);
return (info.compIsVarArgs && !varDsc->lvIsRegArg && (lclNum != lvaVarargsHandleArg));
#else // TARGET_X86
return false;
#endif // TARGET_X86
}
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX EEInterface XX
XX XX
XX Get to the class and method info from the Execution Engine given XX
XX tokens for the class and method XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
public:
// Get handles
void eeGetCallInfo(CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_RESOLVED_TOKEN* pConstrainedToken,
CORINFO_CALLINFO_FLAGS flags,
CORINFO_CALL_INFO* pResult);
void eeGetFieldInfo(CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_ACCESS_FLAGS flags,
CORINFO_FIELD_INFO* pResult);
// Get the flags
bool eeIsValueClass(CORINFO_CLASS_HANDLE clsHnd);
bool eeIsIntrinsic(CORINFO_METHOD_HANDLE ftn);
bool eeIsFieldStatic(CORINFO_FIELD_HANDLE fldHnd);
var_types eeGetFieldType(CORINFO_FIELD_HANDLE fldHnd, CORINFO_CLASS_HANDLE* pStructHnd = nullptr);
#if defined(DEBUG) || defined(FEATURE_JIT_METHOD_PERF) || defined(FEATURE_SIMD) || defined(TRACK_LSRA_STATS)
const char* eeGetMethodName(CORINFO_METHOD_HANDLE hnd, const char** className);
const char* eeGetMethodFullName(CORINFO_METHOD_HANDLE hnd);
unsigned compMethodHash(CORINFO_METHOD_HANDLE methodHandle);
bool eeIsNativeMethod(CORINFO_METHOD_HANDLE method);
CORINFO_METHOD_HANDLE eeGetMethodHandleForNative(CORINFO_METHOD_HANDLE method);
#endif
var_types eeGetArgType(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig);
var_types eeGetArgType(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig, bool* isPinned);
CORINFO_CLASS_HANDLE eeGetArgClass(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE list);
CORINFO_CLASS_HANDLE eeGetClassFromContext(CORINFO_CONTEXT_HANDLE context);
unsigned eeGetArgSize(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig);
static unsigned eeGetArgSizeAlignment(var_types type, bool isFloatHfa);
// VOM info, method sigs
void eeGetSig(unsigned sigTok,
CORINFO_MODULE_HANDLE scope,
CORINFO_CONTEXT_HANDLE context,
CORINFO_SIG_INFO* retSig);
void eeGetCallSiteSig(unsigned sigTok,
CORINFO_MODULE_HANDLE scope,
CORINFO_CONTEXT_HANDLE context,
CORINFO_SIG_INFO* retSig);
void eeGetMethodSig(CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* retSig, CORINFO_CLASS_HANDLE owner = nullptr);
// Method entry-points, instrs
CORINFO_METHOD_HANDLE eeMarkNativeTarget(CORINFO_METHOD_HANDLE method);
CORINFO_EE_INFO eeInfo;
bool eeInfoInitialized;
CORINFO_EE_INFO* eeGetEEInfo();
// Gets the offset of an SDArray's first element
static unsigned eeGetArrayDataOffset();
// Gets the offset of an MDArray's first element
static unsigned eeGetMDArrayDataOffset(unsigned rank);
// Gets the offset of an MDArray's dimension length for a given dimension.
static unsigned eeGetMDArrayLengthOffset(unsigned rank, unsigned dimension);
// Gets the offset of an MDArray's lower bound for a given dimension.
static unsigned eeGetMDArrayLowerBoundOffset(unsigned rank, unsigned dimension);
GenTree* eeGetPInvokeCookie(CORINFO_SIG_INFO* szMetaSig);
// Returns the page size for the target machine as reported by the EE.
target_size_t eeGetPageSize()
{
return (target_size_t)eeGetEEInfo()->osPageSize;
}
//------------------------------------------------------------------------
// VirtualStubParam: virtual stub dispatch extra parameter (slot address).
//
// It represents the ABI- and target-specific register used for the parameter.
//
class VirtualStubParamInfo
{
public:
VirtualStubParamInfo(bool isCoreRTABI)
{
#if defined(TARGET_X86)
reg = REG_EAX;
regMask = RBM_EAX;
#elif defined(TARGET_AMD64)
if (isCoreRTABI)
{
reg = REG_R10;
regMask = RBM_R10;
}
else
{
reg = REG_R11;
regMask = RBM_R11;
}
#elif defined(TARGET_ARM)
if (isCoreRTABI)
{
reg = REG_R12;
regMask = RBM_R12;
}
else
{
reg = REG_R4;
regMask = RBM_R4;
}
#elif defined(TARGET_ARM64)
reg = REG_R11;
regMask = RBM_R11;
#else
#error Unsupported or unset target architecture
#endif
}
regNumber GetReg() const
{
return reg;
}
_regMask_enum GetRegMask() const
{
return regMask;
}
private:
regNumber reg;
_regMask_enum regMask;
};
VirtualStubParamInfo* virtualStubParamInfo;
bool IsTargetAbi(CORINFO_RUNTIME_ABI abi)
{
return eeGetEEInfo()->targetAbi == abi;
}
bool generateCFIUnwindCodes()
{
#if defined(FEATURE_CFI_SUPPORT)
return TargetOS::IsUnix && IsTargetAbi(CORINFO_CORERT_ABI);
#else
return false;
#endif
}
// Debugging support - Line number info
void eeGetStmtOffsets();
unsigned eeBoundariesCount;
ICorDebugInfo::OffsetMapping* eeBoundaries; // Boundaries to report to the EE
void eeSetLIcount(unsigned count);
void eeSetLIinfo(unsigned which, UNATIVE_OFFSET offs, IPmappingDscKind kind, const ILLocation& loc);
void eeSetLIdone();
#ifdef DEBUG
static void eeDispILOffs(IL_OFFSET offs);
static void eeDispSourceMappingOffs(uint32_t offs);
static void eeDispLineInfo(const ICorDebugInfo::OffsetMapping* line);
void eeDispLineInfos();
#endif // DEBUG
// Debugging support - Local var info
void eeGetVars();
unsigned eeVarsCount;
struct VarResultInfo
{
UNATIVE_OFFSET startOffset;
UNATIVE_OFFSET endOffset;
DWORD varNumber;
CodeGenInterface::siVarLoc loc;
} * eeVars;
void eeSetLVcount(unsigned count);
void eeSetLVinfo(unsigned which,
UNATIVE_OFFSET startOffs,
UNATIVE_OFFSET length,
unsigned varNum,
const CodeGenInterface::siVarLoc& loc);
void eeSetLVdone();
#ifdef DEBUG
void eeDispVar(ICorDebugInfo::NativeVarInfo* var);
void eeDispVars(CORINFO_METHOD_HANDLE ftn, ULONG32 cVars, ICorDebugInfo::NativeVarInfo* vars);
#endif // DEBUG
// ICorJitInfo wrappers
void eeReserveUnwindInfo(bool isFunclet, bool isColdCode, ULONG unwindSize);
void eeAllocUnwindInfo(BYTE* pHotCode,
BYTE* pColdCode,
ULONG startOffset,
ULONG endOffset,
ULONG unwindSize,
BYTE* pUnwindBlock,
CorJitFuncKind funcKind);
void eeSetEHcount(unsigned cEH);
void eeSetEHinfo(unsigned EHnumber, const CORINFO_EH_CLAUSE* clause);
WORD eeGetRelocTypeHint(void* target);
// ICorStaticInfo wrapper functions
bool eeTryResolveToken(CORINFO_RESOLVED_TOKEN* resolvedToken);
#if defined(UNIX_AMD64_ABI)
#ifdef DEBUG
static void dumpSystemVClassificationType(SystemVClassificationType ct);
#endif // DEBUG
void eeGetSystemVAmd64PassStructInRegisterDescriptor(
/*IN*/ CORINFO_CLASS_HANDLE structHnd,
/*OUT*/ SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* structPassInRegDescPtr);
#endif // UNIX_AMD64_ABI
template <typename ParamType>
bool eeRunWithErrorTrap(void (*function)(ParamType*), ParamType* param)
{
return eeRunWithErrorTrapImp(reinterpret_cast<void (*)(void*)>(function), reinterpret_cast<void*>(param));
}
bool eeRunWithErrorTrapImp(void (*function)(void*), void* param);
template <typename ParamType>
bool eeRunWithSPMIErrorTrap(void (*function)(ParamType*), ParamType* param)
{
return eeRunWithSPMIErrorTrapImp(reinterpret_cast<void (*)(void*)>(function), reinterpret_cast<void*>(param));
}
bool eeRunWithSPMIErrorTrapImp(void (*function)(void*), void* param);
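// Illustrative usage sketch for the error-trap wrappers above (DumpParam is a hypothetical type,
// not part of this header):
//
//   struct DumpParam
//   {
//       Compiler* pThis;
//   };
//
//   DumpParam param;
//   param.pThis = this;
//
//   bool success = eeRunWithErrorTrap<DumpParam>(
//       [](DumpParam* p) {
//           // work that may trigger an EE-side exception goes here
//       },
//       &param);
//
//   if (!success)
//   {
//       // the EE reported an error; recover or bail out
//   }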
// Utility functions
const char* eeGetFieldName(CORINFO_FIELD_HANDLE fieldHnd, const char** classNamePtr = nullptr);
#if defined(DEBUG)
const WCHAR* eeGetCPString(size_t stringHandle);
unsigned eeTryGetClassSize(CORINFO_CLASS_HANDLE clsHnd);
const char16_t* eeGetShortClassName(CORINFO_CLASS_HANDLE clsHnd);
#endif
const char* eeGetClassName(CORINFO_CLASS_HANDLE clsHnd);
static CORINFO_METHOD_HANDLE eeFindHelper(unsigned helper);
static CorInfoHelpFunc eeGetHelperNum(CORINFO_METHOD_HANDLE method);
static bool IsSharedStaticHelper(GenTree* tree);
static bool IsGcSafePoint(GenTreeCall* call);
static CORINFO_FIELD_HANDLE eeFindJitDataOffs(unsigned jitDataOffs);
// returns true/false if 'field' is a Jit Data offset
static bool eeIsJitDataOffs(CORINFO_FIELD_HANDLE field);
// returns a number < 0 if 'field' is not a Jit Data offset, otherwise the data offset (limited to 2GB)
static int eeGetJitDataOffs(CORINFO_FIELD_HANDLE field);
/*****************************************************************************/
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX CodeGenerator XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
public:
CodeGenInterface* codeGen;
// Record the instr offset mapping to the generated code
jitstd::list<IPmappingDsc> genIPmappings;
#ifdef DEBUG
jitstd::list<PreciseIPMapping> genPreciseIPmappings;
#endif
// Managed RetVal - A side hash table meant to record the mapping from a
// GT_CALL node to its debug info. This info is used to emit sequence points
// that can be used by the debugger to determine the native offset at which the
// managed RetVal will be available.
//
// In fact we can store debug info in a GT_CALL node. This was ruled out in
// favor of a side table for two reasons: 1) We need debug info for only those
// GT_CALL nodes (created during importation) that correspond to an IL call and
// whose return type is other than TYP_VOID. 2) The GT_CALL node is a frequently used
// structure and the IL offset is needed only when generating debuggable code. Therefore
// it is desirable to avoid a memory size penalty in retail scenarios.
typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, DebugInfo> CallSiteDebugInfoTable;
CallSiteDebugInfoTable* genCallSite2DebugInfoMap;
unsigned genReturnLocal; // Local number for the return value when applicable.
BasicBlock* genReturnBB; // jumped to when not optimizing for speed.
// The following properties are part of CodeGenContext. Getters are provided here for
// convenience and backward compatibility, but the properties can only be set by invoking
// the setter on CodeGenContext directly.
emitter* GetEmitter() const
{
return codeGen->GetEmitter();
}
bool isFramePointerUsed() const
{
return codeGen->isFramePointerUsed();
}
bool GetInterruptible()
{
return codeGen->GetInterruptible();
}
void SetInterruptible(bool value)
{
codeGen->SetInterruptible(value);
}
#if DOUBLE_ALIGN
const bool genDoubleAlign()
{
return codeGen->doDoubleAlign();
}
DWORD getCanDoubleAlign();
bool shouldDoubleAlign(unsigned refCntStk,
unsigned refCntReg,
weight_t refCntWtdReg,
unsigned refCntStkParam,
weight_t refCntWtdStkDbl);
#endif // DOUBLE_ALIGN
bool IsFullPtrRegMapRequired()
{
return codeGen->IsFullPtrRegMapRequired();
}
void SetFullPtrRegMapRequired(bool value)
{
codeGen->SetFullPtrRegMapRequired(value);
}
// Things that MAY belong either in CodeGen or CodeGenContext
#if defined(FEATURE_EH_FUNCLETS)
FuncInfoDsc* compFuncInfos;
unsigned short compCurrFuncIdx;
unsigned short compFuncInfoCount;
unsigned short compFuncCount()
{
assert(fgFuncletsCreated);
return compFuncInfoCount;
}
#else // !FEATURE_EH_FUNCLETS
// This is a no-op when there are no funclets!
void genUpdateCurrentFunclet(BasicBlock* block)
{
return;
}
FuncInfoDsc compFuncInfoRoot;
static const unsigned compCurrFuncIdx = 0;
unsigned short compFuncCount()
{
return 1;
}
#endif // !FEATURE_EH_FUNCLETS
FuncInfoDsc* funCurrentFunc();
void funSetCurrentFunc(unsigned funcIdx);
FuncInfoDsc* funGetFunc(unsigned funcIdx);
unsigned int funGetFuncIdx(BasicBlock* block);
// LIVENESS
VARSET_TP compCurLife; // current live variables
GenTree* compCurLifeTree; // node after which compCurLife has been computed
// Compare the given "newLife" with the last set of live variables and update
// codeGen "gcInfo", siScopes, "regSet" with the new variable's homes/liveness.
template <bool ForCodeGen>
void compChangeLife(VARSET_VALARG_TP newLife);
// Update the GC's masks and the registers' masks, and report changes to variables' homes, given a set
// of current live variables, if changes have happened since "compCurLife".
template <bool ForCodeGen>
inline void compUpdateLife(VARSET_VALARG_TP newLife);
// Gets a register mask that represents the kill set for a helper call since
// not all JIT Helper calls follow the standard ABI on the target architecture.
regMaskTP compHelperCallKillSet(CorInfoHelpFunc helper);
#ifdef TARGET_ARM
// Requires that "varDsc" be a promoted struct local variable being passed as an argument, beginning at
// "firstArgRegNum", which is assumed to have already been aligned to the register alignment restriction of the
// struct type. Adds bits to "*pArgSkippedRegMask" for any argument registers *not* used in passing "varDsc" --
// i.e., internal "holes" caused by internal alignment constraints. For example, if the struct contained an int and
// a double, and we start at R0 (on ARM), then R1 would be skipped, and the bit for R1 would be added to the mask.
void fgAddSkippedRegsInPromotedStructArg(LclVarDsc* varDsc, unsigned firstArgRegNum, regMaskTP* pArgSkippedRegMask);
#endif // TARGET_ARM
// If "tree" is a indirection (GT_IND, or GT_OBJ) whose arg is an ADDR, whose arg is a LCL_VAR, return that LCL_VAR
// node, else NULL.
static GenTreeLclVar* fgIsIndirOfAddrOfLocal(GenTree* tree);
// This map is indexed by GT_OBJ nodes that are addresses of promoted struct variables, which
// have been annotated with the GTF_VAR_DEATH flag. If such a node is *not* mapped in this
// table, one may assume that all the (tracked) field vars die at this GT_OBJ. Otherwise,
// the node maps to a pointer to a VARSET_TP, containing set bits for each of the tracked field
// vars of the promoted struct local that go dead at the given node (the set bits are the bits
// for the tracked var indices of the field vars, as in a live var set).
//
// The map is allocated on demand so all map operations should use one of the following three
// wrapper methods.
NodeToVarsetPtrMap* m_promotedStructDeathVars;
NodeToVarsetPtrMap* GetPromotedStructDeathVars()
{
if (m_promotedStructDeathVars == nullptr)
{
m_promotedStructDeathVars = new (getAllocator()) NodeToVarsetPtrMap(getAllocator());
}
return m_promotedStructDeathVars;
}
void ClearPromotedStructDeathVars()
{
if (m_promotedStructDeathVars != nullptr)
{
m_promotedStructDeathVars->RemoveAll();
}
}
bool LookupPromotedStructDeathVars(GenTree* tree, VARSET_TP** bits)
{
*bits = nullptr;
bool result = false;
if (m_promotedStructDeathVars != nullptr)
{
result = m_promotedStructDeathVars->Lookup(tree, bits);
}
return result;
}
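// Illustrative usage sketch for the wrappers above (objNode is a hypothetical GT_OBJ node):
//
//   VARSET_TP* deadTrackedFields = nullptr;
//   if (LookupPromotedStructDeathVars(objNode, &deadTrackedFields))
//   {
//       // only the tracked field vars whose bits are set in *deadTrackedFields die at objNode
//   }
//   else
//   {
//       // no entry: assume all the (tracked) field vars of the promoted struct die at objNode
//   }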
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX UnwindInfo XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#if !defined(__GNUC__)
#pragma region Unwind information
#endif
public:
//
// Infrastructure functions: start/stop/reserve/emit.
//
void unwindBegProlog();
void unwindEndProlog();
void unwindBegEpilog();
void unwindEndEpilog();
void unwindReserve();
void unwindEmit(void* pHotCode, void* pColdCode);
//
// Specific unwind information functions: called by code generation to indicate a particular
// prolog or epilog unwindable instruction has been generated.
//
void unwindPush(regNumber reg);
void unwindAllocStack(unsigned size);
void unwindSetFrameReg(regNumber reg, unsigned offset);
void unwindSaveReg(regNumber reg, unsigned offset);
#if defined(TARGET_ARM)
void unwindPushMaskInt(regMaskTP mask);
void unwindPushMaskFloat(regMaskTP mask);
void unwindPopMaskInt(regMaskTP mask);
void unwindPopMaskFloat(regMaskTP mask);
void unwindBranch16(); // The epilog terminates with a 16-bit branch (e.g., "bx lr")
void unwindNop(unsigned codeSizeInBytes); // Generate unwind NOP code. 'codeSizeInBytes' is 2 or 4 bytes. Only
// called via unwindPadding().
void unwindPadding(); // Generate a sequence of unwind NOP codes representing instructions between the last
// instruction and the current location.
#endif // TARGET_ARM
#if defined(TARGET_ARM64)
void unwindNop();
void unwindPadding(); // Generate a sequence of unwind NOP codes representing instructions between the last
// instruction and the current location.
void unwindSaveReg(regNumber reg, int offset); // str reg, [sp, #offset]
void unwindSaveRegPreindexed(regNumber reg, int offset); // str reg, [sp, #offset]!
void unwindSaveRegPair(regNumber reg1, regNumber reg2, int offset); // stp reg1, reg2, [sp, #offset]
void unwindSaveRegPairPreindexed(regNumber reg1, regNumber reg2, int offset); // stp reg1, reg2, [sp, #offset]!
void unwindSaveNext(); // unwind code: save_next
void unwindReturn(regNumber reg); // ret lr
#endif // defined(TARGET_ARM64)
//
// Private "helper" functions for the unwind implementation.
//
private:
#if defined(FEATURE_EH_FUNCLETS)
void unwindGetFuncLocations(FuncInfoDsc* func,
bool getHotSectionData,
/* OUT */ emitLocation** ppStartLoc,
/* OUT */ emitLocation** ppEndLoc);
#endif // FEATURE_EH_FUNCLETS
void unwindReserveFunc(FuncInfoDsc* func);
void unwindEmitFunc(FuncInfoDsc* func, void* pHotCode, void* pColdCode);
#if defined(TARGET_AMD64) || (defined(TARGET_X86) && defined(FEATURE_EH_FUNCLETS))
void unwindReserveFuncHelper(FuncInfoDsc* func, bool isHotCode);
void unwindEmitFuncHelper(FuncInfoDsc* func, void* pHotCode, void* pColdCode, bool isHotCode);
#endif // TARGET_AMD64 || (TARGET_X86 && FEATURE_EH_FUNCLETS)
UNATIVE_OFFSET unwindGetCurrentOffset(FuncInfoDsc* func);
#if defined(TARGET_AMD64)
void unwindBegPrologWindows();
void unwindPushWindows(regNumber reg);
void unwindAllocStackWindows(unsigned size);
void unwindSetFrameRegWindows(regNumber reg, unsigned offset);
void unwindSaveRegWindows(regNumber reg, unsigned offset);
#ifdef UNIX_AMD64_ABI
void unwindSaveRegCFI(regNumber reg, unsigned offset);
#endif // UNIX_AMD64_ABI
#elif defined(TARGET_ARM)
void unwindPushPopMaskInt(regMaskTP mask, bool useOpsize16);
void unwindPushPopMaskFloat(regMaskTP mask);
#endif // TARGET_ARM
#if defined(FEATURE_CFI_SUPPORT)
short mapRegNumToDwarfReg(regNumber reg);
void createCfiCode(FuncInfoDsc* func, UNATIVE_OFFSET codeOffset, UCHAR opcode, short dwarfReg, INT offset = 0);
void unwindPushPopCFI(regNumber reg);
void unwindBegPrologCFI();
void unwindPushPopMaskCFI(regMaskTP regMask, bool isFloat);
void unwindAllocStackCFI(unsigned size);
void unwindSetFrameRegCFI(regNumber reg, unsigned offset);
void unwindEmitFuncCFI(FuncInfoDsc* func, void* pHotCode, void* pColdCode);
#ifdef DEBUG
void DumpCfiInfo(bool isHotCode,
UNATIVE_OFFSET startOffset,
UNATIVE_OFFSET endOffset,
DWORD cfiCodeBytes,
const CFI_CODE* const pCfiCode);
#endif
#endif // FEATURE_CFI_SUPPORT
#if !defined(__GNUC__)
#pragma endregion // Note: region is NOT under !defined(__GNUC__)
#endif
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX SIMD XX
XX XX
XX Info about SIMD types, methods and the SIMD assembly (i.e. the assembly XX
XX that contains the distinguished, well-known SIMD type definitions). XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
bool IsBaselineSimdIsaSupported()
{
#ifdef FEATURE_SIMD
#if defined(TARGET_XARCH)
CORINFO_InstructionSet minimumIsa = InstructionSet_SSE2;
#elif defined(TARGET_ARM64)
CORINFO_InstructionSet minimumIsa = InstructionSet_AdvSimd;
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
return compOpportunisticallyDependsOn(minimumIsa);
#else
return false;
#endif
}
#if defined(DEBUG)
bool IsBaselineSimdIsaSupportedDebugOnly()
{
#ifdef FEATURE_SIMD
#if defined(TARGET_XARCH)
CORINFO_InstructionSet minimumIsa = InstructionSet_SSE2;
#elif defined(TARGET_ARM64)
CORINFO_InstructionSet minimumIsa = InstructionSet_AdvSimd;
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
return compIsaSupportedDebugOnly(minimumIsa);
#else
return false;
#endif // FEATURE_SIMD
}
#endif // DEBUG
// Get highest available level for SIMD codegen
SIMDLevel getSIMDSupportLevel()
{
#if defined(TARGET_XARCH)
if (compOpportunisticallyDependsOn(InstructionSet_AVX2))
{
return SIMD_AVX2_Supported;
}
if (compOpportunisticallyDependsOn(InstructionSet_SSE42))
{
return SIMD_SSE4_Supported;
}
// min bar is SSE2
return SIMD_SSE2_Supported;
#else
assert(!"Available instruction set(s) for SIMD codegen is not defined for target arch");
unreached();
return SIMD_Not_Supported;
#endif
}
bool isIntrinsicType(CORINFO_CLASS_HANDLE clsHnd)
{
return info.compCompHnd->isIntrinsicType(clsHnd);
}
const char* getClassNameFromMetadata(CORINFO_CLASS_HANDLE cls, const char** namespaceName)
{
return info.compCompHnd->getClassNameFromMetadata(cls, namespaceName);
}
CORINFO_CLASS_HANDLE getTypeInstantiationArgument(CORINFO_CLASS_HANDLE cls, unsigned index)
{
return info.compCompHnd->getTypeInstantiationArgument(cls, index);
}
#ifdef FEATURE_SIMD
// Have we identified any SIMD types?
// This is currently used by struct promotion to avoid querying type information for a struct
// field (to see if it is a SIMD type) when we haven't seen any SIMD types or operations in
// the method.
bool _usesSIMDTypes;
bool usesSIMDTypes()
{
return _usesSIMDTypes;
}
void setUsesSIMDTypes(bool value)
{
_usesSIMDTypes = value;
}
// This is a temp lclVar allocated on the stack as TYP_SIMD. It is used to implement intrinsics
// that require indexed access to the individual fields of the vector, which is not well supported
// by the hardware. It is allocated when/if such situations are encountered during Lowering.
unsigned lvaSIMDInitTempVarNum;
struct SIMDHandlesCache
{
// SIMD Types
CORINFO_CLASS_HANDLE SIMDFloatHandle;
CORINFO_CLASS_HANDLE SIMDDoubleHandle;
CORINFO_CLASS_HANDLE SIMDIntHandle;
CORINFO_CLASS_HANDLE SIMDUShortHandle;
CORINFO_CLASS_HANDLE SIMDUByteHandle;
CORINFO_CLASS_HANDLE SIMDShortHandle;
CORINFO_CLASS_HANDLE SIMDByteHandle;
CORINFO_CLASS_HANDLE SIMDLongHandle;
CORINFO_CLASS_HANDLE SIMDUIntHandle;
CORINFO_CLASS_HANDLE SIMDULongHandle;
CORINFO_CLASS_HANDLE SIMDNIntHandle;
CORINFO_CLASS_HANDLE SIMDNUIntHandle;
CORINFO_CLASS_HANDLE SIMDVector2Handle;
CORINFO_CLASS_HANDLE SIMDVector3Handle;
CORINFO_CLASS_HANDLE SIMDVector4Handle;
CORINFO_CLASS_HANDLE SIMDVectorHandle;
#ifdef FEATURE_HW_INTRINSICS
#if defined(TARGET_ARM64)
CORINFO_CLASS_HANDLE Vector64FloatHandle;
CORINFO_CLASS_HANDLE Vector64DoubleHandle;
CORINFO_CLASS_HANDLE Vector64IntHandle;
CORINFO_CLASS_HANDLE Vector64UShortHandle;
CORINFO_CLASS_HANDLE Vector64UByteHandle;
CORINFO_CLASS_HANDLE Vector64ShortHandle;
CORINFO_CLASS_HANDLE Vector64ByteHandle;
CORINFO_CLASS_HANDLE Vector64LongHandle;
CORINFO_CLASS_HANDLE Vector64UIntHandle;
CORINFO_CLASS_HANDLE Vector64ULongHandle;
CORINFO_CLASS_HANDLE Vector64NIntHandle;
CORINFO_CLASS_HANDLE Vector64NUIntHandle;
#endif // defined(TARGET_ARM64)
CORINFO_CLASS_HANDLE Vector128FloatHandle;
CORINFO_CLASS_HANDLE Vector128DoubleHandle;
CORINFO_CLASS_HANDLE Vector128IntHandle;
CORINFO_CLASS_HANDLE Vector128UShortHandle;
CORINFO_CLASS_HANDLE Vector128UByteHandle;
CORINFO_CLASS_HANDLE Vector128ShortHandle;
CORINFO_CLASS_HANDLE Vector128ByteHandle;
CORINFO_CLASS_HANDLE Vector128LongHandle;
CORINFO_CLASS_HANDLE Vector128UIntHandle;
CORINFO_CLASS_HANDLE Vector128ULongHandle;
CORINFO_CLASS_HANDLE Vector128NIntHandle;
CORINFO_CLASS_HANDLE Vector128NUIntHandle;
#if defined(TARGET_XARCH)
CORINFO_CLASS_HANDLE Vector256FloatHandle;
CORINFO_CLASS_HANDLE Vector256DoubleHandle;
CORINFO_CLASS_HANDLE Vector256IntHandle;
CORINFO_CLASS_HANDLE Vector256UShortHandle;
CORINFO_CLASS_HANDLE Vector256UByteHandle;
CORINFO_CLASS_HANDLE Vector256ShortHandle;
CORINFO_CLASS_HANDLE Vector256ByteHandle;
CORINFO_CLASS_HANDLE Vector256LongHandle;
CORINFO_CLASS_HANDLE Vector256UIntHandle;
CORINFO_CLASS_HANDLE Vector256ULongHandle;
CORINFO_CLASS_HANDLE Vector256NIntHandle;
CORINFO_CLASS_HANDLE Vector256NUIntHandle;
#endif // defined(TARGET_XARCH)
#endif // FEATURE_HW_INTRINSICS
SIMDHandlesCache()
{
memset(this, 0, sizeof(*this));
}
};
SIMDHandlesCache* m_simdHandleCache;
// Get an appropriate "zero" for the given type and class handle.
GenTree* gtGetSIMDZero(var_types simdType, CorInfoType simdBaseJitType, CORINFO_CLASS_HANDLE simdHandle);
// Get the handle for a SIMD type.
CORINFO_CLASS_HANDLE gtGetStructHandleForSIMD(var_types simdType, CorInfoType simdBaseJitType)
{
if (m_simdHandleCache == nullptr)
{
// This may happen if the JIT generates SIMD nodes on its own, without importing them.
// Otherwise getBaseJitTypeAndSizeOfSIMDType should have created the cache.
return NO_CLASS_HANDLE;
}
if (simdBaseJitType == CORINFO_TYPE_FLOAT)
{
switch (simdType)
{
case TYP_SIMD8:
return m_simdHandleCache->SIMDVector2Handle;
case TYP_SIMD12:
return m_simdHandleCache->SIMDVector3Handle;
case TYP_SIMD16:
if ((getSIMDVectorType() == TYP_SIMD32) ||
(m_simdHandleCache->SIMDVector4Handle != NO_CLASS_HANDLE))
{
return m_simdHandleCache->SIMDVector4Handle;
}
break;
case TYP_SIMD32:
break;
default:
unreached();
}
}
assert(emitTypeSize(simdType) <= largestEnregisterableStructSize());
switch (simdBaseJitType)
{
case CORINFO_TYPE_FLOAT:
return m_simdHandleCache->SIMDFloatHandle;
case CORINFO_TYPE_DOUBLE:
return m_simdHandleCache->SIMDDoubleHandle;
case CORINFO_TYPE_INT:
return m_simdHandleCache->SIMDIntHandle;
case CORINFO_TYPE_USHORT:
return m_simdHandleCache->SIMDUShortHandle;
case CORINFO_TYPE_UBYTE:
return m_simdHandleCache->SIMDUByteHandle;
case CORINFO_TYPE_SHORT:
return m_simdHandleCache->SIMDShortHandle;
case CORINFO_TYPE_BYTE:
return m_simdHandleCache->SIMDByteHandle;
case CORINFO_TYPE_LONG:
return m_simdHandleCache->SIMDLongHandle;
case CORINFO_TYPE_UINT:
return m_simdHandleCache->SIMDUIntHandle;
case CORINFO_TYPE_ULONG:
return m_simdHandleCache->SIMDULongHandle;
case CORINFO_TYPE_NATIVEINT:
return m_simdHandleCache->SIMDNIntHandle;
case CORINFO_TYPE_NATIVEUINT:
return m_simdHandleCache->SIMDNUIntHandle;
default:
assert(!"Didn't find a class handle for simdType");
}
return NO_CLASS_HANDLE;
}
// Returns true if this is a SIMD type that should be considered an opaque
// vector type (i.e. do not analyze or promote its fields).
// Note that all but the fixed vector types are opaque, even though they may
// actually be declared as having fields.
bool isOpaqueSIMDType(CORINFO_CLASS_HANDLE structHandle) const
{
return ((m_simdHandleCache != nullptr) && (structHandle != m_simdHandleCache->SIMDVector2Handle) &&
(structHandle != m_simdHandleCache->SIMDVector3Handle) &&
(structHandle != m_simdHandleCache->SIMDVector4Handle));
}
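// For example, System.Numerics.Vector3 maps to SIMDVector3Handle and is therefore not opaque (its
// fields may be analyzed and promoted), while Vector<T> maps to SIMDVectorHandle and is treated as opaque.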
// Returns true if the tree corresponds to a TYP_SIMD lcl var.
// Note that both SIMD vector args and locals are marked as lvSIMDType = true, but the
// type of an arg node is TYP_BYREF while that of a local node is TYP_SIMD or TYP_STRUCT.
bool isSIMDTypeLocal(GenTree* tree)
{
return tree->OperIsLocal() && lvaGetDesc(tree->AsLclVarCommon())->lvSIMDType;
}
// Returns true if the lclVar is an opaque SIMD type.
bool isOpaqueSIMDLclVar(const LclVarDsc* varDsc) const
{
if (!varDsc->lvSIMDType)
{
return false;
}
return isOpaqueSIMDType(varDsc->GetStructHnd());
}
static bool isRelOpSIMDIntrinsic(SIMDIntrinsicID intrinsicId)
{
return (intrinsicId == SIMDIntrinsicEqual);
}
// Returns base JIT type of a TYP_SIMD local.
// Returns CORINFO_TYPE_UNDEF if the local is not TYP_SIMD.
CorInfoType getBaseJitTypeOfSIMDLocal(GenTree* tree)
{
if (isSIMDTypeLocal(tree))
{
return lvaGetDesc(tree->AsLclVarCommon())->GetSimdBaseJitType();
}
return CORINFO_TYPE_UNDEF;
}
bool isSIMDClass(CORINFO_CLASS_HANDLE clsHnd)
{
if (isIntrinsicType(clsHnd))
{
const char* namespaceName = nullptr;
(void)getClassNameFromMetadata(clsHnd, &namespaceName);
return strcmp(namespaceName, "System.Numerics") == 0;
}
return false;
}
bool isSIMDClass(typeInfo* pTypeInfo)
{
return pTypeInfo->IsStruct() && isSIMDClass(pTypeInfo->GetClassHandleForValueClass());
}
bool isHWSIMDClass(CORINFO_CLASS_HANDLE clsHnd)
{
#ifdef FEATURE_HW_INTRINSICS
if (isIntrinsicType(clsHnd))
{
const char* namespaceName = nullptr;
(void)getClassNameFromMetadata(clsHnd, &namespaceName);
return strcmp(namespaceName, "System.Runtime.Intrinsics") == 0;
}
#endif // FEATURE_HW_INTRINSICS
return false;
}
bool isHWSIMDClass(typeInfo* pTypeInfo)
{
#ifdef FEATURE_HW_INTRINSICS
return pTypeInfo->IsStruct() && isHWSIMDClass(pTypeInfo->GetClassHandleForValueClass());
#else
return false;
#endif
}
bool isSIMDorHWSIMDClass(CORINFO_CLASS_HANDLE clsHnd)
{
return isSIMDClass(clsHnd) || isHWSIMDClass(clsHnd);
}
bool isSIMDorHWSIMDClass(typeInfo* pTypeInfo)
{
return isSIMDClass(pTypeInfo) || isHWSIMDClass(pTypeInfo);
}
// Get the base (element) type and size in bytes for a SIMD type. Returns CORINFO_TYPE_UNDEF
// if it is not a SIMD type or is an unsupported base JIT type.
CorInfoType getBaseJitTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd, unsigned* sizeBytes = nullptr);
CorInfoType getBaseJitTypeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd)
{
return getBaseJitTypeAndSizeOfSIMDType(typeHnd, nullptr);
}
// Get SIMD Intrinsic info given the method handle.
// Also sets typeHnd, argCount, baseType and sizeBytes out params.
const SIMDIntrinsicInfo* getSIMDIntrinsicInfo(CORINFO_CLASS_HANDLE* typeHnd,
CORINFO_METHOD_HANDLE methodHnd,
CORINFO_SIG_INFO* sig,
bool isNewObj,
unsigned* argCount,
CorInfoType* simdBaseJitType,
unsigned* sizeBytes);
// Pops and returns a GenTree node from the importer's type stack.
// Normalizes a TYP_STRUCT value in the case of GT_CALL, GT_RET_EXPR and arg nodes.
GenTree* impSIMDPopStack(var_types type, bool expectAddr = false, CORINFO_CLASS_HANDLE structType = nullptr);
// Transforms operands and returns the SIMD intrinsic to be applied on
// transformed operands to obtain given relop result.
SIMDIntrinsicID impSIMDRelOp(SIMDIntrinsicID relOpIntrinsicId,
CORINFO_CLASS_HANDLE typeHnd,
unsigned simdVectorSize,
CorInfoType* inOutBaseJitType,
GenTree** op1,
GenTree** op2);
#if defined(TARGET_XARCH)
// Transforms operands and returns the SIMD intrinsic to be applied on
// transformed operands to obtain == comparison result.
SIMDIntrinsicID impSIMDLongRelOpEqual(CORINFO_CLASS_HANDLE typeHnd,
unsigned simdVectorSize,
GenTree** op1,
GenTree** op2);
#endif // defined(TARGET_XARCH)
void setLclRelatedToSIMDIntrinsic(GenTree* tree);
bool areFieldsContiguous(GenTree* op1, GenTree* op2);
bool areLocalFieldsContiguous(GenTreeLclFld* first, GenTreeLclFld* second);
bool areArrayElementsContiguous(GenTree* op1, GenTree* op2);
bool areArgumentsContiguous(GenTree* op1, GenTree* op2);
GenTree* createAddressNodeForSIMDInit(GenTree* tree, unsigned simdSize);
// Check methodHnd to see if it is a SIMD method that is expanded as an intrinsic in the JIT.
GenTree* impSIMDIntrinsic(OPCODE opcode,
GenTree* newobjThis,
CORINFO_CLASS_HANDLE clsHnd,
CORINFO_METHOD_HANDLE method,
CORINFO_SIG_INFO* sig,
unsigned methodFlags,
int memberRef);
GenTree* getOp1ForConstructor(OPCODE opcode, GenTree* newobjThis, CORINFO_CLASS_HANDLE clsHnd);
// Whether SIMD vector occupies part of SIMD register.
// SSE2: vector2f/3f are considered sub register SIMD types.
// AVX: vector2f, 3f and 4f are all considered sub register SIMD types.
bool isSubRegisterSIMDType(GenTreeSIMD* simdNode)
{
unsigned vectorRegisterByteLength;
#if defined(TARGET_XARCH)
// Calling the getSIMDVectorRegisterByteLength api causes the size of Vector<T> to be recorded
// with the AOT compiler, so that it cannot change from AOT compilation time to runtime.
// This api does not require such fixing as it merely pertains to the size of the simd type
// relative to the Vector<T> size as used at compile time. (So detecting a vector length of 16 here
// does not preclude the code from being used on a machine with a larger vector length.)
if (getSIMDSupportLevel() < SIMD_AVX2_Supported)
{
vectorRegisterByteLength = 16;
}
else
{
vectorRegisterByteLength = 32;
}
#else
vectorRegisterByteLength = getSIMDVectorRegisterByteLength();
#endif
return (simdNode->GetSimdSize() < vectorRegisterByteLength);
}
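// Illustrative example (hypothetical values, not part of the original source): a GT_SIMD node
// for a Vector3 has GetSimdSize() == 12; when only SSE2 is available the vector register length
// used above is 16 bytes, so 12 < 16 and the node is treated as a sub-register SIMD type.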
// Get the type for the hardware SIMD vector.
// This is the maximum SIMD type supported for this target.
var_types getSIMDVectorType()
{
#if defined(TARGET_XARCH)
if (getSIMDSupportLevel() == SIMD_AVX2_Supported)
{
return TYP_SIMD32;
}
else
{
// Verify and record that AVX2 isn't supported
compVerifyInstructionSetUnusable(InstructionSet_AVX2);
assert(getSIMDSupportLevel() >= SIMD_SSE2_Supported);
return TYP_SIMD16;
}
#elif defined(TARGET_ARM64)
return TYP_SIMD16;
#else
assert(!"getSIMDVectorType() unimplemented on target arch");
unreached();
#endif
}
// Get the size of the SIMD type in bytes
int getSIMDTypeSizeInBytes(CORINFO_CLASS_HANDLE typeHnd)
{
unsigned sizeBytes = 0;
(void)getBaseJitTypeAndSizeOfSIMDType(typeHnd, &sizeBytes);
return sizeBytes;
}
// Get the number of elements of baseType in a SIMD vector given by its size and baseType
static int getSIMDVectorLength(unsigned simdSize, var_types baseType);
// Get the number of elements of baseType in a SIMD vector given by its type handle
int getSIMDVectorLength(CORINFO_CLASS_HANDLE typeHnd);
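// For example (illustrative, not from the original source): a 16-byte SIMD vector with a
// TYP_FLOAT base type holds 16 / 4 = 4 elements, so getSIMDVectorLength(16, TYP_FLOAT) yields 4.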
// Get preferred alignment of SIMD type.
int getSIMDTypeAlignment(var_types simdType);
// Get the number of bytes in a System.Numeric.Vector<T> for the current compilation.
// Note - cannot be used for System.Runtime.Intrinsics
unsigned getSIMDVectorRegisterByteLength()
{
#if defined(TARGET_XARCH)
if (getSIMDSupportLevel() == SIMD_AVX2_Supported)
{
return YMM_REGSIZE_BYTES;
}
else
{
// Verify and record that AVX2 isn't supported
compVerifyInstructionSetUnusable(InstructionSet_AVX2);
assert(getSIMDSupportLevel() >= SIMD_SSE2_Supported);
return XMM_REGSIZE_BYTES;
}
#elif defined(TARGET_ARM64)
return FP_REGSIZE_BYTES;
#else
assert(!"getSIMDVectorRegisterByteLength() unimplemented on target arch");
unreached();
#endif
}
// The minimum and maximum possible number of bytes in a SIMD vector.
// maxSIMDStructBytes
// The maximum SIMD size supported by System.Numerics.Vectors or System.Runtime.Intrinsics
// SSE: 16-byte Vector<T> and Vector128<T>
// AVX: 32-byte Vector256<T> (Vector<T> is 16-byte)
// AVX2: 32-byte Vector<T> and Vector256<T>
unsigned int maxSIMDStructBytes()
{
#if defined(FEATURE_HW_INTRINSICS) && defined(TARGET_XARCH)
if (compOpportunisticallyDependsOn(InstructionSet_AVX))
{
return YMM_REGSIZE_BYTES;
}
else
{
// Verify and record that AVX2 isn't supported
compVerifyInstructionSetUnusable(InstructionSet_AVX2);
assert(getSIMDSupportLevel() >= SIMD_SSE2_Supported);
return XMM_REGSIZE_BYTES;
}
#else
return getSIMDVectorRegisterByteLength();
#endif
}
unsigned int minSIMDStructBytes()
{
return emitTypeSize(TYP_SIMD8);
}
public:
// Returns the codegen type for a given SIMD size.
static var_types getSIMDTypeForSize(unsigned size)
{
var_types simdType = TYP_UNDEF;
if (size == 8)
{
simdType = TYP_SIMD8;
}
else if (size == 12)
{
simdType = TYP_SIMD12;
}
else if (size == 16)
{
simdType = TYP_SIMD16;
}
else if (size == 32)
{
simdType = TYP_SIMD32;
}
else
{
noway_assert(!"Unexpected size for SIMD type");
}
return simdType;
}
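// For example, getSIMDTypeForSize(16) returns TYP_SIMD16; any size other than 8, 12, 16 or 32
// hits the noway_assert above.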
private:
unsigned getSIMDInitTempVarNum(var_types simdType);
#else // !FEATURE_SIMD
bool isOpaqueSIMDLclVar(LclVarDsc* varDsc)
{
return false;
}
#endif // FEATURE_SIMD
public:
//------------------------------------------------------------------------
// largestEnregisterableStructSize: The size in bytes of the largest struct that can be enregistered.
//
// Notes: It is not guaranteed that a struct of this size or smaller WILL be a
//        candidate for enregistration.
unsigned largestEnregisterableStructSize()
{
#ifdef FEATURE_SIMD
#if defined(FEATURE_HW_INTRINSICS) && defined(TARGET_XARCH)
if (opts.IsReadyToRun())
{
// Return a constant instead of calling maxSIMDStructBytes, as maxSIMDStructBytes performs
// checks that are affected by the current level of instruction set support and would
// otherwise cause the highest level of instruction set support to be reported to crossgen2.
// This api is only ever used as an optimization or in asserts, so no reporting should
// ever happen.
return YMM_REGSIZE_BYTES;
}
#endif // defined(FEATURE_HW_INTRINSICS) && defined(TARGET_XARCH)
unsigned vectorRegSize = maxSIMDStructBytes();
assert(vectorRegSize >= TARGET_POINTER_SIZE);
return vectorRegSize;
#else // !FEATURE_SIMD
return TARGET_POINTER_SIZE;
#endif // !FEATURE_SIMD
}
// Use to determine if a struct *might* be a SIMD type. As this function only takes a size, many
// structs will fit the criteria.
bool structSizeMightRepresentSIMDType(size_t structSize)
{
#ifdef FEATURE_SIMD
// Do not use maxSIMDStructBytes as that api in R2R on X86 and X64 may notify the JIT
// about the size of a struct under the assumption that the struct size needs to be recorded.
// By using largestEnregisterableStructSize here, the detail of whether or not Vector256<T>
// is enregistered will not be messaged to the R2R compiler.
return (structSize >= minSIMDStructBytes()) && (structSize <= largestEnregisterableStructSize());
#else
return false;
#endif // FEATURE_SIMD
}
#ifdef FEATURE_SIMD
static bool vnEncodesResultTypeForSIMDIntrinsic(SIMDIntrinsicID intrinsicId);
#endif // !FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
static bool vnEncodesResultTypeForHWIntrinsic(NamedIntrinsic hwIntrinsicID);
#endif // FEATURE_HW_INTRINSICS
private:
// These routines need not be enclosed under FEATURE_SIMD since lvIsSIMDType()
// is defined appropriately for both FEATURE_SIMD and !FEATURE_SIMD. The use
// of these routines also avoids the need for #ifdef FEATURE_SIMD specific code.
// Is this var of SIMD struct type?
bool lclVarIsSIMDType(unsigned varNum)
{
return lvaGetDesc(varNum)->lvIsSIMDType();
}
// Is this Local node a SIMD local?
bool lclVarIsSIMDType(GenTreeLclVarCommon* lclVarTree)
{
return lclVarIsSIMDType(lclVarTree->GetLclNum());
}
// Returns true if the TYP_SIMD locals on stack are aligned at their
// preferred byte boundary specified by getSIMDTypeAlignment().
//
// As per the Intel manual, the preferred alignment for AVX vectors is
// 32-bytes. It is not clear whether additional stack space used in
// aligning stack is worth the benefit and for now will use 16-byte
// alignment for AVX 256-bit vectors with unaligned load/stores to/from
// memory. On x86, the stack frame is aligned to 4 bytes. We need to extend
// existing support for double (8-byte) alignment to 16 or 32 byte
// alignment for frames with local SIMD vars, if that is determined to be
// profitable.
//
// On Amd64 and SysV, RSP+8 is aligned on entry to the function (before
// prolog has run). This means that in RBP-based frames RBP will be 16-byte
// aligned. For RSP-based frames these are only sometimes aligned, depending
// on the frame size.
//
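// Illustrative example (hypothetical frame values, not from the original source): with
// alignment = 16 and an RSP-based frame, a local at off = -40 in a frame of size 32 gives
// (8 - 32 + -40) % 16 == -64 % 16 == 0, so the local is considered aligned; with a frame
// size of 24 the result is -56 % 16 != 0, so it is not.
//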
bool isSIMDTypeLocalAligned(unsigned varNum)
{
#if defined(FEATURE_SIMD) && ALIGN_SIMD_TYPES
if (lclVarIsSIMDType(varNum) && lvaTable[varNum].lvType != TYP_BYREF)
{
// TODO-Cleanup: Can't this use the lvExactSize on the varDsc?
int alignment = getSIMDTypeAlignment(lvaTable[varNum].lvType);
if (alignment <= STACK_ALIGN)
{
bool rbpBased;
int off = lvaFrameAddress(varNum, &rbpBased);
// On SysV and Winx64 ABIs RSP+8 will be 16-byte aligned at the
// first instruction of a function. If our frame is RBP based
// then RBP will always be 16 bytes aligned, so we can simply
// check the offset.
if (rbpBased)
{
return (off % alignment) == 0;
}
// For RSP-based frame the alignment of RSP depends on our
// locals. rsp+8 is aligned on entry and we just subtract frame
// size so it is not hard to compute. Note that the compiler
// tries hard to make sure the frame size means RSP will be
// 16-byte aligned, but for leaf functions without locals (i.e.
// frameSize = 0) it will not be.
int frameSize = codeGen->genTotalFrameSize();
return ((8 - frameSize + off) % alignment) == 0;
}
}
#endif // FEATURE_SIMD
return false;
}
#ifdef DEBUG
// Answer the question: Is a particular ISA supported?
// Use this api when asking the question so that future
// ISA questions can be asked correctly or when asserting
// support/nonsupport for an instruction set
bool compIsaSupportedDebugOnly(CORINFO_InstructionSet isa) const
{
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
return (opts.compSupportsISA & (1ULL << isa)) != 0;
#else
return false;
#endif
}
#endif // DEBUG
bool notifyInstructionSetUsage(CORINFO_InstructionSet isa, bool supported) const;
// Answer the question: Is a particular ISA allowed to be used implicitly by optimizations?
// The result of this api call will exactly match the target machine
// on which the function is executed (except for CoreLib, where there are special rules)
bool compExactlyDependsOn(CORINFO_InstructionSet isa) const
{
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
uint64_t isaBit = (1ULL << isa);
if ((opts.compSupportsISAReported & isaBit) == 0)
{
if (notifyInstructionSetUsage(isa, (opts.compSupportsISA & isaBit) != 0))
((Compiler*)this)->opts.compSupportsISAExactly |= isaBit;
((Compiler*)this)->opts.compSupportsISAReported |= isaBit;
}
return (opts.compSupportsISAExactly & isaBit) != 0;
#else
return false;
#endif
}
// Ensure that code will not execute if an instruction set is usable. Call only
// if the instruction set has previously been reported as unusable, but when
// that status has not yet been recorded to the AOT compiler.
void compVerifyInstructionSetUnusable(CORINFO_InstructionSet isa)
{
// Use compExactlyDependsOn to capture and record the use of the isa.
bool isaUsable = compExactlyDependsOn(isa);
// Assert that the isa is unusable. If it were usable, this function should never have been called.
assert(!isaUsable);
}
// Answer the question: Is a particular ISA allowed to be used implicitly by optimizations?
// The result of this api call will match the target machine if the result is true
// If the result is false, then the target machine may have support for the instruction
bool compOpportunisticallyDependsOn(CORINFO_InstructionSet isa) const
{
if ((opts.compSupportsISA & (1ULL << isa)) != 0)
{
return compExactlyDependsOn(isa);
}
else
{
return false;
}
}
// Answer the question: Is a particular ISA supported for explicit hardware intrinsics?
bool compHWIntrinsicDependsOn(CORINFO_InstructionSet isa) const
{
// Report intent to use the ISA to the EE
compExactlyDependsOn(isa);
return ((opts.compSupportsISA & (1ULL << isa)) != 0);
}
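// Summary of the ISA query helpers above (illustrative usage, not part of the original source):
//   compExactlyDependsOn          - records the query with the VM and answers whether the target
//                                   machine definitely supports the ISA.
//   compOpportunisticallyDependsOn - only records usage when the ISA is actually available, so
//                                   optimizations may use it implicitly.
//   compHWIntrinsicDependsOn      - answers whether explicit hardware intrinsics may use the ISA.
// e.g.:
//   if (compOpportunisticallyDependsOn(InstructionSet_AVX2))
//   {
//       // emit the AVX2 form; the dependency has been reported to the VM
//   }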
bool canUseVexEncoding() const
{
#ifdef TARGET_XARCH
return compOpportunisticallyDependsOn(InstructionSet_AVX);
#else
return false;
#endif
}
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Compiler XX
XX XX
XX Generic info about the compilation and the method being compiled. XX
XX It is responsible for driving the other phases. XX
XX It is also responsible for all the memory management. XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
public:
Compiler* InlineeCompiler; // The Compiler instance for the inlinee
InlineResult* compInlineResult; // The result of importing the inlinee method.
bool compDoAggressiveInlining; // If true, mark every method as CORINFO_FLG_FORCEINLINE
bool compJmpOpUsed; // Does the method do a JMP
bool compLongUsed; // Does the method use TYP_LONG
bool compFloatingPointUsed; // Does the method use TYP_FLOAT or TYP_DOUBLE
bool compTailCallUsed; // Does the method do a tailcall
bool compTailPrefixSeen; // Does the method IL have tail. prefix
bool compLocallocSeen; // Does the method IL have localloc opcode
bool compLocallocUsed; // Does the method use localloc.
bool compLocallocOptimized; // Does the method have an optimized localloc
bool compQmarkUsed; // Does the method use GT_QMARK/GT_COLON
bool compQmarkRationalized; // Is it allowed to use a GT_QMARK/GT_COLON node.
bool compHasBackwardJump; // Does the method (or some inlinee) have a lexically backwards jump?
bool compHasBackwardJumpInHandler; // Does the method have a lexically backwards jump in a handler?
bool compSwitchedToOptimized; // Codegen initially was Tier0 but jit switched to FullOpts
bool compSwitchedToMinOpts; // Codegen initially was Tier1/FullOpts but jit switched to MinOpts
bool compSuppressedZeroInit; // There are vars with lvSuppressedZeroInit set
// NOTE: These values are only reliable after
// the importing is completely finished.
#ifdef DEBUG
// State information - which phases have completed?
// These are kept together for easy discoverability
bool bRangeAllowStress;
bool compCodeGenDone;
int64_t compNumStatementLinksTraversed; // # of links traversed while doing debug checks
bool fgNormalizeEHDone; // Has the flowgraph EH normalization phase been done?
size_t compSizeEstimate; // The estimated size of the method as per `gtSetEvalOrder`.
size_t compCycleEstimate; // The estimated cycle count of the method as per `gtSetEvalOrder`
#endif // DEBUG
bool fgLocalVarLivenessDone; // Note that this one is used outside of debug.
bool fgLocalVarLivenessChanged;
bool compLSRADone;
bool compRationalIRForm;
bool compUsesThrowHelper; // There is a call to a THROW_HELPER for the compiled method.
bool compGeneratingProlog;
bool compGeneratingEpilog;
bool compNeedsGSSecurityCookie; // There is an unsafe buffer (or localloc) on the stack.
// Insert cookie on frame and code to check the cookie, like VC++ -GS.
bool compGSReorderStackLayout; // There is an unsafe buffer on the stack, reorder locals and make local
// copies of susceptible parameters to avoid buffer overrun attacks through locals/params
bool getNeedsGSSecurityCookie() const
{
return compNeedsGSSecurityCookie;
}
void setNeedsGSSecurityCookie()
{
compNeedsGSSecurityCookie = true;
}
FrameLayoutState lvaDoneFrameLayout; // The highest frame layout state that we've completed. During
// frame layout calculations, this is the level we are currently
// computing.
//---------------------------- JITing options -----------------------------
enum codeOptimize
{
BLENDED_CODE,
SMALL_CODE,
FAST_CODE,
COUNT_OPT_CODE
};
struct Options
{
JitFlags* jitFlags; // all flags passed from the EE
// The instruction sets that the compiler is allowed to emit.
uint64_t compSupportsISA;
// The instruction sets that were reported to the VM as being used by the current method. Subset of
// compSupportsISA.
uint64_t compSupportsISAReported;
// The instruction sets that the compiler is allowed to take advantage of implicitly during optimizations.
// Subset of compSupportsISA.
// The instruction sets available in compSupportsISA and not available in compSupportsISAExactly can be only
// used via explicit hardware intrinsics.
uint64_t compSupportsISAExactly;
void setSupportedISAs(CORINFO_InstructionSetFlags isas)
{
compSupportsISA = isas.GetFlagsRaw();
}
unsigned compFlags; // method attributes
unsigned instrCount;
unsigned lvRefCount;
codeOptimize compCodeOpt; // what type of code optimizations
bool compUseCMOV;
// optimize maximally and/or favor speed over size?
#define DEFAULT_MIN_OPTS_CODE_SIZE 60000
#define DEFAULT_MIN_OPTS_INSTR_COUNT 20000
#define DEFAULT_MIN_OPTS_BB_COUNT 2000
#define DEFAULT_MIN_OPTS_LV_NUM_COUNT 2000
#define DEFAULT_MIN_OPTS_LV_REF_COUNT 8000
// Maximum number of locals before turning off the inlining
#define MAX_LV_NUM_COUNT_FOR_INLINING 512
bool compMinOpts;
bool compMinOptsIsSet;
#ifdef DEBUG
mutable bool compMinOptsIsUsed;
bool MinOpts() const
{
assert(compMinOptsIsSet);
compMinOptsIsUsed = true;
return compMinOpts;
}
bool IsMinOptsSet() const
{
return compMinOptsIsSet;
}
#else // !DEBUG
bool MinOpts() const
{
return compMinOpts;
}
bool IsMinOptsSet() const
{
return compMinOptsIsSet;
}
#endif // !DEBUG
bool OptimizationDisabled() const
{
return MinOpts() || compDbgCode;
}
bool OptimizationEnabled() const
{
return !OptimizationDisabled();
}
void SetMinOpts(bool val)
{
assert(!compMinOptsIsUsed);
assert(!compMinOptsIsSet || (compMinOpts == val));
compMinOpts = val;
compMinOptsIsSet = true;
}
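// Note on the accessors above: in DEBUG builds, MinOpts() marks compMinOptsIsUsed, and
// SetMinOpts() asserts that the value has not yet been read and cannot change once set.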
// true if the CLFLG_* for an optimization is set.
bool OptEnabled(unsigned optFlag) const
{
return !!(compFlags & optFlag);
}
#ifdef FEATURE_READYTORUN
bool IsReadyToRun() const
{
return jitFlags->IsSet(JitFlags::JIT_FLAG_READYTORUN);
}
#else
bool IsReadyToRun() const
{
return false;
}
#endif
// Check if the compilation is control-flow guard enabled.
bool IsCFGEnabled() const
{
#if defined(TARGET_ARM64) || defined(TARGET_AMD64)
// On these platforms we assume the register that the target is
// passed in is preserved by the validator and take care to get the
// target from the register for the call (even in debug mode).
static_assert_no_msg((RBM_VALIDATE_INDIRECT_CALL_TRASH & (1 << REG_VALIDATE_INDIRECT_CALL_ADDR)) == 0);
if (JitConfig.JitForceControlFlowGuard())
return true;
return jitFlags->IsSet(JitFlags::JIT_FLAG_ENABLE_CFG);
#else
// The remaining platforms are not supported and would require some
// work to support.
//
// ARM32:
// The ARM32 validator does not preserve any volatile registers
// which means we have to take special care to allocate and use a
// callee-saved register (reloading the target from memory is a
// security issue).
//
// x86:
// On x86 some VSD calls disassemble the call site and expect an
// indirect call which is fundamentally incompatible with CFG.
// This would require a different way to pass this information
// through.
//
return false;
#endif
}
#ifdef FEATURE_ON_STACK_REPLACEMENT
bool IsOSR() const
{
return jitFlags->IsSet(JitFlags::JIT_FLAG_OSR);
}
#else
bool IsOSR() const
{
return false;
}
#endif
// true if we should use the PINVOKE_{BEGIN,END} helpers instead of generating
// PInvoke transitions inline. Normally used by R2R, but also used when generating a reverse pinvoke frame, as
// the current logic for frame setup initializes and pushes
// the InlinedCallFrame before performing the Reverse PInvoke transition, which is invalid (as frames cannot
// safely be pushed/popped while the thread is in a preemptive state.).
bool ShouldUsePInvokeHelpers()
{
return jitFlags->IsSet(JitFlags::JIT_FLAG_USE_PINVOKE_HELPERS) ||
jitFlags->IsSet(JitFlags::JIT_FLAG_REVERSE_PINVOKE);
}
// true if we should insert the REVERSE_PINVOKE_{ENTER,EXIT} helpers in the method
// prolog/epilog
bool IsReversePInvoke()
{
return jitFlags->IsSet(JitFlags::JIT_FLAG_REVERSE_PINVOKE);
}
bool compScopeInfo; // Generate the LocalVar info ?
bool compDbgCode; // Generate debugger-friendly code?
bool compDbgInfo; // Gather debugging info?
bool compDbgEnC;
#ifdef PROFILING_SUPPORTED
bool compNoPInvokeInlineCB;
#else
static const bool compNoPInvokeInlineCB;
#endif
#ifdef DEBUG
bool compGcChecks; // Check arguments and return values to ensure they are sane
#endif
#if defined(DEBUG) && defined(TARGET_XARCH)
bool compStackCheckOnRet; // Check stack pointer on return to ensure it is correct.
#endif // defined(DEBUG) && defined(TARGET_XARCH)
#if defined(DEBUG) && defined(TARGET_X86)
bool compStackCheckOnCall; // Check stack pointer after call to ensure it is correct. Only for x86.
#endif // defined(DEBUG) && defined(TARGET_X86)
bool compReloc; // Generate relocs for pointers in code, true for all ngen/prejit codegen
#ifdef DEBUG
#if defined(TARGET_XARCH)
bool compEnablePCRelAddr; // Whether absolute addr be encoded as PC-rel offset by RyuJIT where possible
#endif
#endif // DEBUG
#ifdef UNIX_AMD64_ABI
// This flag indicates whether there is a need to align the frame.
// On AMD64-Windows, if there are calls, 4 slots for the outgoing args are allocated, except for
// FastTailCall. These slots make the frame size non-zero, so alignment logic will be called.
// On AMD64-Unix, there are no such slots, so it is possible for a method with calls to have a frame size
// of 0, in which case the frame alignment logic won't kick in. This flag takes care of the AMD64-Unix case
// by remembering that there are calls and making sure the frame alignment logic is executed.
bool compNeedToAlignFrame;
#endif // UNIX_AMD64_ABI
bool compProcedureSplitting; // Separate cold code from hot code
bool genFPorder; // Preserve FP order (operations are non-commutative)
bool genFPopt; // Can we do frame-pointer-omission optimization?
bool altJit; // True if we are an altjit and are compiling this method
#ifdef OPT_CONFIG
bool optRepeat; // Repeat optimizer phases k times
#endif
#ifdef DEBUG
bool compProcedureSplittingEH; // Separate cold code from hot code for functions with EH
bool dspCode; // Display native code generated
bool dspEHTable; // Display the EH table reported to the VM
bool dspDebugInfo; // Display the Debug info reported to the VM
bool dspInstrs; // Display the IL instructions intermixed with the native code output
bool dspLines; // Display source-code lines intermixed with native code output
bool dmpHex; // Display raw bytes in hex of native code output
bool varNames; // Display variables names in native code output
bool disAsm; // Display native code as it is generated
bool disAsmSpilled; // Display native code when any register spilling occurs
bool disasmWithGC; // Display GC info interleaved with disassembly.
bool disDiffable; // Makes the Disassembly code 'diff-able'
bool disAddr; // Display process address next to each instruction in disassembly code
bool disAlignment; // Display alignment boundaries in disassembly code
bool disAsm2; // Display native code after it is generated using external disassembler
bool dspOrder; // Display names of each of the methods that we ngen/jit
bool dspUnwind; // Display the unwind info output
bool dspDiffable; // Makes the Jit Dump 'diff-able' (currently uses same COMPlus_* flag as disDiffable)
bool compLongAddress; // Force using large pseudo instructions for long address
// (IF_LARGEJMP/IF_LARGEADR/IF_LARGLDC)
bool dspGCtbls; // Display the GC tables
#endif
bool compExpandCallsEarly; // True if we should expand virtual call targets early for this method
// Default numbers used to perform loop alignment. All the numbers are chosen
// based on experimenting with various benchmarks.
// Default minimum loop block weight required to enable loop alignment.
#define DEFAULT_ALIGN_LOOP_MIN_BLOCK_WEIGHT 4
// By default a loop will be aligned at 32B address boundary to get better
// performance as per architecture manuals.
#define DEFAULT_ALIGN_LOOP_BOUNDARY 0x20
// For non-adaptive loop alignment, by default, only align a loop whose size is
// at most 3 times the alignment block size. If the loop is bigger than that, it is most
// likely complicated enough that loop alignment will not impact performance.
#define DEFAULT_MAX_LOOPSIZE_FOR_ALIGN DEFAULT_ALIGN_LOOP_BOUNDARY * 3
#ifdef DEBUG
// Loop alignment variables
// If set, for non-adaptive alignment, ensure loop jmps are not on or cross alignment boundary.
bool compJitAlignLoopForJcc;
#endif
// For non-adaptive alignment, maximum loop size (in bytes) for which alignment will be done.
unsigned short compJitAlignLoopMaxCodeSize;
// Minimum weight needed for the first block of a loop to make it a candidate for alignment.
unsigned short compJitAlignLoopMinBlockWeight;
// For non-adaptive alignment, address boundary (power of 2) at which loop alignment should
// be done. By default, 32B.
unsigned short compJitAlignLoopBoundary;
// Padding limit to align a loop.
unsigned short compJitAlignPaddingLimit;
// If set, perform adaptive loop alignment that limits number of padding based on loop size.
bool compJitAlignLoopAdaptive;
// If set, tries to hide alignment instructions behind unconditional jumps.
bool compJitHideAlignBehindJmp;
// If set, tracks the hidden return buffer for struct arg.
bool compJitOptimizeStructHiddenBuffer;
#ifdef LATE_DISASM
bool doLateDisasm; // Run the late disassembler
#endif // LATE_DISASM
#if DUMP_GC_TABLES && !defined(DEBUG)
#pragma message("NOTE: this non-debug build has GC ptr table dumping always enabled!")
static const bool dspGCtbls = true;
#endif
#ifdef PROFILING_SUPPORTED
// Whether to emit Enter/Leave/TailCall hooks using a dummy stub (DummyProfilerELTStub()).
// This option helps make the JIT behave as if it is running under a profiler.
bool compJitELTHookEnabled;
#endif // PROFILING_SUPPORTED
#if FEATURE_TAILCALL_OPT
// Whether opportunistic or implicit tail call optimization is enabled.
bool compTailCallOpt;
// Whether optimization of transforming a recursive tail call into a loop is enabled.
bool compTailCallLoopOpt;
#endif
#if FEATURE_FASTTAILCALL
// Whether fast tail calls are allowed.
bool compFastTailCalls;
#endif // FEATURE_FASTTAILCALL
#if defined(TARGET_ARM64)
// Decision about whether to save FP/LR registers with callee-saved registers (see
// COMPlus_JitSaveFpLrWithCalleSavedRegisters).
int compJitSaveFpLrWithCalleeSavedRegisters;
#endif // defined(TARGET_ARM64)
#ifdef CONFIGURABLE_ARM_ABI
bool compUseSoftFP = false;
#else
#ifdef ARM_SOFTFP
static const bool compUseSoftFP = true;
#else // !ARM_SOFTFP
static const bool compUseSoftFP = false;
#endif // ARM_SOFTFP
#endif // CONFIGURABLE_ARM_ABI
} opts;
static bool s_pAltJitExcludeAssembliesListInitialized;
static AssemblyNamesList2* s_pAltJitExcludeAssembliesList;
#ifdef DEBUG
static bool s_pJitDisasmIncludeAssembliesListInitialized;
static AssemblyNamesList2* s_pJitDisasmIncludeAssembliesList;
static bool s_pJitFunctionFileInitialized;
static MethodSet* s_pJitMethodSet;
#endif // DEBUG
#ifdef DEBUG
// silence warning of cast to greater size. It is easier to silence than construct code the compiler is happy with, and
// it is safe in this case
#pragma warning(push)
#pragma warning(disable : 4312)
template <typename T>
T dspPtr(T p)
{
return (p == ZERO) ? ZERO : (opts.dspDiffable ? T(0xD1FFAB1E) : p);
}
template <typename T>
T dspOffset(T o)
{
return (o == ZERO) ? ZERO : (opts.dspDiffable ? T(0xD1FFAB1E) : o);
}
#pragma warning(pop)
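// Note: dspPtr/dspOffset above substitute the recognizable constant 0xD1FFAB1E ("diffable") for
// nonzero pointers/offsets when opts.dspDiffable is set, so JIT dumps can be diffed across runs.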
static int dspTreeID(GenTree* tree)
{
return tree->gtTreeID;
}
static void printStmtID(Statement* stmt)
{
assert(stmt != nullptr);
printf(FMT_STMT, stmt->GetID());
}
static void printTreeID(GenTree* tree)
{
if (tree == nullptr)
{
printf("[------]");
}
else
{
printf("[%06d]", dspTreeID(tree));
}
}
const char* pgoSourceToString(ICorJitInfo::PgoSource p);
const char* devirtualizationDetailToString(CORINFO_DEVIRTUALIZATION_DETAIL detail);
#endif // DEBUG
// clang-format off
#define STRESS_MODES \
\
STRESS_MODE(NONE) \
\
/* "Variations" stress areas which we try to mix up with each other. */ \
/* These should not be exhaustively used as they might */ \
/* hide/trivialize other areas */ \
\
STRESS_MODE(REGS) \
STRESS_MODE(DBL_ALN) \
STRESS_MODE(LCL_FLDS) \
STRESS_MODE(UNROLL_LOOPS) \
STRESS_MODE(MAKE_CSE) \
STRESS_MODE(LEGACY_INLINE) \
STRESS_MODE(CLONE_EXPR) \
STRESS_MODE(USE_CMOV) \
STRESS_MODE(FOLD) \
STRESS_MODE(MERGED_RETURNS) \
STRESS_MODE(BB_PROFILE) \
STRESS_MODE(OPT_BOOLS_GC) \
STRESS_MODE(REMORPH_TREES) \
STRESS_MODE(64RSLT_MUL) \
STRESS_MODE(DO_WHILE_LOOPS) \
STRESS_MODE(MIN_OPTS) \
STRESS_MODE(REVERSE_FLAG) /* Will set GTF_REVERSE_OPS whenever we can */ \
STRESS_MODE(REVERSE_COMMA) /* Will reverse commas created with gtNewCommaNode */ \
STRESS_MODE(TAILCALL) /* Will make the call as a tailcall whenever legal */ \
STRESS_MODE(CATCH_ARG) /* Will spill catch arg */ \
STRESS_MODE(UNSAFE_BUFFER_CHECKS) \
STRESS_MODE(NULL_OBJECT_CHECK) \
STRESS_MODE(PINVOKE_RESTORE_ESP) \
STRESS_MODE(RANDOM_INLINE) \
STRESS_MODE(SWITCH_CMP_BR_EXPANSION) \
STRESS_MODE(GENERIC_VARN) \
STRESS_MODE(PROFILER_CALLBACKS) /* Will generate profiler hooks for ELT callbacks */ \
STRESS_MODE(BYREF_PROMOTION) /* Change undoPromotion decisions for byrefs */ \
STRESS_MODE(PROMOTE_FEWER_STRUCTS)/* Don't promote some structs that can be promoted */ \
STRESS_MODE(VN_BUDGET)/* Randomize the VN budget */ \
\
/* After COUNT_VARN, stress level 2 does all of these all the time */ \
\
STRESS_MODE(COUNT_VARN) \
\
/* "Check" stress areas that can be exhaustively used if we */ \
/* dont care about performance at all */ \
\
STRESS_MODE(FORCE_INLINE) /* Treat every method as AggressiveInlining */ \
STRESS_MODE(CHK_FLOW_UPDATE) \
STRESS_MODE(EMITTER) \
STRESS_MODE(CHK_REIMPORT) \
STRESS_MODE(FLATFP) \
STRESS_MODE(GENERIC_CHECK) \
STRESS_MODE(COUNT)
enum compStressArea
{
#define STRESS_MODE(mode) STRESS_##mode,
STRESS_MODES
#undef STRESS_MODE
};
// clang-format on
#ifdef DEBUG
static const LPCWSTR s_compStressModeNames[STRESS_COUNT + 1];
BYTE compActiveStressModes[STRESS_COUNT];
#endif // DEBUG
#define MAX_STRESS_WEIGHT 100
bool compStressCompile(compStressArea stressArea, unsigned weightPercentage);
bool compStressCompileHelper(compStressArea stressArea, unsigned weightPercentage);
#ifdef DEBUG
bool compInlineStress()
{
return compStressCompile(STRESS_LEGACY_INLINE, 50);
}
bool compRandomInlineStress()
{
return compStressCompile(STRESS_RANDOM_INLINE, 50);
}
bool compPromoteFewerStructs(unsigned lclNum);
#endif // DEBUG
bool compTailCallStress()
{
#ifdef DEBUG
// Do not stress tailcalls in IL stubs as the runtime creates several IL
// stubs to implement the tailcall mechanism, which would then
// recursively create more IL stubs.
return !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) &&
(JitConfig.TailcallStress() != 0 || compStressCompile(STRESS_TAILCALL, 5));
#else
return false;
#endif
}
const char* compGetTieringName(bool wantShortName = false) const;
const char* compGetStressMessage() const;
codeOptimize compCodeOpt() const
{
#if 0
// Switching between size & speed has measurable throughput impact
// (3.5% on NGen CoreLib when measured). It used to be enabled for
// DEBUG, but CHK & RET builds should generate identical code, so
// that's not acceptable.
// TODO-Throughput: Figure out what to do about size vs. speed & throughput.
// Investigate the cause of the throughput regression.
return opts.compCodeOpt;
#else
return BLENDED_CODE;
#endif
}
//--------------------- Info about the procedure --------------------------
struct Info
{
COMP_HANDLE compCompHnd;
CORINFO_MODULE_HANDLE compScopeHnd;
CORINFO_CLASS_HANDLE compClassHnd;
CORINFO_METHOD_HANDLE compMethodHnd;
CORINFO_METHOD_INFO* compMethodInfo;
bool hasCircularClassConstraints;
bool hasCircularMethodConstraints;
#if defined(DEBUG) || defined(LATE_DISASM) || DUMP_FLOWGRAPHS
const char* compMethodName;
const char* compClassName;
const char* compFullName;
double compPerfScore;
int compMethodSuperPMIIndex; // useful when debugging under SuperPMI
#endif // defined(DEBUG) || defined(LATE_DISASM) || DUMP_FLOWGRAPHS
#if defined(DEBUG) || defined(INLINE_DATA)
// Method hash is logically const, but computed
// on first demand.
mutable unsigned compMethodHashPrivate;
unsigned compMethodHash() const;
#endif // defined(DEBUG) || defined(INLINE_DATA)
#ifdef PSEUDORANDOM_NOP_INSERTION
// things for pseudorandom nop insertion
unsigned compChecksum;
CLRRandom compRNG;
#endif
// The following holds the FLG_xxxx flags for the method we're compiling.
unsigned compFlags;
// The following holds the class attributes for the method we're compiling.
unsigned compClassAttr;
const BYTE* compCode;
IL_OFFSET compILCodeSize; // The IL code size
IL_OFFSET compILImportSize; // Estimated amount of IL actually imported
IL_OFFSET compILEntry; // The IL entry point (normally 0)
PatchpointInfo* compPatchpointInfo; // Patchpoint data for OSR (normally nullptr)
UNATIVE_OFFSET compNativeCodeSize; // The native code size, after instructions are issued. This
// is less than (compTotalHotCodeSize + compTotalColdCodeSize) only if:
// (1) the code is not hot/cold split, and we issued less code than we expected, or
// (2) the code is hot/cold split, and we issued less code than we expected
// in the cold section (the hot section will always be padded out to compTotalHotCodeSize).
bool compIsStatic : 1; // Is the method static (no 'this' pointer)?
bool compIsVarArgs : 1; // Does the method have varargs parameters?
bool compInitMem : 1; // Is the CORINFO_OPT_INIT_LOCALS bit set in the method info options?
bool compProfilerCallback : 1; // JIT inserted a profiler Enter callback
bool compPublishStubParam : 1; // EAX captured in prolog will be available through an intrinsic
bool compHasNextCallRetAddr : 1; // The NextCallReturnAddress intrinsic is used.
var_types compRetType; // Return type of the method as declared in IL
var_types compRetNativeType; // Normalized return type as per target arch ABI
unsigned compILargsCount; // Number of arguments (incl. implicit but not hidden)
unsigned compArgsCount; // Number of arguments (incl. implicit and hidden)
#if FEATURE_FASTTAILCALL
unsigned compArgStackSize; // Incoming argument stack size in bytes
#endif // FEATURE_FASTTAILCALL
unsigned compRetBuffArg; // position of hidden return param var (0, 1) (BAD_VAR_NUM means not present);
int compTypeCtxtArg; // position of hidden param for type context for generic code (CORINFO_CALLCONV_PARAMTYPE)
unsigned compThisArg; // position of implicit this pointer param (not to be confused with lvaArg0Var)
unsigned compILlocalsCount; // Number of vars : args + locals (incl. implicit but not hidden)
unsigned compLocalsCount; // Number of vars : args + locals (incl. implicit and hidden)
unsigned compMaxStack;
UNATIVE_OFFSET compTotalHotCodeSize; // Total number of bytes of Hot Code in the method
UNATIVE_OFFSET compTotalColdCodeSize; // Total number of bytes of Cold Code in the method
unsigned compUnmanagedCallCountWithGCTransition; // count of unmanaged calls with GC transition.
CorInfoCallConvExtension compCallConv; // The entry-point calling convention for this method.
unsigned compLvFrameListRoot; // lclNum for the Frame root
unsigned compXcptnsCount; // Number of exception-handling clauses read in the method's IL.
// You should generally use compHndBBtabCount instead: it is the
// current number of EH clauses (after additions like synchronized
// methods and funclets, and removals like unreachable code deletion).
Target::ArgOrder compArgOrder;
bool compMatchedVM; // true if the VM is "matched": either the JIT is a cross-compiler
// and the VM expects that, or the JIT is a "self-host" compiler
// (e.g., x86 hosted targeting x86) and the VM expects that.
/* The following holds IL scope information about local variables.
*/
unsigned compVarScopesCount;
VarScopeDsc* compVarScopes;
/* The following holds information about instr offsets for
* which we need to report IP-mappings
*/
IL_OFFSET* compStmtOffsets; // sorted
unsigned compStmtOffsetsCount;
ICorDebugInfo::BoundaryTypes compStmtOffsetsImplicit;
#define CPU_X86 0x0100 // The generic X86 CPU
#define CPU_X86_PENTIUM_4 0x0110
#define CPU_X64 0x0200 // The generic x64 CPU
#define CPU_AMD_X64 0x0210 // AMD x64 CPU
#define CPU_INTEL_X64 0x0240 // Intel x64 CPU
#define CPU_ARM 0x0300 // The generic ARM CPU
#define CPU_ARM64 0x0400 // The generic ARM64 CPU
unsigned genCPU; // What CPU are we running on
// Number of class profile probes in this method
unsigned compClassProbeCount;
} info;
// Returns true if the method being compiled returns a non-void and non-struct value.
// Note that lvaInitTypeRef() normalizes compRetNativeType for struct returns in a
// single register as per target arch ABI (e.g on Amd64 Windows structs of size 1, 2,
// 4 or 8 gets normalized to TYP_BYTE/TYP_SHORT/TYP_INT/TYP_LONG; On Arm HFA structs).
// Methods returning such structs are considered to return non-struct return value and
// this method returns true in that case.
bool compMethodReturnsNativeScalarType()
{
return (info.compRetType != TYP_VOID) && !varTypeIsStruct(info.compRetNativeType);
}
// Returns true if the method being compiled returns RetBuf addr as its return value
bool compMethodReturnsRetBufAddr()
{
// There are cases where implicit RetBuf argument should be explicitly returned in a register.
// In such cases the return type is changed to TYP_BYREF and appropriate IR is generated.
// These cases are:
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_AMD64
// 1. on x64 Windows and Unix the address of RetBuf needs to be returned by
// methods with hidden RetBufArg in RAX. In such case GT_RETURN is of TYP_BYREF,
// returning the address of RetBuf.
return (info.compRetBuffArg != BAD_VAR_NUM);
#else // TARGET_AMD64
#ifdef PROFILING_SUPPORTED
// 2. Profiler Leave callback expects the address of retbuf as return value for
// methods with hidden RetBuf argument. impReturnInstruction() when profiler
// callbacks are needed creates GT_RETURN(TYP_BYREF, op1 = Addr of RetBuf) for
// methods with hidden RetBufArg.
if (compIsProfilerHookNeeded())
{
return (info.compRetBuffArg != BAD_VAR_NUM);
}
#endif
// 3. Windows ARM64 native instance calling convention requires the address of RetBuff
// to be returned in x0.
CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_ARM64)
if (TargetOS::IsWindows)
{
auto callConv = info.compCallConv;
if (callConvIsInstanceMethodCallConv(callConv))
{
return (info.compRetBuffArg != BAD_VAR_NUM);
}
}
#endif // TARGET_ARM64
// 4. x86 unmanaged calling conventions require the address of RetBuff to be returned in eax.
CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_X86)
if (info.compCallConv != CorInfoCallConvExtension::Managed)
{
return (info.compRetBuffArg != BAD_VAR_NUM);
}
#endif
return false;
#endif // TARGET_AMD64
}
// Returns true if the method returns a value in more than one return register
// TODO-ARM-Bug: Deal with multi-register genReturnLocaled structs?
// TODO-ARM64: Does this apply for ARM64 too?
bool compMethodReturnsMultiRegRetType()
{
#if FEATURE_MULTIREG_RET
#if defined(TARGET_X86)
// On x86, 64-bit longs and structs are returned in multiple registers
return varTypeIsLong(info.compRetNativeType) ||
(varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM));
#else // targets: X64-UNIX, ARM64 or ARM32
// On all other targets that support multireg return values:
// Methods returning a struct in multiple registers have a return value of TYP_STRUCT.
// Such method's compRetNativeType is TYP_STRUCT without a hidden RetBufArg
return varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM);
#endif // TARGET_XXX
#else // not FEATURE_MULTIREG_RET
// For this architecture there are no multireg returns
return false;
#endif // FEATURE_MULTIREG_RET
}
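// For example (illustrative, not from the original source): on x86 a method returning a 64-bit
// long returns it in the EDX:EAX register pair, so compMethodReturnsMultiRegRetType() is true
// for such a method.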
bool compEnregLocals()
{
return ((opts.compFlags & CLFLG_REGVAR) != 0);
}
bool compEnregStructLocals()
{
return (JitConfig.JitEnregStructLocals() != 0);
}
bool compObjectStackAllocation()
{
return (JitConfig.JitObjectStackAllocation() != 0);
}
// Returns true if the method returns a value in more than one return register,
// it should replace/be merged with compMethodReturnsMultiRegRetType when #36868 is fixed.
// The difference from original `compMethodReturnsMultiRegRetType` is in ARM64 SIMD* handling,
// this method correctly returns false for it (it is passed as HVA), when the original returns true.
bool compMethodReturnsMultiRegRegTypeAlternate()
{
#if FEATURE_MULTIREG_RET
#if defined(TARGET_X86)
// On x86, 64-bit longs and structs are returned in multiple registers
return varTypeIsLong(info.compRetNativeType) ||
(varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM));
#else // targets: X64-UNIX, ARM64 or ARM32
#if defined(TARGET_ARM64)
// TYP_SIMD* are returned in one register.
if (varTypeIsSIMD(info.compRetNativeType))
{
return false;
}
#endif
// On all other targets that support multireg return values:
// Methods returning a struct in multiple registers have a return value of TYP_STRUCT.
// Such method's compRetNativeType is TYP_STRUCT without a hidden RetBufArg
return varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM);
#endif // TARGET_XXX
#else // not FEATURE_MULTIREG_RET
// For this architecture there are no multireg returns
return false;
#endif // FEATURE_MULTIREG_RET
}
// Returns true if the method being compiled returns a value
bool compMethodHasRetVal()
{
return compMethodReturnsNativeScalarType() || compMethodReturnsRetBufAddr() ||
compMethodReturnsMultiRegRetType();
}
// Returns true if the method requires a PInvoke prolog and epilog
bool compMethodRequiresPInvokeFrame()
{
return (info.compUnmanagedCallCountWithGCTransition > 0);
}
// Returns true if address-exposed user variables should be poisoned with a recognizable value
bool compShouldPoisonFrame()
{
#ifdef FEATURE_ON_STACK_REPLACEMENT
if (opts.IsOSR())
return false;
#endif
return !info.compInitMem && opts.compDbgCode;
}
// Returns true if the jit supports having patchpoints in this method.
// Optionally, get the reason why not.
bool compCanHavePatchpoints(const char** reason = nullptr);
#if defined(DEBUG)
void compDispLocalVars();
#endif // DEBUG
private:
class ClassLayoutTable* m_classLayoutTable;
class ClassLayoutTable* typCreateClassLayoutTable();
class ClassLayoutTable* typGetClassLayoutTable();
public:
// Get the layout having the specified layout number.
ClassLayout* typGetLayoutByNum(unsigned layoutNum);
// Get the layout number of the specified layout.
unsigned typGetLayoutNum(ClassLayout* layout);
// Get the layout having the specified size but no class handle.
ClassLayout* typGetBlkLayout(unsigned blockSize);
// Get the number of a layout having the specified size but no class handle.
unsigned typGetBlkLayoutNum(unsigned blockSize);
// Get the layout for the specified class handle.
ClassLayout* typGetObjLayout(CORINFO_CLASS_HANDLE classHandle);
// Get the number of a layout for the specified class handle.
unsigned typGetObjLayoutNum(CORINFO_CLASS_HANDLE classHandle);
//-------------------------- Global Compiler Data ------------------------------------
#ifdef DEBUG
private:
static LONG s_compMethodsCount; // to produce unique label names
#endif
public:
#ifdef DEBUG
LONG compMethodID;
unsigned compGenTreeID;
unsigned compStatementID;
unsigned compBasicBlockID;
#endif
BasicBlock* compCurBB; // the current basic block in process
Statement* compCurStmt; // the current statement in process
GenTree* compCurTree; // the current tree in process
// The following is used to create the 'method JIT info' block.
size_t compInfoBlkSize;
BYTE* compInfoBlkAddr;
EHblkDsc* compHndBBtab; // array of EH data
unsigned compHndBBtabCount; // element count of used elements in EH data array
unsigned compHndBBtabAllocCount; // element count of allocated elements in EH data array
#if defined(TARGET_X86)
//-------------------------------------------------------------------------
// Tracking of region covered by the monitor in synchronized methods
void* syncStartEmitCookie; // the emitter cookie for first instruction after the call to MON_ENTER
void* syncEndEmitCookie; // the emitter cookie for first instruction after the call to MON_EXIT
#endif // !TARGET_X86
Phases mostRecentlyActivePhase; // the most recently active phase
PhaseChecks activePhaseChecks; // the currently active phase checks
//-------------------------------------------------------------------------
// The following keeps track of how many bytes of local frame space we've
// grabbed so far in the current function, and how many argument bytes we
// need to pop when we return.
//
unsigned compLclFrameSize; // secObject+lclBlk+locals+temps
// Count of callee-saved regs we pushed in the prolog.
// Does not include EBP for isFramePointerUsed() and double-aligned frames.
// In case of Amd64 this doesn't include float regs saved on stack.
unsigned compCalleeRegsPushed;
#if defined(TARGET_XARCH)
// Mask of callee saved float regs on stack.
regMaskTP compCalleeFPRegsSavedMask;
#endif
#ifdef TARGET_AMD64
// Quirk for VS debug-launch scenario to work:
// Bytes of padding between save-reg area and locals.
#define VSQUIRK_STACK_PAD (2 * REGSIZE_BYTES)
unsigned compVSQuirkStackPaddingNeeded;
#endif
unsigned compArgSize; // total size of arguments in bytes (including register args (lvIsRegArg))
#ifdef TARGET_ARM
bool compHasSplitParam;
#endif
unsigned compMapILargNum(unsigned ILargNum); // map accounting for hidden args
unsigned compMapILvarNum(unsigned ILvarNum); // map accounting for hidden args
unsigned compMap2ILvarNum(unsigned varNum) const; // map accounting for hidden args
#if defined(TARGET_ARM64)
struct FrameInfo
{
// Frame type (1-5)
int frameType;
// Distance from established (method body) SP to base of callee save area
int calleeSaveSpOffset;
// Amount to subtract from SP before saving (prolog) OR
// to add to SP after restoring (epilog) callee saves
int calleeSaveSpDelta;
// Distance from established SP to where caller's FP was saved
int offsetSpToSavedFp;
} compFrameInfo;
#endif
//-------------------------------------------------------------------------
static void compStartup(); // One-time initialization
static void compShutdown(); // One-time finalization
void compInit(ArenaAllocator* pAlloc,
CORINFO_METHOD_HANDLE methodHnd,
COMP_HANDLE compHnd,
CORINFO_METHOD_INFO* methodInfo,
InlineInfo* inlineInfo);
void compDone();
static void compDisplayStaticSizes(FILE* fout);
//------------ Some utility functions --------------
void* compGetHelperFtn(CorInfoHelpFunc ftnNum, /* IN */
void** ppIndirection); /* OUT */
// Several JIT/EE interface functions return a CorInfoType, and also return a
// class handle as an out parameter if the type is a value class. Returns the
// size of the type these describe.
unsigned compGetTypeSize(CorInfoType cit, CORINFO_CLASS_HANDLE clsHnd);
#ifdef DEBUG
// Components used by the compiler may write unit test suites, and
// have them run within this method. They will be run only once per process, and only
// in debug. (Perhaps should be under the control of a COMPlus_ flag.)
// These should fail by asserting.
void compDoComponentUnitTestsOnce();
#endif // DEBUG
int compCompile(CORINFO_MODULE_HANDLE classPtr,
void** methodCodePtr,
uint32_t* methodCodeSize,
JitFlags* compileFlags);
void compCompileFinish();
int compCompileHelper(CORINFO_MODULE_HANDLE classPtr,
COMP_HANDLE compHnd,
CORINFO_METHOD_INFO* methodInfo,
void** methodCodePtr,
uint32_t* methodCodeSize,
JitFlags* compileFlag);
ArenaAllocator* compGetArenaAllocator();
void generatePatchpointInfo();
#if MEASURE_MEM_ALLOC
static bool s_dspMemStats; // Display per-phase memory statistics for every function
#endif // MEASURE_MEM_ALLOC
#if LOOP_HOIST_STATS
unsigned m_loopsConsidered;
bool m_curLoopHasHoistedExpression;
unsigned m_loopsWithHoistedExpressions;
unsigned m_totalHoistedExpressions;
void AddLoopHoistStats();
void PrintPerMethodLoopHoistStats();
static CritSecObject s_loopHoistStatsLock; // This lock protects the data structures below.
static unsigned s_loopsConsidered;
static unsigned s_loopsWithHoistedExpressions;
static unsigned s_totalHoistedExpressions;
static void PrintAggregateLoopHoistStats(FILE* f);
#endif // LOOP_HOIST_STATS
#if TRACK_ENREG_STATS
class EnregisterStats
{
private:
unsigned m_totalNumberOfVars;
unsigned m_totalNumberOfStructVars;
unsigned m_totalNumberOfEnregVars;
unsigned m_totalNumberOfStructEnregVars;
unsigned m_addrExposed;
unsigned m_hiddenStructArg;
unsigned m_VMNeedsStackAddr;
unsigned m_localField;
unsigned m_blockOp;
unsigned m_dontEnregStructs;
unsigned m_notRegSizeStruct;
unsigned m_structArg;
unsigned m_lclAddrNode;
unsigned m_castTakesAddr;
unsigned m_storeBlkSrc;
unsigned m_oneAsgRetyping;
unsigned m_swizzleArg;
unsigned m_blockOpRet;
unsigned m_returnSpCheck;
unsigned m_simdUserForcesDep;
unsigned m_liveInOutHndlr;
unsigned m_depField;
unsigned m_noRegVars;
unsigned m_minOptsGC;
#ifdef JIT32_GCENCODER
unsigned m_PinningRef;
#endif // JIT32_GCENCODER
#if !defined(TARGET_64BIT)
unsigned m_longParamField;
#endif // !TARGET_64BIT
unsigned m_parentExposed;
unsigned m_tooConservative;
unsigned m_escapeAddress;
unsigned m_osrExposed;
unsigned m_stressLclFld;
unsigned m_copyFldByFld;
unsigned m_dispatchRetBuf;
unsigned m_wideIndir;
public:
void RecordLocal(const LclVarDsc* varDsc);
void Dump(FILE* fout) const;
};
static EnregisterStats s_enregisterStats;
#endif // TRACK_ENREG_STATS
bool compIsForImportOnly();
bool compIsForInlining() const;
bool compDonotInline();
#ifdef DEBUG
// Get the default fill char value; we randomize this value when JitStress is enabled.
static unsigned char compGetJitDefaultFill(Compiler* comp);
const char* compLocalVarName(unsigned varNum, unsigned offs);
VarName compVarName(regNumber reg, bool isFloatReg = false);
const char* compRegVarName(regNumber reg, bool displayVar = false, bool isFloatReg = false);
const char* compRegNameForSize(regNumber reg, size_t size);
const char* compFPregVarName(unsigned fpReg, bool displayVar = false);
void compDspSrcLinesByNativeIP(UNATIVE_OFFSET curIP);
void compDspSrcLinesByLineNum(unsigned line, bool seek = false);
#endif // DEBUG
//-------------------------------------------------------------------------
struct VarScopeListNode
{
VarScopeDsc* data;
VarScopeListNode* next;
static VarScopeListNode* Create(VarScopeDsc* value, CompAllocator alloc)
{
VarScopeListNode* node = new (alloc) VarScopeListNode;
node->data = value;
node->next = nullptr;
return node;
}
};
struct VarScopeMapInfo
{
VarScopeListNode* head;
VarScopeListNode* tail;
static VarScopeMapInfo* Create(VarScopeListNode* node, CompAllocator alloc)
{
VarScopeMapInfo* info = new (alloc) VarScopeMapInfo;
info->head = node;
info->tail = node;
return info;
}
};
// Max value of scope count for which we would use linear search; for larger values we would use hashtable lookup.
static const unsigned MAX_LINEAR_FIND_LCL_SCOPELIST = 32;
typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, VarScopeMapInfo*> VarNumToScopeDscMap;
// Map keeping variables' scopes, indexed by varNum, with each variable's scope dscs at that index.
VarNumToScopeDscMap* compVarScopeMap;
VarScopeDsc* compFindLocalVar(unsigned varNum, unsigned lifeBeg, unsigned lifeEnd);
VarScopeDsc* compFindLocalVar(unsigned varNum, unsigned offs);
VarScopeDsc* compFindLocalVarLinear(unsigned varNum, unsigned offs);
void compInitVarScopeMap();
VarScopeDsc** compEnterScopeList; // List has the offsets where variables
// enter scope, sorted by instr offset
unsigned compNextEnterScope;
VarScopeDsc** compExitScopeList; // List has the offsets where variables
// go out of scope, sorted by instr offset
unsigned compNextExitScope;
void compInitScopeLists();
void compResetScopeLists();
VarScopeDsc* compGetNextEnterScope(unsigned offs, bool scan = false);
VarScopeDsc* compGetNextExitScope(unsigned offs, bool scan = false);
void compProcessScopesUntil(unsigned offset,
VARSET_TP* inScope,
void (Compiler::*enterScopeFn)(VARSET_TP* inScope, VarScopeDsc*),
void (Compiler::*exitScopeFn)(VARSET_TP* inScope, VarScopeDsc*));
#ifdef DEBUG
void compDispScopeLists();
#endif // DEBUG
bool compIsProfilerHookNeeded();
//-------------------------------------------------------------------------
/* Statistical Data Gathering */
void compJitStats(); // call this function and enable
// various ifdef's below for statistical data
#if CALL_ARG_STATS
void compCallArgStats();
static void compDispCallArgStats(FILE* fout);
#endif
//-------------------------------------------------------------------------
protected:
#ifdef DEBUG
bool skipMethod();
#endif
ArenaAllocator* compArenaAllocator;
public:
void compFunctionTraceStart();
void compFunctionTraceEnd(void* methodCodePtr, ULONG methodCodeSize, bool isNYI);
protected:
size_t compMaxUncheckedOffsetForNullObject;
void compInitOptions(JitFlags* compileFlags);
void compSetProcessor();
void compInitDebuggingInfo();
void compSetOptimizationLevel();
#ifdef TARGET_ARMARCH
bool compRsvdRegCheck(FrameLayoutState curState);
#endif
void compCompile(void** methodCodePtr, uint32_t* methodCodeSize, JitFlags* compileFlags);
// Clear annotations produced during optimizations; to be used between iterations when repeating opts.
void ResetOptAnnotations();
// Regenerate loop descriptors; to be used between iterations when repeating opts.
void RecomputeLoopInfo();
#ifdef PROFILING_SUPPORTED
// Data required for generating profiler Enter/Leave/TailCall hooks
bool compProfilerHookNeeded; // Whether profiler Enter/Leave/TailCall hook needs to be generated for the method
void* compProfilerMethHnd; // Profiler handle of the method being compiled. Passed as param to ELT callbacks
bool compProfilerMethHndIndirected; // Whether compProfilerHandle is pointer to the handle or is an actual handle
#endif
public:
// Assumes called as part of process shutdown; does any compiler-specific work associated with that.
static void ProcessShutdownWork(ICorStaticInfo* statInfo);
CompAllocator getAllocator(CompMemKind cmk = CMK_Generic)
{
return CompAllocator(compArenaAllocator, cmk);
}
CompAllocator getAllocatorGC()
{
return getAllocator(CMK_GC);
}
CompAllocator getAllocatorLoopHoist()
{
return getAllocator(CMK_LoopHoist);
}
#ifdef DEBUG
CompAllocator getAllocatorDebugOnly()
{
return getAllocator(CMK_DebugOnly);
}
#endif // DEBUG
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX typeInfo XX
XX XX
XX Checks for type compatibility and merges types XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
public:
// Returns true if child is equal to or a subtype of parent for merge purposes.
// This support is necessary to support attributes that are not described in,
// for example, signatures. For example, the permanent home byref (a byref that
// points to the gc heap) isn't a property of method signatures; therefore,
// it is safe to have mismatches here (that tiCompatibleWith will not flag),
// but when deciding if we need to reimport a block, we need to take these
// into account.
bool tiMergeCompatibleWith(const typeInfo& pChild, const typeInfo& pParent, bool normalisedForStack) const;
// Returns true if child is equal to or a subtype of parent.
// normalisedForStack indicates that both types are normalised for the stack
bool tiCompatibleWith(const typeInfo& pChild, const typeInfo& pParent, bool normalisedForStack) const;
// Merges pDest and pSrc. Returns false if merge is undefined.
// *pDest is modified to represent the merged type. Sets "*changed" to true
// if this changes "*pDest".
bool tiMergeToCommonParent(typeInfo* pDest, const typeInfo* pSrc, bool* changed) const;
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX IL verification stuff XX
XX XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
public:
// The following is used to track liveness of local variables, initialization
// of valueclass constructors, and type safe use of IL instructions.
// dynamic state info needed for verification
EntryState verCurrentState;
// The 'this' ptr of object type .ctors is considered inited only after
// the base class ctor is called, or an alternate ctor is called.
// An uninited 'this' ptr can be used to access fields, but cannot
// be used to call a member function.
bool verTrackObjCtorInitState;
void verInitBBEntryState(BasicBlock* block, EntryState* currentState);
// Requires that "tis" is not TIS_Bottom -- it's a definite init/uninit state.
void verSetThisInit(BasicBlock* block, ThisInitState tis);
void verInitCurrentState();
void verResetCurrentState(BasicBlock* block, EntryState* currentState);
// Merges the current verification state into the entry state of "block", return false if that merge fails,
// TRUE if it succeeds. Further sets "*changed" to true if this changes the entry state of "block".
bool verMergeEntryStates(BasicBlock* block, bool* changed);
void verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg));
void verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg));
typeInfo verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd,
bool bashStructToRef = false); // converts from jit type representation to typeInfo
typeInfo verMakeTypeInfo(CorInfoType ciType,
CORINFO_CLASS_HANDLE clsHnd); // converts from jit type representation to typeInfo
bool verIsSDArray(const typeInfo& ti);
typeInfo verGetArrayElemType(const typeInfo& ti);
typeInfo verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args);
bool verIsByRefLike(const typeInfo& ti);
bool verIsSafeToReturnByRef(const typeInfo& ti);
// generic type variables range over types that satisfy IsBoxable
bool verIsBoxable(const typeInfo& ti);
void DECLSPEC_NORETURN verRaiseVerifyException(INDEBUG(const char* reason) DEBUGARG(const char* file)
DEBUGARG(unsigned line));
void verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* reason) DEBUGARG(const char* file)
DEBUGARG(unsigned line));
bool verCheckTailCallConstraint(OPCODE opcode,
CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call
// on a type parameter?
bool speculative // If true, won't throw if verification fails. Instead it will
// return false to the caller.
// If false, it will throw.
);
bool verIsBoxedValueType(const typeInfo& ti);
void verVerifyCall(OPCODE opcode,
CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
bool tailCall,
bool readonlyCall, // is this a "readonly." call?
const BYTE* delegateCreateStart,
const BYTE* codeAddr,
CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName));
bool verCheckDelegateCreation(const BYTE* delegateCreateStart, const BYTE* codeAddr, mdMemberRef& targetMemberRef);
typeInfo verVerifySTIND(const typeInfo& ptr, const typeInfo& value, const typeInfo& instrType);
typeInfo verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType);
void verVerifyField(CORINFO_RESOLVED_TOKEN* pResolvedToken,
const CORINFO_FIELD_INFO& fieldInfo,
const typeInfo* tiThis,
bool mutator,
bool allowPlainStructAsThis = false);
void verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode);
void verVerifyThisPtrInitialised();
bool verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target);
#ifdef DEBUG
// One-line log function. Default level is 0. Increasing it gives you
// more log information.
// levels are currently unused: #define JITDUMP(level,...) ();
void JitLogEE(unsigned level, const char* fmt, ...);
bool compDebugBreak;
bool compJitHaltMethod();
#endif
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX GS Security checks for unsafe buffers XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
public:
struct ShadowParamVarInfo
{
FixedBitVect* assignGroup; // the closure set of variables whose values depend on each other
unsigned shadowCopy; // Lcl var num, if not valid set to BAD_VAR_NUM
static bool mayNeedShadowCopy(LclVarDsc* varDsc)
{
#if defined(TARGET_AMD64)
// GS cookie logic to create shadow slots, create trees to copy reg args to shadow
// slots and update all trees to refer to shadow slots is done immediately after
// fgMorph(). Lsra could potentially mark a param as DoNotEnregister after JIT determines
// not to shadow a parameter. Also, LSRA could potentially spill a param which is passed
// in register. Therefore, conservatively all params may need a shadow copy. Note that
// GS cookie logic further checks whether the param is a ptr or an unsafe buffer before
// creating a shadow slot even though this routine returns true.
//
// TODO-AMD64-CQ: Revisit this conservative approach as it could create more shadow slots than
// required. There are two cases under which a reg arg could potentially be used from its
// home location:
// a) LSRA marks it as DoNotEnregister (see LinearScan::identifyCandidates())
// b) LSRA spills it
//
// Possible solution to address case (a)
// - The conditions under which LSRA marks a varDsc as DoNotEnregister could be checked
// in this routine. Note that "live out of an exception handler" is something we may not be
// able to check here, since GS cookie logic is invoked ahead of liveness computation.
// Therefore, for methods with exception handling that need a GS cookie check, we might have
// to take the conservative approach.
//
// Possible solution to address case (b)
// - Whenever a parameter passed in an argument register needs to be spilled by LSRA, we
// create a new spill temp if the method needs GS cookie check.
return varDsc->lvIsParam;
#else // !defined(TARGET_AMD64)
return varDsc->lvIsParam && !varDsc->lvIsRegArg;
#endif
}
#ifdef DEBUG
void Print()
{
printf("assignGroup [%p]; shadowCopy: [%d];\n", assignGroup, shadowCopy);
}
#endif
};
GSCookie* gsGlobalSecurityCookieAddr; // Address of global cookie for unsafe buffer checks
GSCookie gsGlobalSecurityCookieVal; // Value of global cookie if addr is NULL
ShadowParamVarInfo* gsShadowVarInfo; // Table used by shadow param analysis code
void gsGSChecksInitCookie(); // Grabs cookie variable
void gsCopyShadowParams(); // Identify vulnerable params and create shadow copies
bool gsFindVulnerableParams(); // Shadow param analysis code
void gsParamsToShadows(); // Insert copy code and replace param uses with shadows
static fgWalkPreFn gsMarkPtrsAndAssignGroups; // Shadow param analysis tree-walk
static fgWalkPreFn gsReplaceShadowParams; // Shadow param replacement tree-walk
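// A rough sketch of how these pieces fit together (illustrative only; the actual driver
// logic lives in the GS phase and may differ in detail):
//
//     gsGSChecksInitCookie(); // grab the GS cookie local
//     gsCopyShadowParams();   // roughly: if (gsFindVulnerableParams()) gsParamsToShadows();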
#define DEFAULT_MAX_INLINE_SIZE 100 // Methods with > DEFAULT_MAX_INLINE_SIZE IL bytes will never be inlined.
// This can be overridden by setting the COMPlus_JITInlineSize environment variable.
#define DEFAULT_MAX_INLINE_DEPTH 20 // Methods at more than this level deep will not be inlined
#define DEFAULT_MAX_LOCALLOC_TO_LOCAL_SIZE 32 // fixed locallocs of this size or smaller will convert to local buffers
private:
#ifdef FEATURE_JIT_METHOD_PERF
JitTimer* pCompJitTimer; // Timer data structure (by phases) for current compilation.
static CompTimeSummaryInfo s_compJitTimerSummary; // Summary of the Timer information for the whole run.
static LPCWSTR JitTimeLogCsv(); // Retrieve the file name for CSV from ConfigDWORD.
static LPCWSTR compJitTimeLogFilename; // If a log file for JIT time is desired, filename to write it to.
#endif
void BeginPhase(Phases phase); // Indicate the start of the given phase.
void EndPhase(Phases phase); // Indicate the end of the given phase.
#if MEASURE_CLRAPI_CALLS
// Thin wrappers that call into JitTimer (if present).
inline void CLRApiCallEnter(unsigned apix);
inline void CLRApiCallLeave(unsigned apix);
public:
inline void CLR_API_Enter(API_ICorJitInfo_Names ename);
inline void CLR_API_Leave(API_ICorJitInfo_Names ename);
private:
#endif
#if defined(DEBUG) || defined(INLINE_DATA)
// These variables are associated with maintaining SQM data about compile time.
unsigned __int64 m_compCyclesAtEndOfInlining; // The thread-virtualized cycle count at the end of the inlining phase
// in the current compilation.
unsigned __int64 m_compCycles; // Net cycle count for current compilation
DWORD m_compTickCountAtEndOfInlining; // The result of GetTickCount() (# ms since some epoch marker) at the end of
// the inlining phase in the current compilation.
#endif // defined(DEBUG) || defined(INLINE_DATA)
// Records the SQM-relevant (cycles and tick count). Should be called after inlining is complete.
// (We do this after inlining because this marks the last point at which the JIT is likely to cause
// type-loading and class initialization).
void RecordStateAtEndOfInlining();
// Assumes being called at the end of compilation. Update the SQM state.
void RecordStateAtEndOfCompilation();
public:
#if FUNC_INFO_LOGGING
static LPCWSTR compJitFuncInfoFilename; // If a log file for per-function information is required, this is the
// filename to write it to.
static FILE* compJitFuncInfoFile; // And this is the actual FILE* to write to.
#endif // FUNC_INFO_LOGGING
Compiler* prevCompiler; // Previous compiler on stack for TLS Compiler* linked list for reentrant compilers.
#if MEASURE_NOWAY
void RecordNowayAssert(const char* filename, unsigned line, const char* condStr);
#endif // MEASURE_NOWAY
#ifndef FEATURE_TRACELOGGING
// Should we actually fire the noway assert body and the exception handler?
bool compShouldThrowOnNoway();
#else // FEATURE_TRACELOGGING
// Should we actually fire the noway assert body and the exception handler?
bool compShouldThrowOnNoway(const char* filename, unsigned line);
// Telemetry instance to use per method compilation.
JitTelemetry compJitTelemetry;
// Get common parameters that have to be logged with most telemetry data.
void compGetTelemetryDefaults(const char** assemblyName,
const char** scopeName,
const char** methodName,
unsigned* methodHash);
#endif // !FEATURE_TRACELOGGING
#ifdef DEBUG
private:
NodeToTestDataMap* m_nodeTestData;
static const unsigned FIRST_LOOP_HOIST_CSE_CLASS = 1000;
unsigned m_loopHoistCSEClass; // LoopHoist test annotations turn into CSE requirements; we
// label them with CSE Class #'s starting at FIRST_LOOP_HOIST_CSE_CLASS.
// Currently kept in this.
public:
NodeToTestDataMap* GetNodeTestData()
{
Compiler* compRoot = impInlineRoot();
if (compRoot->m_nodeTestData == nullptr)
{
compRoot->m_nodeTestData = new (getAllocatorDebugOnly()) NodeToTestDataMap(getAllocatorDebugOnly());
}
return compRoot->m_nodeTestData;
}
typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, int> NodeToIntMap;
// Returns the set (i.e., the domain of the result map) of nodes that are keys in m_nodeTestData, and
// currently occur in the AST graph.
NodeToIntMap* FindReachableNodesInNodeTestData();
// Node "from" is being eliminated, and being replaced by node "to". If "from" had any associated
// test data, associate that data with "to".
void TransferTestDataToNode(GenTree* from, GenTree* to);
// These are the methods that test that the various conditions implied by the
// test attributes are satisfied.
void JitTestCheckSSA(); // SSA builder tests.
void JitTestCheckVN(); // Value numbering tests.
#endif // DEBUG
// The "FieldSeqStore", for canonicalizing field sequences. See the definition of FieldSeqStore for
// operations.
FieldSeqStore* m_fieldSeqStore;
FieldSeqStore* GetFieldSeqStore()
{
Compiler* compRoot = impInlineRoot();
if (compRoot->m_fieldSeqStore == nullptr)
{
// Create a CompAllocator that labels sub-structure with CMK_FieldSeqStore, and use that for allocation.
CompAllocator ialloc(getAllocator(CMK_FieldSeqStore));
compRoot->m_fieldSeqStore = new (ialloc) FieldSeqStore(ialloc);
}
return compRoot->m_fieldSeqStore;
}
typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, FieldSeqNode*> NodeToFieldSeqMap;
// Some nodes of "TYP_BYREF" or "TYP_I_IMPL" actually represent the address of a field within a struct, but since
// the offset of the field is zero, there's no "GT_ADD" node. We normally attach a field sequence to the constant
// that is added, but what do we do when that constant is zero, and is thus not present? We use this mechanism to
// attach the field sequence directly to the address node.
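//
// A minimal sketch of the situation this map handles (illustrative only; the struct, field,
// and simplified IR shape below are made up, not an actual dump):
//
//     struct S { int FirstField; ... }; // FirstField lives at offset 0
//     s.FirstField = 5;
//
//     // The address of s.FirstField is just the address of 's' itself: there is no
//     // "+ CNS_INT 0" node to carry FirstField's field sequence, so the sequence is
//     // recorded in m_zeroOffsetFieldMap, keyed by the address node.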
NodeToFieldSeqMap* m_zeroOffsetFieldMap;
NodeToFieldSeqMap* GetZeroOffsetFieldMap()
{
// Don't need to worry about inlining here
if (m_zeroOffsetFieldMap == nullptr)
{
// Create a CompAllocator that labels sub-structure with CMK_ZeroOffsetFieldMap, and use that for
// allocation.
CompAllocator ialloc(getAllocator(CMK_ZeroOffsetFieldMap));
m_zeroOffsetFieldMap = new (ialloc) NodeToFieldSeqMap(ialloc);
}
return m_zeroOffsetFieldMap;
}
// Requires that "op1" is a node of type "TYP_BYREF" or "TYP_I_IMPL". We are dereferencing this with the fields in
// "fieldSeq", whose offsets are required all to be zero. Ensures that any field sequence annotation currently on
// "op1" or its components is augmented by appending "fieldSeq". In practice, if "op1" is a GT_LCL_FLD, it has
// a field sequence as a member; otherwise, it may be the addition of a byref and a constant, where the const
// has a field sequence -- in this case "fieldSeq" is appended to that of the constant; otherwise, we
// record the field sequence using the ZeroOffsetFieldMap described above.
//
// One exception to the above is when "op1" is a node of type "TYP_REF" and is a GT_LCL_VAR.
// This happens when the System.Object vtable pointer is a regular field at offset 0 in System.Private.CoreLib
// on CoreRT. Such a case is handled the same as the default case.
void fgAddFieldSeqForZeroOffset(GenTree* op1, FieldSeqNode* fieldSeq);
NodeToUnsignedMap* m_memorySsaMap[MemoryKindCount];
// In some cases, we want to assign intermediate SSA #'s to memory states, and know what nodes create those memory
// states. (We do this for try blocks, where, if the try block doesn't do a call that loses track of the memory
// state, all the possible memory states are possible initial states of the corresponding catch block(s).)
NodeToUnsignedMap* GetMemorySsaMap(MemoryKind memoryKind)
{
if (memoryKind == GcHeap && byrefStatesMatchGcHeapStates)
{
// Use the same map for GCHeap and ByrefExposed when their states match.
memoryKind = ByrefExposed;
}
assert(memoryKind < MemoryKindCount);
Compiler* compRoot = impInlineRoot();
if (compRoot->m_memorySsaMap[memoryKind] == nullptr)
{
// Create a CompAllocator that labels sub-structure with CMK_MemorySsaMap, and use that for allocation.
CompAllocator ialloc(getAllocator(CMK_MemorySsaMap));
compRoot->m_memorySsaMap[memoryKind] = new (ialloc) NodeToUnsignedMap(ialloc);
}
return compRoot->m_memorySsaMap[memoryKind];
}
// The Refany type is the only struct type whose structure is implicitly assumed by IL. We need its fields.
CORINFO_CLASS_HANDLE m_refAnyClass;
CORINFO_FIELD_HANDLE GetRefanyDataField()
{
if (m_refAnyClass == nullptr)
{
m_refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF);
}
return info.compCompHnd->getFieldInClass(m_refAnyClass, 0);
}
CORINFO_FIELD_HANDLE GetRefanyTypeField()
{
if (m_refAnyClass == nullptr)
{
m_refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF);
}
return info.compCompHnd->getFieldInClass(m_refAnyClass, 1);
}
#if VARSET_COUNTOPS
static BitSetSupport::BitSetOpCounter m_varsetOpCounter;
#endif
#if ALLVARSET_COUNTOPS
static BitSetSupport::BitSetOpCounter m_allvarsetOpCounter;
#endif
static HelperCallProperties s_helperCallProperties;
#ifdef UNIX_AMD64_ABI
static var_types GetTypeFromClassificationAndSizes(SystemVClassificationType classType, int size);
static var_types GetEightByteType(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR& structDesc,
unsigned slotNum);
static void GetStructTypeOffset(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR& structDesc,
var_types* type0,
var_types* type1,
unsigned __int8* offset0,
unsigned __int8* offset1);
void GetStructTypeOffset(CORINFO_CLASS_HANDLE typeHnd,
var_types* type0,
var_types* type1,
unsigned __int8* offset0,
unsigned __int8* offset1);
#endif // defined(UNIX_AMD64_ABI)
void fgMorphMultiregStructArgs(GenTreeCall* call);
GenTree* fgMorphMultiregStructArg(GenTree* arg, fgArgTabEntry* fgEntryPtr);
bool killGCRefs(GenTree* tree);
}; // end of class Compiler
//---------------------------------------------------------------------------------------------------------------------
// GenTreeVisitor: a flexible tree walker implemented using the curiously-recurring-template pattern.
//
// This class implements a configurable walker for IR trees. There are five configuration options (defaults values are
// shown in parentheses):
//
// - ComputeStack (false): when true, the walker will push each node onto the `m_ancestors` stack. "Ancestors" is a bit
// of a misnomer, as the first entry will always be the current node.
//
// - DoPreOrder (false): when true, the walker will invoke `TVisitor::PreOrderVisit` with the current node as an
// argument before visiting the node's operands.
//
// - DoPostOrder (false): when true, the walker will invoke `TVisitor::PostOrderVisit` with the current node as an
// argument after visiting the node's operands.
//
// - DoLclVarsOnly (false): when true, the walker will only invoke `TVisitor::PreOrderVisit` for lclVar nodes.
// `DoPreOrder` must be true if this option is true.
//
// - UseExecutionOrder (false): when true, the walker will visit a node's operands in execution order (e.g. if a
// binary operator has the `GTF_REVERSE_OPS` flag set, the second operand will be
// visited before the first).
//
// At least one of `DoPreOrder` and `DoPostOrder` must be specified.
//
// A simple pre-order visitor might look something like the following:
//
// class CountingVisitor final : public GenTreeVisitor<CountingVisitor>
// {
// public:
// enum
// {
// DoPreOrder = true
// };
//
// unsigned m_count;
//
// CountingVisitor(Compiler* compiler)
// : GenTreeVisitor<CountingVisitor>(compiler), m_count(0)
// {
// }
//
//     Compiler::fgWalkResult PreOrderVisit(GenTree** use, GenTree* user)
//     {
//         m_count++;
//         return Compiler::fgWalkResult::WALK_CONTINUE;
//     }
// };
//
// This visitor would then be used like so:
//
// CountingVisitor countingVisitor(compiler);
// countingVisitor.WalkTree(root);
//
template <typename TVisitor>
class GenTreeVisitor
{
protected:
typedef Compiler::fgWalkResult fgWalkResult;
enum
{
ComputeStack = false,
DoPreOrder = false,
DoPostOrder = false,
DoLclVarsOnly = false,
UseExecutionOrder = false,
};
Compiler* m_compiler;
ArrayStack<GenTree*> m_ancestors;
GenTreeVisitor(Compiler* compiler) : m_compiler(compiler), m_ancestors(compiler->getAllocator(CMK_ArrayStack))
{
assert(compiler != nullptr);
static_assert_no_msg(TVisitor::DoPreOrder || TVisitor::DoPostOrder);
static_assert_no_msg(!TVisitor::DoLclVarsOnly || TVisitor::DoPreOrder);
}
fgWalkResult PreOrderVisit(GenTree** use, GenTree* user)
{
return fgWalkResult::WALK_CONTINUE;
}
fgWalkResult PostOrderVisit(GenTree** use, GenTree* user)
{
return fgWalkResult::WALK_CONTINUE;
}
public:
fgWalkResult WalkTree(GenTree** use, GenTree* user)
{
assert(use != nullptr);
GenTree* node = *use;
if (TVisitor::ComputeStack)
{
m_ancestors.Push(node);
}
fgWalkResult result = fgWalkResult::WALK_CONTINUE;
if (TVisitor::DoPreOrder && !TVisitor::DoLclVarsOnly)
{
result = reinterpret_cast<TVisitor*>(this)->PreOrderVisit(use, user);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
node = *use;
if ((node == nullptr) || (result == fgWalkResult::WALK_SKIP_SUBTREES))
{
goto DONE;
}
}
switch (node->OperGet())
{
// Leaf lclVars
case GT_LCL_VAR:
case GT_LCL_FLD:
case GT_LCL_VAR_ADDR:
case GT_LCL_FLD_ADDR:
if (TVisitor::DoLclVarsOnly)
{
result = reinterpret_cast<TVisitor*>(this)->PreOrderVisit(use, user);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
FALLTHROUGH;
// Leaf nodes
case GT_CATCH_ARG:
case GT_LABEL:
case GT_FTN_ADDR:
case GT_RET_EXPR:
case GT_CNS_INT:
case GT_CNS_LNG:
case GT_CNS_DBL:
case GT_CNS_STR:
case GT_MEMORYBARRIER:
case GT_JMP:
case GT_JCC:
case GT_SETCC:
case GT_NO_OP:
case GT_START_NONGC:
case GT_START_PREEMPTGC:
case GT_PROF_HOOK:
#if !defined(FEATURE_EH_FUNCLETS)
case GT_END_LFIN:
#endif // !FEATURE_EH_FUNCLETS
case GT_PHI_ARG:
case GT_JMPTABLE:
case GT_CLS_VAR:
case GT_CLS_VAR_ADDR:
case GT_ARGPLACE:
case GT_PHYSREG:
case GT_EMITNOP:
case GT_PINVOKE_PROLOG:
case GT_PINVOKE_EPILOG:
case GT_IL_OFFSET:
break;
// Lclvar unary operators
case GT_STORE_LCL_VAR:
case GT_STORE_LCL_FLD:
if (TVisitor::DoLclVarsOnly)
{
result = reinterpret_cast<TVisitor*>(this)->PreOrderVisit(use, user);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
FALLTHROUGH;
// Standard unary operators
case GT_NOT:
case GT_NEG:
case GT_BSWAP:
case GT_BSWAP16:
case GT_COPY:
case GT_RELOAD:
case GT_ARR_LENGTH:
case GT_CAST:
case GT_BITCAST:
case GT_CKFINITE:
case GT_LCLHEAP:
case GT_ADDR:
case GT_IND:
case GT_OBJ:
case GT_BLK:
case GT_BOX:
case GT_ALLOCOBJ:
case GT_INIT_VAL:
case GT_JTRUE:
case GT_SWITCH:
case GT_NULLCHECK:
case GT_PUTARG_REG:
case GT_PUTARG_STK:
case GT_PUTARG_TYPE:
case GT_RETURNTRAP:
case GT_NOP:
case GT_FIELD:
case GT_RETURN:
case GT_RETFILT:
case GT_RUNTIMELOOKUP:
case GT_ARR_ADDR:
case GT_KEEPALIVE:
case GT_INC_SATURATE:
{
GenTreeUnOp* const unOp = node->AsUnOp();
if (unOp->gtOp1 != nullptr)
{
result = WalkTree(&unOp->gtOp1, unOp);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
break;
}
// Special nodes
case GT_PHI:
for (GenTreePhi::Use& use : node->AsPhi()->Uses())
{
result = WalkTree(&use.NodeRef(), node);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
break;
case GT_FIELD_LIST:
for (GenTreeFieldList::Use& use : node->AsFieldList()->Uses())
{
result = WalkTree(&use.NodeRef(), node);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
break;
case GT_CMPXCHG:
{
GenTreeCmpXchg* const cmpXchg = node->AsCmpXchg();
result = WalkTree(&cmpXchg->gtOpLocation, cmpXchg);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
result = WalkTree(&cmpXchg->gtOpValue, cmpXchg);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
result = WalkTree(&cmpXchg->gtOpComparand, cmpXchg);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
break;
}
case GT_ARR_ELEM:
{
GenTreeArrElem* const arrElem = node->AsArrElem();
result = WalkTree(&arrElem->gtArrObj, arrElem);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
const unsigned rank = arrElem->gtArrRank;
for (unsigned dim = 0; dim < rank; dim++)
{
result = WalkTree(&arrElem->gtArrInds[dim], arrElem);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
break;
}
case GT_ARR_OFFSET:
{
GenTreeArrOffs* const arrOffs = node->AsArrOffs();
result = WalkTree(&arrOffs->gtOffset, arrOffs);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
result = WalkTree(&arrOffs->gtIndex, arrOffs);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
result = WalkTree(&arrOffs->gtArrObj, arrOffs);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
break;
}
case GT_STORE_DYN_BLK:
{
GenTreeStoreDynBlk* const dynBlock = node->AsStoreDynBlk();
GenTree** op1Use = &dynBlock->gtOp1;
GenTree** op2Use = &dynBlock->gtOp2;
GenTree** op3Use = &dynBlock->gtDynamicSize;
result = WalkTree(op1Use, dynBlock);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
result = WalkTree(op2Use, dynBlock);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
result = WalkTree(op3Use, dynBlock);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
break;
}
case GT_CALL:
{
GenTreeCall* const call = node->AsCall();
if (call->gtCallThisArg != nullptr)
{
result = WalkTree(&call->gtCallThisArg->NodeRef(), call);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
for (GenTreeCall::Use& use : call->Args())
{
result = WalkTree(&use.NodeRef(), call);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
for (GenTreeCall::Use& use : call->LateArgs())
{
result = WalkTree(&use.NodeRef(), call);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
if (call->gtCallType == CT_INDIRECT)
{
if (call->gtCallCookie != nullptr)
{
result = WalkTree(&call->gtCallCookie, call);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
result = WalkTree(&call->gtCallAddr, call);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
if (call->gtControlExpr != nullptr)
{
result = WalkTree(&call->gtControlExpr, call);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
break;
}
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
#if defined(FEATURE_SIMD)
case GT_SIMD:
#endif
#if defined(FEATURE_HW_INTRINSICS)
case GT_HWINTRINSIC:
#endif
if (TVisitor::UseExecutionOrder && node->IsReverseOp())
{
assert(node->AsMultiOp()->GetOperandCount() == 2);
result = WalkTree(&node->AsMultiOp()->Op(2), node);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
result = WalkTree(&node->AsMultiOp()->Op(1), node);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
else
{
for (GenTree** use : node->AsMultiOp()->UseEdges())
{
result = WalkTree(use, node);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
}
break;
#endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
// Binary nodes
default:
{
assert(node->OperIsBinary());
GenTreeOp* const op = node->AsOp();
GenTree** op1Use = &op->gtOp1;
GenTree** op2Use = &op->gtOp2;
if (TVisitor::UseExecutionOrder && node->IsReverseOp())
{
std::swap(op1Use, op2Use);
}
if (*op1Use != nullptr)
{
result = WalkTree(op1Use, op);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
if (*op2Use != nullptr)
{
result = WalkTree(op2Use, op);
if (result == fgWalkResult::WALK_ABORT)
{
return result;
}
}
break;
}
}
DONE:
// Finally, visit the current node
if (TVisitor::DoPostOrder)
{
result = reinterpret_cast<TVisitor*>(this)->PostOrderVisit(use, user);
}
if (TVisitor::ComputeStack)
{
m_ancestors.Pop();
}
return result;
}
};
template <bool computeStack, bool doPreOrder, bool doPostOrder, bool doLclVarsOnly, bool useExecutionOrder>
class GenericTreeWalker final
: public GenTreeVisitor<GenericTreeWalker<computeStack, doPreOrder, doPostOrder, doLclVarsOnly, useExecutionOrder>>
{
public:
enum
{
ComputeStack = computeStack,
DoPreOrder = doPreOrder,
DoPostOrder = doPostOrder,
DoLclVarsOnly = doLclVarsOnly,
UseExecutionOrder = useExecutionOrder,
};
private:
Compiler::fgWalkData* m_walkData;
public:
GenericTreeWalker(Compiler::fgWalkData* walkData)
: GenTreeVisitor<GenericTreeWalker<computeStack, doPreOrder, doPostOrder, doLclVarsOnly, useExecutionOrder>>(
walkData->compiler)
, m_walkData(walkData)
{
assert(walkData != nullptr);
if (computeStack)
{
walkData->parentStack = &this->m_ancestors;
}
}
Compiler::fgWalkResult PreOrderVisit(GenTree** use, GenTree* user)
{
m_walkData->parent = user;
return m_walkData->wtprVisitorFn(use, m_walkData);
}
Compiler::fgWalkResult PostOrderVisit(GenTree** use, GenTree* user)
{
m_walkData->parent = user;
return m_walkData->wtpoVisitorFn(use, m_walkData);
}
};
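// A minimal sketch of how GenericTreeWalker is typically driven (illustrative only; it
// assumes the caller has filled in a Compiler::fgWalkData with its pre-order callback in
// wtprVisitorFn, much as the fgWalkTree* helpers do -- the setup shown is not real driver code):
//
//     Compiler::fgWalkData walkData = {};
//     walkData.compiler      = compiler;
//     walkData.wtprVisitorFn = MyPreOrderCallback; // hypothetical fgWalkPreFn
//
//     // pre-order only, no ancestor stack:
//     GenericTreeWalker<false, true, false, false, false> walker(&walkData);
//     walker.WalkTree(&tree, nullptr);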
// A dominator tree visitor implemented using the curiously-recurring-template pattern, similar to GenTreeVisitor.
template <typename TVisitor>
class DomTreeVisitor
{
protected:
Compiler* const m_compiler;
DomTreeNode* const m_domTree;
DomTreeVisitor(Compiler* compiler, DomTreeNode* domTree) : m_compiler(compiler), m_domTree(domTree)
{
}
void Begin()
{
}
void PreOrderVisit(BasicBlock* block)
{
}
void PostOrderVisit(BasicBlock* block)
{
}
void End()
{
}
public:
//------------------------------------------------------------------------
// WalkTree: Walk the dominator tree, starting from fgFirstBB.
//
// Notes:
// This performs a non-recursive, non-allocating walk of the tree by using
// DomTreeNode's firstChild and nextSibling links to locate the children of
// a node and BasicBlock's bbIDom parent link to go back up the tree when
// no more children are left.
//
// Forests are also supported, provided that all the roots are chained via
// DomTreeNode::nextSibling to fgFirstBB.
//
void WalkTree()
{
static_cast<TVisitor*>(this)->Begin();
for (BasicBlock *next, *block = m_compiler->fgFirstBB; block != nullptr; block = next)
{
static_cast<TVisitor*>(this)->PreOrderVisit(block);
next = m_domTree[block->bbNum].firstChild;
if (next != nullptr)
{
assert(next->bbIDom == block);
continue;
}
do
{
static_cast<TVisitor*>(this)->PostOrderVisit(block);
next = m_domTree[block->bbNum].nextSibling;
if (next != nullptr)
{
assert(next->bbIDom == block->bbIDom);
break;
}
block = block->bbIDom;
} while (block != nullptr);
}
static_cast<TVisitor*>(this)->End();
}
};
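// A minimal sketch of a DomTreeVisitor-based walk, in the same spirit as the CountingVisitor
// example above (illustrative only; the visitor name is made up, and "domTree" stands for
// whatever dominator tree array the caller has already computed):
//
//     class BlockCountingDomVisitor final : public DomTreeVisitor<BlockCountingDomVisitor>
//     {
//     public:
//         unsigned m_count;
//
//         BlockCountingDomVisitor(Compiler* comp, DomTreeNode* domTree)
//             : DomTreeVisitor<BlockCountingDomVisitor>(comp, domTree), m_count(0)
//         {
//         }
//
//         void PreOrderVisit(BasicBlock* block)
//         {
//             m_count++;
//         }
//     };
//
//     BlockCountingDomVisitor visitor(compiler, domTree);
//     visitor.WalkTree();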
// EHClauses: adapter class for forward iteration of the exception handling table using range-based `for`, e.g.:
// for (EHblkDsc* const ehDsc : EHClauses(compiler))
//
class EHClauses
{
EHblkDsc* m_begin;
EHblkDsc* m_end;
// Forward iterator for the exception handling table entries. Iteration is in table order.
//
class iterator
{
EHblkDsc* m_ehDsc;
public:
iterator(EHblkDsc* ehDsc) : m_ehDsc(ehDsc)
{
}
EHblkDsc* operator*() const
{
return m_ehDsc;
}
iterator& operator++()
{
++m_ehDsc;
return *this;
}
bool operator!=(const iterator& i) const
{
return m_ehDsc != i.m_ehDsc;
}
};
public:
EHClauses(Compiler* comp) : m_begin(comp->compHndBBtab), m_end(comp->compHndBBtab + comp->compHndBBtabCount)
{
assert((m_begin != nullptr) || (m_begin == m_end));
}
iterator begin() const
{
return iterator(m_begin);
}
iterator end() const
{
return iterator(m_end);
}
};
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Miscellaneous Compiler stuff XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
// Values used to mark the types a stack slot is used for
const unsigned TYPE_REF_INT = 0x01; // slot used as a 32-bit int
const unsigned TYPE_REF_LNG = 0x02; // slot used as a 64-bit long
const unsigned TYPE_REF_FLT = 0x04; // slot used as a 32-bit float
const unsigned TYPE_REF_DBL = 0x08; // slot used as a 64-bit float
const unsigned TYPE_REF_PTR = 0x10; // slot used as a 32-bit pointer
const unsigned TYPE_REF_BYR = 0x20; // slot used as a byref pointer
const unsigned TYPE_REF_STC = 0x40; // slot used as a struct
const unsigned TYPE_REF_TYPEMASK = 0x7F; // bits that represent the type
// const unsigned TYPE_REF_ADDR_TAKEN = 0x80; // slots address was taken
/*****************************************************************************
*
* Variables to keep track of total code amounts.
*/
#if DISPLAY_SIZES
extern size_t grossVMsize;
extern size_t grossNCsize;
extern size_t totalNCsize;
extern unsigned genMethodICnt;
extern unsigned genMethodNCnt;
extern size_t gcHeaderISize;
extern size_t gcPtrMapISize;
extern size_t gcHeaderNSize;
extern size_t gcPtrMapNSize;
#endif // DISPLAY_SIZES
/*****************************************************************************
*
* Variables to keep track of basic block counts (more data on 1 BB methods)
*/
#if COUNT_BASIC_BLOCKS
extern Histogram bbCntTable;
extern Histogram bbOneBBSizeTable;
#endif
/*****************************************************************************
*
* Used by optFindNaturalLoops to gather statistical information such as
* - total number of natural loops
* - number of loops with 1, 2, ... exit conditions
* - number of loops that have an iterator (for like)
* - number of loops that have a constant iterator
*/
#if COUNT_LOOPS
extern unsigned totalLoopMethods; // counts the total number of methods that have natural loops
extern unsigned maxLoopsPerMethod; // counts the maximum number of loops a method has
extern unsigned totalLoopOverflows; // # of methods that identified more loops than we can represent
extern unsigned totalLoopCount; // counts the total number of natural loops
extern unsigned totalUnnatLoopCount; // counts the total number of (not-necessarily natural) loops
extern unsigned totalUnnatLoopOverflows; // # of methods that identified more unnatural loops than we can represent
extern unsigned iterLoopCount; // counts the # of loops with an iterator (for like)
extern unsigned constIterLoopCount; // counts the # of loops with a constant iterator (for like)
extern bool hasMethodLoops; // flag to keep track if we already counted a method as having loops
extern unsigned loopsThisMethod; // counts the number of loops in the current method
extern bool loopOverflowThisMethod; // True if we exceeded the max # of loops in the method.
extern Histogram loopCountTable; // Histogram of loop counts
extern Histogram loopExitCountTable; // Histogram of loop exit counts
#endif // COUNT_LOOPS
/*****************************************************************************
* Variables to keep track of how many iterations we make in a dataflow pass
*/
#if DATAFLOW_ITER
extern unsigned CSEiterCount; // counts the # of iteration for the CSE dataflow
extern unsigned CFiterCount; // counts the # of iteration for the Const Folding dataflow
#endif // DATAFLOW_ITER
#if MEASURE_BLOCK_SIZE
extern size_t genFlowNodeSize;
extern size_t genFlowNodeCnt;
#endif // MEASURE_BLOCK_SIZE
#if MEASURE_NODE_SIZE
struct NodeSizeStats
{
void Init()
{
genTreeNodeCnt = 0;
genTreeNodeSize = 0;
genTreeNodeActualSize = 0;
}
// Count of tree nodes allocated.
unsigned __int64 genTreeNodeCnt;
// The size we allocate.
unsigned __int64 genTreeNodeSize;
// The actual size of the node. Note that the actual size will likely be smaller
// than the allocated size, but we sometimes use SetOper()/ChangeOper() to change
// a smaller node to a larger one. TODO-Cleanup: add stats on
// SetOper()/ChangeOper() usage to quantify this.
unsigned __int64 genTreeNodeActualSize;
};
extern NodeSizeStats genNodeSizeStats; // Total node size stats
extern NodeSizeStats genNodeSizeStatsPerFunc; // Per-function node size stats
extern Histogram genTreeNcntHist;
extern Histogram genTreeNsizHist;
#endif // MEASURE_NODE_SIZE
/*****************************************************************************
* Count fatal errors (including noway_asserts).
*/
#if MEASURE_FATAL
extern unsigned fatal_badCode;
extern unsigned fatal_noWay;
extern unsigned fatal_implLimitation;
extern unsigned fatal_NOMEM;
extern unsigned fatal_noWayAssertBody;
#ifdef DEBUG
extern unsigned fatal_noWayAssertBodyArgs;
#endif // DEBUG
extern unsigned fatal_NYI;
#endif // MEASURE_FATAL
/*****************************************************************************
* Codegen
*/
#ifdef TARGET_XARCH
const instruction INS_SHIFT_LEFT_LOGICAL = INS_shl;
const instruction INS_SHIFT_RIGHT_LOGICAL = INS_shr;
const instruction INS_SHIFT_RIGHT_ARITHM = INS_sar;
const instruction INS_AND = INS_and;
const instruction INS_OR = INS_or;
const instruction INS_XOR = INS_xor;
const instruction INS_NEG = INS_neg;
const instruction INS_TEST = INS_test;
const instruction INS_MUL = INS_imul;
const instruction INS_SIGNED_DIVIDE = INS_idiv;
const instruction INS_UNSIGNED_DIVIDE = INS_div;
const instruction INS_BREAKPOINT = INS_int3;
const instruction INS_ADDC = INS_adc;
const instruction INS_SUBC = INS_sbb;
const instruction INS_NOT = INS_not;
#endif // TARGET_XARCH
#ifdef TARGET_ARM
const instruction INS_SHIFT_LEFT_LOGICAL = INS_lsl;
const instruction INS_SHIFT_RIGHT_LOGICAL = INS_lsr;
const instruction INS_SHIFT_RIGHT_ARITHM = INS_asr;
const instruction INS_AND = INS_and;
const instruction INS_OR = INS_orr;
const instruction INS_XOR = INS_eor;
const instruction INS_NEG = INS_rsb;
const instruction INS_TEST = INS_tst;
const instruction INS_MUL = INS_mul;
const instruction INS_MULADD = INS_mla;
const instruction INS_SIGNED_DIVIDE = INS_sdiv;
const instruction INS_UNSIGNED_DIVIDE = INS_udiv;
const instruction INS_BREAKPOINT = INS_bkpt;
const instruction INS_ADDC = INS_adc;
const instruction INS_SUBC = INS_sbc;
const instruction INS_NOT = INS_mvn;
const instruction INS_ABS = INS_vabs;
const instruction INS_SQRT = INS_vsqrt;
#endif // TARGET_ARM
#ifdef TARGET_ARM64
const instruction INS_MULADD = INS_madd;
inline const instruction INS_BREAKPOINT_osHelper()
{
// GDB needs the encoding of brk #0
// Windbg needs the encoding of brk #F000
return TargetOS::IsUnix ? INS_brk_unix : INS_brk_windows;
}
#define INS_BREAKPOINT INS_BREAKPOINT_osHelper()
const instruction INS_ABS = INS_fabs;
const instruction INS_SQRT = INS_fsqrt;
#endif // TARGET_ARM64
/*****************************************************************************/
extern const BYTE genTypeSizes[];
extern const BYTE genTypeAlignments[];
extern const BYTE genTypeStSzs[];
extern const BYTE genActualTypes[];
/*****************************************************************************/
#ifdef DEBUG
void dumpConvertedVarSet(Compiler* comp, VARSET_VALARG_TP vars);
#endif // DEBUG
#include "compiler.hpp" // All the shared inline functions
/*****************************************************************************/
#endif //_COMPILER_H_
/*****************************************************************************/
| 1 |
dotnet/runtime | 65,922 | Rely on PGO for isinst/castclass | Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | EgorBo | "2022-02-26T22:13:46Z" | "2022-04-01T21:25:56Z" | 063d08018602291f8dbd9d9b6055b8e1af874cb8 | f249a3d58b0b2bb55adca1a56c5a39cf446437b7 | Rely on PGO for isinst/castclass. Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | ./src/coreclr/jit/importer.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Importer XX
XX XX
XX Imports the given method and converts it to semantic trees XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif
#include "corexcep.h"
#define Verify(cond, msg) \
do \
{ \
if (!(cond)) \
{ \
verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \
} \
} while (0)
#define VerifyOrReturn(cond, msg) \
do \
{ \
if (!(cond)) \
{ \
verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \
return; \
} \
} while (0)
#define VerifyOrReturnSpeculative(cond, msg, speculative) \
do \
{ \
if (speculative) \
{ \
if (!(cond)) \
{ \
return false; \
} \
} \
else \
{ \
if (!(cond)) \
{ \
verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \
return false; \
} \
} \
} while (0)
/*****************************************************************************/
void Compiler::impInit()
{
impStmtList = impLastStmt = nullptr;
#ifdef DEBUG
impInlinedCodeSize = 0;
#endif // DEBUG
}
/*****************************************************************************
*
* Pushes the given tree on the stack.
*/
void Compiler::impPushOnStack(GenTree* tree, typeInfo ti)
{
/* Check for overflow. If inlining, we may be using a bigger stack */
if ((verCurrentState.esStackDepth >= info.compMaxStack) &&
(verCurrentState.esStackDepth >= impStkSize || ((compCurBB->bbFlags & BBF_IMPORTED) == 0)))
{
BADCODE("stack overflow");
}
#ifdef DEBUG
// If we are pushing a struct, make certain we know the precise type!
if (tree->TypeGet() == TYP_STRUCT)
{
assert(ti.IsType(TI_STRUCT));
CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandle();
assert(clsHnd != NO_CLASS_HANDLE);
}
#endif // DEBUG
verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = ti;
verCurrentState.esStack[verCurrentState.esStackDepth++].val = tree;
if ((tree->gtType == TYP_LONG) && (compLongUsed == false))
{
compLongUsed = true;
}
else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
{
compFloatingPointUsed = true;
}
}
inline void Compiler::impPushNullObjRefOnStack()
{
impPushOnStack(gtNewIconNode(0, TYP_REF), typeInfo(TI_NULL));
}
// This method gets called when we run into unverifiable code
// (and we are verifying the method)
inline void Compiler::verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* msg) DEBUGARG(const char* file)
DEBUGARG(unsigned line))
{
#ifdef DEBUG
const char* tail = strrchr(file, '\\');
if (tail)
{
file = tail + 1;
}
if (JitConfig.JitBreakOnUnsafeCode())
{
assert(!"Unsafe code detected");
}
#endif
JITLOG((LL_INFO10000, "Detected unsafe code: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
msg, info.compFullName, impCurOpcName, impCurOpcOffs));
if (compIsForImportOnly())
{
JITLOG((LL_ERROR, "Verification failure: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
msg, info.compFullName, impCurOpcName, impCurOpcOffs));
verRaiseVerifyException(INDEBUG(msg) DEBUGARG(file) DEBUGARG(line));
}
}
inline void DECLSPEC_NORETURN Compiler::verRaiseVerifyException(INDEBUG(const char* msg) DEBUGARG(const char* file)
DEBUGARG(unsigned line))
{
JITLOG((LL_ERROR, "Verification failure: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
msg, info.compFullName, impCurOpcName, impCurOpcOffs));
#ifdef DEBUG
// BreakIfDebuggerPresent();
if (getBreakOnBadCode())
{
assert(!"Typechecking error");
}
#endif
RaiseException(SEH_VERIFICATION_EXCEPTION, EXCEPTION_NONCONTINUABLE, 0, nullptr);
UNREACHABLE();
}
// Helper function that tells us whether the IL instruction at the given address
// consumes an address from the top of the stack. We use it to avoid marking
// locals as lvAddrTaken unnecessarily.
bool Compiler::impILConsumesAddr(const BYTE* codeAddr)
{
assert(!compIsForInlining());
OPCODE opcode;
opcode = (OPCODE)getU1LittleEndian(codeAddr);
switch (opcode)
{
// case CEE_LDFLDA: We're taking this one out as if you have a sequence
// like
//
// ldloca.0
// ldflda whatever
//
// of a primitive-like struct, you end up after morphing with the addr of a local
// that's not marked as address-taken, which is wrong. Also, ldflda is usually used
// for structs that contain other structs, which isn't a case we handle very
// well now for other reasons.
case CEE_LDFLD:
{
// We won't collapse small fields. This is probably not the right place to have this
// check, but we're only using the function for this purpose, and it is easy to factor
// it out if we need to do so.
CORINFO_RESOLVED_TOKEN resolvedToken;
impResolveToken(codeAddr + sizeof(__int8), &resolvedToken, CORINFO_TOKENKIND_Field);
var_types lclTyp = JITtype2varType(info.compCompHnd->getFieldType(resolvedToken.hField));
// Preserve 'small' int types
if (!varTypeIsSmall(lclTyp))
{
lclTyp = genActualType(lclTyp);
}
if (varTypeIsSmall(lclTyp))
{
return false;
}
return true;
}
default:
break;
}
return false;
}
void Compiler::impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind)
{
pResolvedToken->tokenContext = impTokenLookupContextHandle;
pResolvedToken->tokenScope = info.compScopeHnd;
pResolvedToken->token = getU4LittleEndian(addr);
pResolvedToken->tokenType = kind;
info.compCompHnd->resolveToken(pResolvedToken);
}
/*****************************************************************************
*
* Pop one tree from the stack.
*/
StackEntry Compiler::impPopStack()
{
if (verCurrentState.esStackDepth == 0)
{
BADCODE("stack underflow");
}
return verCurrentState.esStack[--verCurrentState.esStackDepth];
}
/*****************************************************************************
*
* Peep at n'th (0-based) tree on the top of the stack.
*/
StackEntry& Compiler::impStackTop(unsigned n)
{
if (verCurrentState.esStackDepth <= n)
{
BADCODE("stack underflow");
}
return verCurrentState.esStack[verCurrentState.esStackDepth - n - 1];
}
unsigned Compiler::impStackHeight()
{
return verCurrentState.esStackDepth;
}
/*****************************************************************************
* Some of the trees are spilled specially. While unspilling them, or
* making a copy, these need to be handled specially. The function
* enumerates the operators possible after spilling.
*/
#ifdef DEBUG // only used in asserts
static bool impValidSpilledStackEntry(GenTree* tree)
{
if (tree->gtOper == GT_LCL_VAR)
{
return true;
}
if (tree->OperIsConst())
{
return true;
}
return false;
}
#endif
/*****************************************************************************
*
* The following logic is used to save/restore stack contents.
* If 'copy' is true, then we make a copy of the trees on the stack. These
* have to all be cloneable/spilled values.
*/
void Compiler::impSaveStackState(SavedStack* savePtr, bool copy)
{
savePtr->ssDepth = verCurrentState.esStackDepth;
if (verCurrentState.esStackDepth)
{
savePtr->ssTrees = new (this, CMK_ImpStack) StackEntry[verCurrentState.esStackDepth];
size_t saveSize = verCurrentState.esStackDepth * sizeof(*savePtr->ssTrees);
if (copy)
{
StackEntry* table = savePtr->ssTrees;
/* Make a fresh copy of all the stack entries */
for (unsigned level = 0; level < verCurrentState.esStackDepth; level++, table++)
{
table->seTypeInfo = verCurrentState.esStack[level].seTypeInfo;
GenTree* tree = verCurrentState.esStack[level].val;
assert(impValidSpilledStackEntry(tree));
switch (tree->gtOper)
{
case GT_CNS_INT:
case GT_CNS_LNG:
case GT_CNS_DBL:
case GT_CNS_STR:
case GT_LCL_VAR:
table->val = gtCloneExpr(tree);
break;
default:
assert(!"Bad oper - Not covered by impValidSpilledStackEntry()");
break;
}
}
}
else
{
memcpy(savePtr->ssTrees, verCurrentState.esStack, saveSize);
}
}
}
void Compiler::impRestoreStackState(SavedStack* savePtr)
{
verCurrentState.esStackDepth = savePtr->ssDepth;
if (verCurrentState.esStackDepth)
{
memcpy(verCurrentState.esStack, savePtr->ssTrees,
verCurrentState.esStackDepth * sizeof(*verCurrentState.esStack));
}
}
//------------------------------------------------------------------------
// impBeginTreeList: Get the tree list started for a new basic block.
//
inline void Compiler::impBeginTreeList()
{
assert(impStmtList == nullptr && impLastStmt == nullptr);
}
/*****************************************************************************
*
* Store the given start and end stmt in the given basic block. This is
* mostly called by impEndTreeList(BasicBlock *block). It is called
* directly only for handling CEE_LEAVEs out of finally-protected try's.
*/
inline void Compiler::impEndTreeList(BasicBlock* block, Statement* firstStmt, Statement* lastStmt)
{
/* Make the list circular, so that we can easily walk it backwards */
firstStmt->SetPrevStmt(lastStmt);
/* Store the tree list in the basic block */
block->bbStmtList = firstStmt;
/* The block should not already be marked as imported */
assert((block->bbFlags & BBF_IMPORTED) == 0);
block->bbFlags |= BBF_IMPORTED;
}
inline void Compiler::impEndTreeList(BasicBlock* block)
{
if (impStmtList == nullptr)
{
// The block should not already be marked as imported.
assert((block->bbFlags & BBF_IMPORTED) == 0);
// Empty block. Just mark it as imported.
block->bbFlags |= BBF_IMPORTED;
}
else
{
impEndTreeList(block, impStmtList, impLastStmt);
}
#ifdef DEBUG
if (impLastILoffsStmt != nullptr)
{
impLastILoffsStmt->SetLastILOffset(compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs);
impLastILoffsStmt = nullptr;
}
#endif
impStmtList = impLastStmt = nullptr;
}
/*****************************************************************************
*
* Check that storing the given tree doesn't mess up the semantic order. Note
* that this has only limited value as we can only check [0..chkLevel).
*/
inline void Compiler::impAppendStmtCheck(Statement* stmt, unsigned chkLevel)
{
#ifndef DEBUG
return;
#else
if (chkLevel == (unsigned)CHECK_SPILL_ALL)
{
chkLevel = verCurrentState.esStackDepth;
}
if (verCurrentState.esStackDepth == 0 || chkLevel == 0 || chkLevel == (unsigned)CHECK_SPILL_NONE)
{
return;
}
GenTree* tree = stmt->GetRootNode();
// Calls can only be appended if there are no GTF_GLOB_EFFECT trees on the stack
if (tree->gtFlags & GTF_CALL)
{
for (unsigned level = 0; level < chkLevel; level++)
{
assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_EFFECT) == 0);
}
}
if (tree->gtOper == GT_ASG)
{
// For an assignment to a local variable, all references to that
// variable have to be spilled. If it is aliased, all calls and
// indirect accesses have to be spilled.
if (tree->AsOp()->gtOp1->gtOper == GT_LCL_VAR)
{
unsigned lclNum = tree->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum();
for (unsigned level = 0; level < chkLevel; level++)
{
assert(!gtHasRef(verCurrentState.esStack[level].val, lclNum));
assert(!lvaTable[lclNum].IsAddressExposed() ||
(verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT) == 0);
}
}
// If the access may be to global memory, all side effects have to be spilled.
else if (tree->AsOp()->gtOp1->gtFlags & GTF_GLOB_REF)
{
for (unsigned level = 0; level < chkLevel; level++)
{
assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_REF) == 0);
}
}
}
#endif
}
//------------------------------------------------------------------------
// impAppendStmt: Append the given statement to the current block's tree list.
//
//
// Arguments:
// stmt - The statement to add.
// chkLevel - [0..chkLevel) is the portion of the stack which we will check
// for interference with stmt and spill if needed.
// checkConsumedDebugInfo - Whether to check for consumption of impCurStmtDI. impCurStmtDI
// marks the debug info of the current boundary and is set when we
// start importing IL at that boundary. If this parameter is true,
// then the function checks if 'stmt' has been associated with the
// current boundary, and if so, clears it so that we do not attach
// it to more upcoming statements.
//
void Compiler::impAppendStmt(Statement* stmt, unsigned chkLevel, bool checkConsumedDebugInfo)
{
if (chkLevel == (unsigned)CHECK_SPILL_ALL)
{
chkLevel = verCurrentState.esStackDepth;
}
if ((chkLevel != 0) && (chkLevel != (unsigned)CHECK_SPILL_NONE))
{
assert(chkLevel <= verCurrentState.esStackDepth);
/* If the statement being appended has any side-effects, check the stack
to see if anything needs to be spilled to preserve correct ordering. */
GenTree* expr = stmt->GetRootNode();
GenTreeFlags flags = expr->gtFlags & GTF_GLOB_EFFECT;
// Assignments to (unaliased) locals don't count as a side effect, as
// we handle them specially using impSpillLclRefs(). Temp locals should
// be fine too.
if ((expr->gtOper == GT_ASG) && (expr->AsOp()->gtOp1->gtOper == GT_LCL_VAR) &&
((expr->AsOp()->gtOp1->gtFlags & GTF_GLOB_REF) == 0) && !gtHasLocalsWithAddrOp(expr->AsOp()->gtOp2))
{
GenTreeFlags op2Flags = expr->AsOp()->gtOp2->gtFlags & GTF_GLOB_EFFECT;
assert(flags == (op2Flags | GTF_ASG));
flags = op2Flags;
}
if (flags != 0)
{
bool spillGlobEffects = false;
if ((flags & GTF_CALL) != 0)
{
// If there is a call, we have to spill global refs
spillGlobEffects = true;
}
else if (!expr->OperIs(GT_ASG))
{
if ((flags & GTF_ASG) != 0)
{
// The expression is not an assignment node but it has an assignment side effect, it
// must be an atomic op, HW intrinsic or some other kind of node that stores to memory.
// Since we don't know what it assigns to, we need to spill global refs.
spillGlobEffects = true;
}
}
else
{
GenTree* lhs = expr->gtGetOp1();
GenTree* rhs = expr->gtGetOp2();
if (((rhs->gtFlags | lhs->gtFlags) & GTF_ASG) != 0)
{
// Either side of the assignment node has an assignment side effect.
// Since we don't know what it assigns to, we need to spill global refs.
spillGlobEffects = true;
}
else if ((lhs->gtFlags & GTF_GLOB_REF) != 0)
{
spillGlobEffects = true;
}
}
impSpillSideEffects(spillGlobEffects, chkLevel DEBUGARG("impAppendStmt"));
}
else
{
impSpillSpecialSideEff();
}
}
impAppendStmtCheck(stmt, chkLevel);
impAppendStmt(stmt);
#ifdef FEATURE_SIMD
impMarkContiguousSIMDFieldAssignments(stmt);
#endif
// Once we set the current offset as debug info in an appended tree, we are
// ready to report the following offsets. Note that we need to compare
// offsets here instead of debug info, since we do not set the "is call"
// bit in impCurStmtDI.
if (checkConsumedDebugInfo &&
(impLastStmt->GetDebugInfo().GetLocation().GetOffset() == impCurStmtDI.GetLocation().GetOffset()))
{
impCurStmtOffsSet(BAD_IL_OFFSET);
}
#ifdef DEBUG
if (impLastILoffsStmt == nullptr)
{
impLastILoffsStmt = stmt;
}
if (verbose)
{
printf("\n\n");
gtDispStmt(stmt);
}
#endif
}
//------------------------------------------------------------------------
// impAppendStmt: Add the statement to the current stmts list.
//
// Arguments:
// stmt - the statement to add.
//
inline void Compiler::impAppendStmt(Statement* stmt)
{
if (impStmtList == nullptr)
{
// The stmt is the first in the list.
impStmtList = stmt;
}
else
{
// Append the expression statement to the existing list.
impLastStmt->SetNextStmt(stmt);
stmt->SetPrevStmt(impLastStmt);
}
impLastStmt = stmt;
}
//------------------------------------------------------------------------
// impExtractLastStmt: Extract the last statement from the current stmts list.
//
// Return Value:
// The extracted statement.
//
// Notes:
// It assumes that the stmt will be reinserted later.
//
Statement* Compiler::impExtractLastStmt()
{
assert(impLastStmt != nullptr);
Statement* stmt = impLastStmt;
impLastStmt = impLastStmt->GetPrevStmt();
if (impLastStmt == nullptr)
{
impStmtList = nullptr;
}
return stmt;
}
//-------------------------------------------------------------------------
// impInsertStmtBefore: Insert the given "stmt" before "stmtBefore".
//
// Arguments:
// stmt - a statement to insert;
// stmtBefore - an insertion point to insert "stmt" before.
//
inline void Compiler::impInsertStmtBefore(Statement* stmt, Statement* stmtBefore)
{
assert(stmt != nullptr);
assert(stmtBefore != nullptr);
if (stmtBefore == impStmtList)
{
impStmtList = stmt;
}
else
{
Statement* stmtPrev = stmtBefore->GetPrevStmt();
stmt->SetPrevStmt(stmtPrev);
stmtPrev->SetNextStmt(stmt);
}
stmt->SetNextStmt(stmtBefore);
stmtBefore->SetPrevStmt(stmt);
}
//------------------------------------------------------------------------
// impAppendTree: Append the given expression tree to the current block's tree list.
//
//
// Arguments:
// tree - The tree that will be the root of the newly created statement.
// chkLevel - [0..chkLevel) is the portion of the stack which we will check
// for interference with stmt and spill if needed.
// di - Debug information to associate with the statement.
// checkConsumedDebugInfo - Whether to check for consumption of impCurStmtDI. impCurStmtDI
// marks the debug info of the current boundary and is set when we
// start importing IL at that boundary. If this parameter is true,
// then the function checks if 'stmt' has been associated with the
// current boundary, and if so, clears it so that we do not attach
// it to more upcoming statements.
//
// Return value:
// The newly created statement.
//
Statement* Compiler::impAppendTree(GenTree* tree, unsigned chkLevel, const DebugInfo& di, bool checkConsumedDebugInfo)
{
assert(tree);
/* Allocate an 'expression statement' node */
Statement* stmt = gtNewStmt(tree, di);
/* Append the statement to the current block's stmt list */
impAppendStmt(stmt, chkLevel, checkConsumedDebugInfo);
return stmt;
}
/*****************************************************************************
*
* Insert the given expression tree before "stmtBefore"
*/
void Compiler::impInsertTreeBefore(GenTree* tree, const DebugInfo& di, Statement* stmtBefore)
{
/* Allocate an 'expression statement' node */
Statement* stmt = gtNewStmt(tree, di);
/* Append the statement to the current block's stmt list */
impInsertStmtBefore(stmt, stmtBefore);
}
/*****************************************************************************
*
* Append an assignment of the given value to a temp to the current tree list.
* curLevel is the stack level for which the spill to the temp is being done.
*/
void Compiler::impAssignTempGen(unsigned tmp,
GenTree* val,
unsigned curLevel,
Statement** pAfterStmt, /* = NULL */
const DebugInfo& di, /* = DebugInfo() */
BasicBlock* block /* = NULL */
)
{
GenTree* asg = gtNewTempAssign(tmp, val);
if (!asg->IsNothingNode())
{
if (pAfterStmt)
{
Statement* asgStmt = gtNewStmt(asg, di);
fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
*pAfterStmt = asgStmt;
}
else
{
impAppendTree(asg, curLevel, impCurStmtDI);
}
}
}
/*****************************************************************************
* same as above, but handle the valueclass case too
*/
void Compiler::impAssignTempGen(unsigned tmpNum,
GenTree* val,
CORINFO_CLASS_HANDLE structType,
unsigned curLevel,
Statement** pAfterStmt, /* = NULL */
const DebugInfo& di, /* = DebugInfo() */
BasicBlock* block /* = NULL */
)
{
GenTree* asg;
assert(val->TypeGet() != TYP_STRUCT || structType != NO_CLASS_HANDLE);
if (varTypeIsStruct(val) && (structType != NO_CLASS_HANDLE))
{
assert(tmpNum < lvaCount);
assert(structType != NO_CLASS_HANDLE);
// If the method is not verifiable, the assert below may not hold; at least
// ignore it when verification is turned on, since any block that tries to use
// the temp would have failed verification anyway.
var_types varType = lvaTable[tmpNum].lvType;
assert(varType == TYP_UNDEF || varTypeIsStruct(varType));
lvaSetStruct(tmpNum, structType, false);
varType = lvaTable[tmpNum].lvType;
// Now, set the type of the struct value. Note that lvaSetStruct may modify the type
// of the lclVar to a specialized type (e.g. TYP_SIMD), based on the handle (structType)
// that has been passed in for the value being assigned to the temp, in which case we
// need to set 'val' to that same type.
// Note also that if we always normalized the types of any node that might be a struct
// type, this would not be necessary - but that requires additional JIT/EE interface
// calls that may not actually be required - e.g. if we only access a field of a struct.
GenTree* dst = gtNewLclvNode(tmpNum, varType);
asg = impAssignStruct(dst, val, structType, curLevel, pAfterStmt, di, block);
}
else
{
asg = gtNewTempAssign(tmpNum, val);
}
if (!asg->IsNothingNode())
{
if (pAfterStmt)
{
Statement* asgStmt = gtNewStmt(asg, di);
fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
*pAfterStmt = asgStmt;
}
else
{
impAppendTree(asg, curLevel, impCurStmtDI);
}
}
}
/*****************************************************************************
*
* Pop the given number of values from the stack and return a list node with
* their values.
* The 'prefixTree' argument may optionally contain an argument
* list that is prepended to the list returned from this function.
*
* The notion of prepended is a bit misleading in that the list is backwards
* from the way I would expect: The first element popped is at the end of
* the returned list, and prefixTree is 'before' that, meaning closer to
* the end of the list. To get to prefixTree, you have to walk to the
* end of the list.
*
* For ARG_ORDER_R2L prefixTree is only used to insert extra arguments, as
* such we reverse its meaning such that returnValue has a reversed
* prefixTree at the head of the list.
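*
* For example, with ARG_ORDER_L2R, popping IL arguments a1, a2, a3 (a3 on top
* of the stack) with a prefixTree P yields the use list a1 -> a2 -> a3 -> P;
* with ARG_ORDER_R2L the popped arguments form the same a1 -> a2 -> a3 chain
* and the reversed prefixTree ends up at the head of the returned list.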
*/
GenTreeCall::Use* Compiler::impPopCallArgs(unsigned count, CORINFO_SIG_INFO* sig, GenTreeCall::Use* prefixArgs)
{
assert(sig == nullptr || count == sig->numArgs);
CORINFO_CLASS_HANDLE structType;
GenTreeCall::Use* argList;
if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
{
argList = nullptr;
}
else
{ // ARG_ORDER_L2R
argList = prefixArgs;
}
while (count--)
{
StackEntry se = impPopStack();
typeInfo ti = se.seTypeInfo;
GenTree* temp = se.val;
if (varTypeIsStruct(temp))
{
// Morph trees that aren't already OBJs or MKREFANY to be OBJs
assert(ti.IsType(TI_STRUCT));
structType = ti.GetClassHandleForValueClass();
bool forceNormalization = false;
if (varTypeIsSIMD(temp))
{
// We need to ensure that fgMorphArgs will use the correct struct handle to ensure proper
// ABI handling of this argument.
// Note that this can happen, for example, if we have a SIMD intrinsic that returns a SIMD type
// with a different baseType than we've seen.
// We also need to ensure an OBJ node if we have a FIELD node that might be transformed to LCL_FLD
// or a plain GT_IND.
// TODO-Cleanup: Consider whether we can eliminate all of these cases.
if ((gtGetStructHandleIfPresent(temp) != structType) || temp->OperIs(GT_FIELD))
{
forceNormalization = true;
}
}
#ifdef DEBUG
if (verbose)
{
printf("Calling impNormStructVal on:\n");
gtDispTree(temp);
}
#endif
temp = impNormStructVal(temp, structType, (unsigned)CHECK_SPILL_ALL, forceNormalization);
#ifdef DEBUG
if (verbose)
{
printf("resulting tree:\n");
gtDispTree(temp);
}
#endif
}
/* NOTE: we defer bashing the type for I_IMPL to fgMorphArgs */
argList = gtPrependNewCallArg(temp, argList);
}
if (sig != nullptr)
{
if (sig->retTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR && sig->retType != CORINFO_TYPE_VAR)
{
// Make sure that all valuetypes (including enums) that we push are loaded.
// This is to guarantee that if a GC is triggered from the prestub of this method,
// all valuetypes in the method signature are already loaded.
// We need to be able to find the size of the valuetypes, but we cannot
// do a class-load from within GC.
info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(sig->retTypeSigClass);
}
CORINFO_ARG_LIST_HANDLE sigArgs = sig->args;
GenTreeCall::Use* arg;
for (arg = argList, count = sig->numArgs; count > 0; arg = arg->GetNext(), count--)
{
PREFIX_ASSUME(arg != nullptr);
CORINFO_CLASS_HANDLE classHnd;
CorInfoType corType = strip(info.compCompHnd->getArgType(sig, sigArgs, &classHnd));
var_types jitSigType = JITtype2varType(corType);
if (!impCheckImplicitArgumentCoercion(jitSigType, arg->GetNode()->TypeGet()))
{
BADCODE("the call argument has a type that can't be implicitly converted to the signature type");
}
// insert implied casts (from float to double or double to float)
if ((jitSigType == TYP_DOUBLE) && (arg->GetNode()->TypeGet() == TYP_FLOAT))
{
arg->SetNode(gtNewCastNode(TYP_DOUBLE, arg->GetNode(), false, TYP_DOUBLE));
}
else if ((jitSigType == TYP_FLOAT) && (arg->GetNode()->TypeGet() == TYP_DOUBLE))
{
arg->SetNode(gtNewCastNode(TYP_FLOAT, arg->GetNode(), false, TYP_FLOAT));
}
// insert any widening or narrowing casts for backwards compatibility
arg->SetNode(impImplicitIorI4Cast(arg->GetNode(), jitSigType));
if (corType != CORINFO_TYPE_CLASS && corType != CORINFO_TYPE_BYREF && corType != CORINFO_TYPE_PTR &&
corType != CORINFO_TYPE_VAR)
{
CORINFO_CLASS_HANDLE argRealClass = info.compCompHnd->getArgClass(sig, sigArgs);
if (argRealClass != nullptr)
{
// Make sure that all valuetypes (including enums) that we push are loaded.
// This is to guarantee that if a GC is triggered from the prestub of this method,
// all valuetypes in the method signature are already loaded.
// We need to be able to find the size of the valuetypes, but we cannot
// do a class-load from within GC.
info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(argRealClass);
}
}
const var_types nodeArgType = arg->GetNode()->TypeGet();
if (!varTypeIsStruct(jitSigType) && genTypeSize(nodeArgType) != genTypeSize(jitSigType))
{
assert(!varTypeIsStruct(nodeArgType));
// Some ABIs require precise size information for call arguments less than target pointer size,
// for example arm64 OSX. Create a special node to keep this information until morph
// consumes it into `fgArgInfo`.
GenTree* putArgType = gtNewOperNode(GT_PUTARG_TYPE, jitSigType, arg->GetNode());
arg->SetNode(putArgType);
}
sigArgs = info.compCompHnd->getArgNext(sigArgs);
}
}
if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
{
// Prepend the prefixTree.
// Simple in-place reversal to place argList
// at the end of a reversed prefixTree.
while (prefixArgs != nullptr)
{
GenTreeCall::Use* next = prefixArgs->GetNext();
prefixArgs->SetNext(argList);
argList = prefixArgs;
prefixArgs = next;
}
}
return argList;
}
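// Check if type1 exactly matches type2. This is the base case for the variadic overload below.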
static bool TypeIs(var_types type1, var_types type2)
{
return type1 == type2;
}
// Check if type1 matches any type from the list.
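// For example, TypeIs(type, TYP_INT, TYP_LONG) is true when type is either TYP_INT or TYP_LONG.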
template <typename... T>
static bool TypeIs(var_types type1, var_types type2, T... rest)
{
return TypeIs(type1, type2) || TypeIs(type1, rest...);
}
//------------------------------------------------------------------------
// impCheckImplicitArgumentCoercion: check that the node's type is compatible with
// the signature's type using ECMA implicit argument coercion table.
//
// Arguments:
// sigType - the type in the call signature;
// nodeType - the node type.
//
// Return Value:
// true if they are compatible, false otherwise.
//
// Notes:
// - it is currently allowing byref->long passing, should be fixed in VM;
// - it can't check long -> native int case on 64-bit platforms,
// so the behavior is different depending on the target bitness.
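//  - for example, an int32 node is accepted for a native int (TYP_I_IMPL) parameter,
//    while a float node is rejected for a long parameter.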
//
bool Compiler::impCheckImplicitArgumentCoercion(var_types sigType, var_types nodeType) const
{
if (sigType == nodeType)
{
return true;
}
if (TypeIs(sigType, TYP_BOOL, TYP_UBYTE, TYP_BYTE, TYP_USHORT, TYP_SHORT, TYP_UINT, TYP_INT))
{
if (TypeIs(nodeType, TYP_BOOL, TYP_UBYTE, TYP_BYTE, TYP_USHORT, TYP_SHORT, TYP_UINT, TYP_INT, TYP_I_IMPL))
{
return true;
}
}
else if (TypeIs(sigType, TYP_ULONG, TYP_LONG))
{
if (TypeIs(nodeType, TYP_LONG))
{
return true;
}
}
else if (TypeIs(sigType, TYP_FLOAT, TYP_DOUBLE))
{
if (TypeIs(nodeType, TYP_FLOAT, TYP_DOUBLE))
{
return true;
}
}
else if (TypeIs(sigType, TYP_BYREF))
{
if (TypeIs(nodeType, TYP_I_IMPL))
{
return true;
}
// This condition tolerates such IL:
// ; V00 this ref this class-hnd
// ldarg.0
// call(byref)
if (TypeIs(nodeType, TYP_REF))
{
return true;
}
}
else if (varTypeIsStruct(sigType))
{
if (varTypeIsStruct(nodeType))
{
return true;
}
}
// This condition should not be under `else` because `TYP_I_IMPL`
// intersects with `TYP_LONG` or `TYP_INT`.
if (TypeIs(sigType, TYP_I_IMPL, TYP_U_IMPL))
{
// Note that it allows `ldc.i8 1; call(nint)` on 64-bit platforms,
// but we can't distinguish `nint` from `long` there.
if (TypeIs(nodeType, TYP_I_IMPL, TYP_U_IMPL, TYP_INT, TYP_UINT))
{
return true;
}
// It tolerates IL that ECMA does not allow but that is commonly used.
// Example:
// V02 loc1 struct <RTL_OSVERSIONINFOEX, 32>
// ldloca.s 0x2
// call(native int)
if (TypeIs(nodeType, TYP_BYREF))
{
return true;
}
}
return false;
}
/*****************************************************************************
*
* Pop the given number of values from the stack in reverse order (STDCALL/CDECL etc.)
* The first "skipReverseCount" items are not reversed.
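* For example, given a popped list a1 -> a2 -> a3 -> a4 and skipReverseCount == 1,
* the returned list is a1 -> a4 -> a3 -> a2.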
*/
GenTreeCall::Use* Compiler::impPopReverseCallArgs(unsigned count, CORINFO_SIG_INFO* sig, unsigned skipReverseCount)
{
assert(skipReverseCount <= count);
GenTreeCall::Use* list = impPopCallArgs(count, sig);
// reverse the list
if (list == nullptr || skipReverseCount == count)
{
return list;
}
GenTreeCall::Use* ptr = nullptr; // Initialized to the first node that needs to be reversed
GenTreeCall::Use* lastSkipNode = nullptr; // Will be set to the last node that does not need to be reversed
if (skipReverseCount == 0)
{
ptr = list;
}
else
{
lastSkipNode = list;
// Get to the first node that needs to be reversed
for (unsigned i = 0; i < skipReverseCount - 1; i++)
{
lastSkipNode = lastSkipNode->GetNext();
}
PREFIX_ASSUME(lastSkipNode != nullptr);
ptr = lastSkipNode->GetNext();
}
GenTreeCall::Use* reversedList = nullptr;
do
{
GenTreeCall::Use* tmp = ptr->GetNext();
ptr->SetNext(reversedList);
reversedList = ptr;
ptr = tmp;
} while (ptr != nullptr);
if (skipReverseCount)
{
lastSkipNode->SetNext(reversedList);
return list;
}
else
{
return reversedList;
}
}
//------------------------------------------------------------------------
// impAssignStruct: Create a struct assignment
//
// Arguments:
// dest - the destination of the assignment
// src - the value to be assigned
// structHnd - handle representing the struct type
// curLevel - stack level for which a spill may be being done
// pAfterStmt - statement to insert any additional statements after
// di - debug info to associate with new statements
// block - block to insert any additional statements in
//
// Return Value:
// The tree that should be appended to the statement list that represents the assignment.
//
// Notes:
// Temp assignments may be appended to impStmtList if spilling is necessary.
GenTree* Compiler::impAssignStruct(GenTree* dest,
GenTree* src,
CORINFO_CLASS_HANDLE structHnd,
unsigned curLevel,
Statement** pAfterStmt, /* = nullptr */
const DebugInfo& di, /* = DebugInfo() */
BasicBlock* block /* = nullptr */
)
{
assert(varTypeIsStruct(dest));
DebugInfo usedDI = di;
if (!usedDI.IsValid())
{
usedDI = impCurStmtDI;
}
while (dest->gtOper == GT_COMMA)
{
// Second thing is the struct.
assert(varTypeIsStruct(dest->AsOp()->gtOp2));
// Append all the op1 of GT_COMMA trees before we evaluate op2 of the GT_COMMA tree.
if (pAfterStmt)
{
Statement* newStmt = gtNewStmt(dest->AsOp()->gtOp1, usedDI);
fgInsertStmtAfter(block, *pAfterStmt, newStmt);
*pAfterStmt = newStmt;
}
else
{
impAppendTree(dest->AsOp()->gtOp1, curLevel, usedDI); // do the side effect
}
// set dest to the second thing
dest = dest->AsOp()->gtOp2;
}
assert(dest->gtOper == GT_LCL_VAR || dest->gtOper == GT_RETURN || dest->gtOper == GT_FIELD ||
dest->gtOper == GT_IND || dest->gtOper == GT_OBJ || dest->gtOper == GT_INDEX);
// Return a NOP if this is a self-assignment.
if (dest->OperGet() == GT_LCL_VAR && src->OperGet() == GT_LCL_VAR &&
src->AsLclVarCommon()->GetLclNum() == dest->AsLclVarCommon()->GetLclNum())
{
return gtNewNothingNode();
}
// TODO-1stClassStructs: Avoid creating an address if it is not needed,
// or re-creating a Blk node if it is.
GenTree* destAddr;
if (dest->gtOper == GT_IND || dest->OperIsBlk())
{
destAddr = dest->AsOp()->gtOp1;
}
else
{
destAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest);
}
return (impAssignStructPtr(destAddr, src, structHnd, curLevel, pAfterStmt, usedDI, block));
}
//------------------------------------------------------------------------
// impAssignStructPtr: Assign (copy) the structure from 'src' to 'destAddr'.
//
// Arguments:
// destAddr - address of the destination of the assignment
// src - source of the assignment
// structHnd - handle representing the struct type
// curLevel - stack level for which a spill may be being done
// pAfterStmt - statement to insert any additional statements after
// di - debug info for new statements
// block - block to insert any additional statements in
//
// Return Value:
// The tree that should be appended to the statement list that represents the assignment.
//
// Notes:
// Temp assignments may be appended to impStmtList if spilling is necessary.
GenTree* Compiler::impAssignStructPtr(GenTree* destAddr,
GenTree* src,
CORINFO_CLASS_HANDLE structHnd,
unsigned curLevel,
Statement** pAfterStmt, /* = NULL */
const DebugInfo& di, /* = DebugInfo() */
BasicBlock* block /* = NULL */
)
{
GenTree* dest = nullptr;
GenTreeFlags destFlags = GTF_EMPTY;
DebugInfo usedDI = di;
if (!usedDI.IsValid())
{
usedDI = impCurStmtDI;
}
#ifdef DEBUG
#ifdef FEATURE_HW_INTRINSICS
if (src->OperIs(GT_HWINTRINSIC))
{
const GenTreeHWIntrinsic* intrinsic = src->AsHWIntrinsic();
if (HWIntrinsicInfo::IsMultiReg(intrinsic->GetHWIntrinsicId()))
{
assert(src->TypeGet() == TYP_STRUCT);
}
else
{
assert(varTypeIsSIMD(src));
}
}
else
#endif // FEATURE_HW_INTRINSICS
{
assert(src->OperIs(GT_LCL_VAR, GT_LCL_FLD, GT_FIELD, GT_IND, GT_OBJ, GT_CALL, GT_MKREFANY, GT_RET_EXPR,
GT_COMMA) ||
((src->TypeGet() != TYP_STRUCT) && src->OperIsSIMD()));
}
#endif // DEBUG
var_types asgType = src->TypeGet();
if (src->gtOper == GT_CALL)
{
GenTreeCall* srcCall = src->AsCall();
if (srcCall->TreatAsHasRetBufArg(this))
{
// Case of call returning a struct via hidden retbuf arg
CLANG_FORMAT_COMMENT_ANCHOR;
#if !defined(TARGET_ARM)
// Unmanaged instance methods on Windows or Unix X86 need the retbuf arg after the first (this) parameter
if ((TargetOS::IsWindows || compUnixX86Abi()) && srcCall->IsUnmanaged())
{
if (callConvIsInstanceMethodCallConv(srcCall->GetUnmanagedCallConv()))
{
#ifdef TARGET_X86
// The argument list has already been reversed.
// Insert the return buffer as the second-to-last node
// so it will be pushed on to the stack after the user args but before the native this arg
// as required by the native ABI.
GenTreeCall::Use* lastArg = srcCall->gtCallArgs;
if (lastArg == nullptr)
{
srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, srcCall->gtCallArgs);
}
else if (srcCall->GetUnmanagedCallConv() == CorInfoCallConvExtension::Thiscall)
{
// For thiscall, the "this" parameter is not included in the argument list reversal,
// so we need to put the return buffer as the last parameter.
for (; lastArg->GetNext() != nullptr; lastArg = lastArg->GetNext())
;
gtInsertNewCallArgAfter(destAddr, lastArg);
}
else if (lastArg->GetNext() == nullptr)
{
srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, lastArg);
}
else
{
assert(lastArg != nullptr && lastArg->GetNext() != nullptr);
GenTreeCall::Use* secondLastArg = lastArg;
lastArg = lastArg->GetNext();
for (; lastArg->GetNext() != nullptr; secondLastArg = lastArg, lastArg = lastArg->GetNext())
;
assert(secondLastArg->GetNext() != nullptr);
gtInsertNewCallArgAfter(destAddr, secondLastArg);
}
#else
GenTreeCall::Use* thisArg = gtInsertNewCallArgAfter(destAddr, srcCall->gtCallArgs);
#endif
}
else
{
#ifdef TARGET_X86
// The argument list has already been reversed.
// Insert the return buffer as the last node so it will be pushed on to the stack last
// as required by the native ABI.
GenTreeCall::Use* lastArg = srcCall->gtCallArgs;
if (lastArg == nullptr)
{
srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, srcCall->gtCallArgs);
}
else
{
for (; lastArg->GetNext() != nullptr; lastArg = lastArg->GetNext())
;
gtInsertNewCallArgAfter(destAddr, lastArg);
}
#else
// insert the return value buffer into the argument list as first byref parameter
srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, srcCall->gtCallArgs);
#endif
}
}
else
#endif // !defined(TARGET_ARM)
{
// insert the return value buffer into the argument list as first byref parameter
srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, srcCall->gtCallArgs);
}
// now returns void, not a struct
src->gtType = TYP_VOID;
// return the morphed call node
return src;
}
else
{
// Case of call returning a struct in one or more registers.
var_types returnType = (var_types)srcCall->gtReturnType;
// First we try to change this to "LclVar/LclFld = call"
//
if ((destAddr->gtOper == GT_ADDR) && (destAddr->AsOp()->gtOp1->gtOper == GT_LCL_VAR))
{
// If it is a multi-reg struct return, don't change the oper to GT_LCL_FLD.
// That is, the IR will be of the form lclVar = call for multi-reg return
//
GenTreeLclVar* lcl = destAddr->AsOp()->gtOp1->AsLclVar();
unsigned lclNum = lcl->GetLclNum();
LclVarDsc* varDsc = lvaGetDesc(lclNum);
if (src->AsCall()->HasMultiRegRetVal())
{
// Mark the struct LclVar as used in a MultiReg return context
// which currently makes it non promotable.
// TODO-1stClassStructs: Eliminate this pessimization when we can more generally
// handle multireg returns.
lcl->gtFlags |= GTF_DONT_CSE;
varDsc->lvIsMultiRegRet = true;
}
dest = lcl;
#if defined(TARGET_ARM)
// TODO-Cleanup: This should have been taken care of in the above HasMultiRegRetVal() case,
// but that method has not been updated to include ARM.
impMarkLclDstNotPromotable(lclNum, src, structHnd);
lcl->gtFlags |= GTF_DONT_CSE;
#elif defined(UNIX_AMD64_ABI)
// Not allowed for FEATURE_CORCLR which is the only SKU available for System V OSs.
assert(!src->AsCall()->IsVarargs() && "varargs not allowed for System V OSs.");
// Make the struct non promotable. The eightbytes could contain multiple fields.
// TODO-1stClassStructs: Eliminate this pessimization when we can more generally
// handle multireg returns.
// TODO-Cleanup: Why is this needed here? This seems that it will set this even for
// non-multireg returns.
lcl->gtFlags |= GTF_DONT_CSE;
varDsc->lvIsMultiRegRet = true;
#endif
}
else // we don't have a GT_ADDR of a GT_LCL_VAR
{
// !!! The destination could be on stack. !!!
// This flag will let us choose the correct write barrier.
asgType = returnType;
destFlags = GTF_IND_TGTANYWHERE;
}
}
}
else if (src->gtOper == GT_RET_EXPR)
{
GenTreeCall* call = src->AsRetExpr()->gtInlineCandidate->AsCall();
noway_assert(call->gtOper == GT_CALL);
if (call->HasRetBufArg())
{
// insert the return value buffer into the argument list as first byref parameter
call->gtCallArgs = gtPrependNewCallArg(destAddr, call->gtCallArgs);
// now returns void, not a struct
src->gtType = TYP_VOID;
call->gtType = TYP_VOID;
// We already have appended the write to 'dest' GT_CALL's args
// So now we just return an empty node (pruning the GT_RET_EXPR)
return src;
}
else
{
// Case of inline method returning a struct in one or more registers.
// We won't need a return buffer
asgType = src->gtType;
if ((destAddr->gtOper != GT_ADDR) || (destAddr->AsOp()->gtOp1->gtOper != GT_LCL_VAR))
{
// !!! The destination could be on stack. !!!
// This flag will let us choose the correct write barrier.
destFlags = GTF_IND_TGTANYWHERE;
}
}
}
else if (src->OperIsBlk())
{
asgType = impNormStructType(structHnd);
if (src->gtOper == GT_OBJ)
{
assert(src->AsObj()->GetLayout()->GetClassHandle() == structHnd);
}
}
else if (src->gtOper == GT_INDEX)
{
asgType = impNormStructType(structHnd);
assert(src->AsIndex()->gtStructElemClass == structHnd);
}
else if (src->gtOper == GT_MKREFANY)
{
// Since we are assigning the result of a GT_MKREFANY,
// "destAddr" must point to a refany.
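// A refany (TypedReference) has two fields: the data pointer at offset 0 (asserted
// below) and the type handle at OFFSETOF__CORINFO_TypedReference__type; the code
// below assigns each field separately.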
GenTree* destAddrClone;
destAddr =
impCloneExpr(destAddr, &destAddrClone, structHnd, curLevel, pAfterStmt DEBUGARG("MKREFANY assignment"));
assert(OFFSETOF__CORINFO_TypedReference__dataPtr == 0);
assert(destAddr->gtType == TYP_I_IMPL || destAddr->gtType == TYP_BYREF);
fgAddFieldSeqForZeroOffset(destAddr, GetFieldSeqStore()->CreateSingleton(GetRefanyDataField()));
GenTree* ptrSlot = gtNewOperNode(GT_IND, TYP_I_IMPL, destAddr);
GenTreeIntCon* typeFieldOffset = gtNewIconNode(OFFSETOF__CORINFO_TypedReference__type, TYP_I_IMPL);
typeFieldOffset->gtFieldSeq = GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField());
GenTree* typeSlot =
gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewOperNode(GT_ADD, destAddr->gtType, destAddrClone, typeFieldOffset));
// append the assign of the pointer value
GenTree* asg = gtNewAssignNode(ptrSlot, src->AsOp()->gtOp1);
if (pAfterStmt)
{
Statement* newStmt = gtNewStmt(asg, usedDI);
fgInsertStmtAfter(block, *pAfterStmt, newStmt);
*pAfterStmt = newStmt;
}
else
{
impAppendTree(asg, curLevel, usedDI);
}
// return the assign of the type value, to be appended
return gtNewAssignNode(typeSlot, src->AsOp()->gtOp2);
}
else if (src->gtOper == GT_COMMA)
{
// The second thing is the struct or its address.
assert(varTypeIsStruct(src->AsOp()->gtOp2) || src->AsOp()->gtOp2->gtType == TYP_BYREF);
if (pAfterStmt)
{
// Insert op1 after '*pAfterStmt'
Statement* newStmt = gtNewStmt(src->AsOp()->gtOp1, usedDI);
fgInsertStmtAfter(block, *pAfterStmt, newStmt);
*pAfterStmt = newStmt;
}
else if (impLastStmt != nullptr)
{
// Do the side-effect as a separate statement.
impAppendTree(src->AsOp()->gtOp1, curLevel, usedDI);
}
else
{
// In this case we have neither been given a statement to insert after, nor are we
// in the importer where we can append the side effect.
// Instead, we're going to sink the assignment below the COMMA.
src->AsOp()->gtOp2 =
impAssignStructPtr(destAddr, src->AsOp()->gtOp2, structHnd, curLevel, pAfterStmt, usedDI, block);
return src;
}
// Evaluate the second thing using recursion.
return impAssignStructPtr(destAddr, src->AsOp()->gtOp2, structHnd, curLevel, pAfterStmt, usedDI, block);
}
else if (src->IsLocal())
{
asgType = src->TypeGet();
}
else if (asgType == TYP_STRUCT)
{
// It should already have the appropriate type.
assert(asgType == impNormStructType(structHnd));
}
if ((dest == nullptr) && (destAddr->OperGet() == GT_ADDR))
{
GenTree* destNode = destAddr->gtGetOp1();
// If the actual destination is a local, a GT_INDEX or a block node, or is a node that
// will be morphed, don't insert an OBJ(ADDR) if it already has the right type.
if (destNode->OperIs(GT_LCL_VAR, GT_INDEX) || destNode->OperIsBlk())
{
var_types destType = destNode->TypeGet();
// If one or both types are TYP_STRUCT (one may not yet be normalized), they are compatible
// iff their handles are the same.
// Otherwise, they are compatible if their types are the same.
bool typesAreCompatible =
((destType == TYP_STRUCT) || (asgType == TYP_STRUCT))
? ((gtGetStructHandleIfPresent(destNode) == structHnd) && varTypeIsStruct(asgType))
: (destType == asgType);
if (typesAreCompatible)
{
dest = destNode;
if (destType != TYP_STRUCT)
{
// Use a normalized type if available. We know from above that they're equivalent.
asgType = destType;
}
}
}
}
if (dest == nullptr)
{
if (asgType == TYP_STRUCT)
{
dest = gtNewObjNode(structHnd, destAddr);
gtSetObjGcInfo(dest->AsObj());
// Although an obj as a call argument was always assumed to be a globRef
// (which is itself overly conservative), that is not true of the operands
// of a block assignment.
dest->gtFlags &= ~GTF_GLOB_REF;
dest->gtFlags |= (destAddr->gtFlags & GTF_GLOB_REF);
}
else
{
dest = gtNewOperNode(GT_IND, asgType, destAddr);
}
}
if (dest->OperIs(GT_LCL_VAR) &&
(src->IsMultiRegNode() ||
(src->OperIs(GT_RET_EXPR) && src->AsRetExpr()->gtInlineCandidate->AsCall()->HasMultiRegRetVal())))
{
if (lvaEnregMultiRegVars && varTypeIsStruct(dest))
{
dest->AsLclVar()->SetMultiReg();
}
if (src->OperIs(GT_CALL))
{
lvaGetDesc(dest->AsLclVar())->lvIsMultiRegRet = true;
}
}
dest->gtFlags |= destFlags;
destFlags = dest->gtFlags;
// return an assignment node, to be appended
GenTree* asgNode = gtNewAssignNode(dest, src);
gtBlockOpInit(asgNode, dest, src, false);
// TODO-1stClassStructs: Clean up the settings of GTF_DONT_CSE on the lhs
// of assignments.
if ((destFlags & GTF_DONT_CSE) == 0)
{
dest->gtFlags &= ~(GTF_DONT_CSE);
}
return asgNode;
}
/*****************************************************************************
Given a struct value, and the class handle for that structure, return
the expression for the address for that structure value.
willDeref - whether the caller guarantees to dereference the returned pointer.
*/
GenTree* Compiler::impGetStructAddr(GenTree* structVal,
CORINFO_CLASS_HANDLE structHnd,
unsigned curLevel,
bool willDeref)
{
assert(varTypeIsStruct(structVal) || eeIsValueClass(structHnd));
var_types type = structVal->TypeGet();
genTreeOps oper = structVal->gtOper;
if (oper == GT_OBJ && willDeref)
{
assert(structVal->AsObj()->GetLayout()->GetClassHandle() == structHnd);
return (structVal->AsObj()->Addr());
}
else if (oper == GT_CALL || oper == GT_RET_EXPR || oper == GT_OBJ || oper == GT_MKREFANY ||
structVal->OperIsSimdOrHWintrinsic())
{
unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
// The 'return value' is now the temp itself
type = genActualType(lvaTable[tmpNum].TypeGet());
GenTree* temp = gtNewLclvNode(tmpNum, type);
temp = gtNewOperNode(GT_ADDR, TYP_BYREF, temp);
return temp;
}
else if (oper == GT_COMMA)
{
assert(structVal->AsOp()->gtOp2->gtType == type); // Second thing is the struct
Statement* oldLastStmt = impLastStmt;
structVal->AsOp()->gtOp2 = impGetStructAddr(structVal->AsOp()->gtOp2, structHnd, curLevel, willDeref);
structVal->gtType = TYP_BYREF;
if (oldLastStmt != impLastStmt)
{
// Some temp assignment statement was placed on the statement list
// for Op2, but that would be out of order with op1, so we need to
// spill op1 onto the statement list after whatever was last
// before we recursed on Op2 (i.e. before whatever Op2 appended).
Statement* beforeStmt;
if (oldLastStmt == nullptr)
{
// The op1 stmt should be the first in the list.
beforeStmt = impStmtList;
}
else
{
// Insert after the oldLastStmt before the first inserted for op2.
beforeStmt = oldLastStmt->GetNextStmt();
}
impInsertTreeBefore(structVal->AsOp()->gtOp1, impCurStmtDI, beforeStmt);
structVal->AsOp()->gtOp1 = gtNewNothingNode();
}
return (structVal);
}
return (gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
}
//------------------------------------------------------------------------
// impNormStructType: Normalize the type of a (known to be) struct class handle.
//
// Arguments:
// structHnd - The class handle for the struct type of interest.
// pSimdBaseJitType - (optional, default nullptr) - if non-null, and the struct is a SIMD
// type, set to the SIMD base JIT type
//
// Return Value:
// The JIT type for the struct (e.g. TYP_STRUCT, or TYP_SIMD*).
// It may also modify the compFloatingPointUsed flag if the type is a SIMD type.
//
// Notes:
// Normalizing the type involves examining the struct type to determine if it should
// be modified to one that is handled specially by the JIT, possibly being a candidate
// for full enregistration, e.g. TYP_SIMD16. If the size of the struct is already known
// call structSizeMightRepresentSIMDType to determine whether this API needs to be called.
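// For example, a 16-byte struct recognized as a SIMD type is normalized to TYP_SIMD16.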
var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd, CorInfoType* pSimdBaseJitType)
{
assert(structHnd != NO_CLASS_HANDLE);
var_types structType = TYP_STRUCT;
#ifdef FEATURE_SIMD
const DWORD structFlags = info.compCompHnd->getClassAttribs(structHnd);
// Don't bother if the struct contains GC references or byrefs; it can't be a SIMD type.
if ((structFlags & (CORINFO_FLG_CONTAINS_GC_PTR | CORINFO_FLG_BYREF_LIKE)) == 0)
{
unsigned originalSize = info.compCompHnd->getClassSize(structHnd);
if (structSizeMightRepresentSIMDType(originalSize))
{
unsigned int sizeBytes;
CorInfoType simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(structHnd, &sizeBytes);
if (simdBaseJitType != CORINFO_TYPE_UNDEF)
{
assert(sizeBytes == originalSize);
structType = getSIMDTypeForSize(sizeBytes);
if (pSimdBaseJitType != nullptr)
{
*pSimdBaseJitType = simdBaseJitType;
}
// Also indicate that we use floating point registers.
compFloatingPointUsed = true;
}
}
}
#endif // FEATURE_SIMD
return structType;
}
//------------------------------------------------------------------------
// Compiler::impNormStructVal: Normalize a struct value
//
// Arguments:
// structVal - the node we are going to normalize
// structHnd - the class handle for the node
// curLevel - the current stack level
// forceNormalization - Force the creation of an OBJ node (default is false).
//
// Notes:
// Given struct value 'structVal', make sure it is 'canonical', that is
// it is either:
// - a known struct type (non-TYP_STRUCT, e.g. TYP_SIMD8)
// - an OBJ or a MKREFANY node, or
// - a node (e.g. GT_INDEX) that will be morphed.
// If the node is a CALL or RET_EXPR, a copy will be made to a new temp.
//
GenTree* Compiler::impNormStructVal(GenTree* structVal,
CORINFO_CLASS_HANDLE structHnd,
unsigned curLevel,
bool forceNormalization /*=false*/)
{
assert(forceNormalization || varTypeIsStruct(structVal));
assert(structHnd != NO_CLASS_HANDLE);
var_types structType = structVal->TypeGet();
bool makeTemp = false;
if (structType == TYP_STRUCT)
{
structType = impNormStructType(structHnd);
}
bool alreadyNormalized = false;
GenTreeLclVarCommon* structLcl = nullptr;
genTreeOps oper = structVal->OperGet();
switch (oper)
{
// GT_RETURN and GT_MKREFANY don't capture the handle.
case GT_RETURN:
break;
case GT_MKREFANY:
alreadyNormalized = true;
break;
case GT_CALL:
structVal->AsCall()->gtRetClsHnd = structHnd;
makeTemp = true;
break;
case GT_RET_EXPR:
structVal->AsRetExpr()->gtRetClsHnd = structHnd;
makeTemp = true;
break;
case GT_ARGPLACE:
structVal->AsArgPlace()->gtArgPlaceClsHnd = structHnd;
break;
case GT_INDEX:
// This will be transformed to an OBJ later.
alreadyNormalized = true;
structVal->AsIndex()->gtStructElemClass = structHnd;
structVal->AsIndex()->gtIndElemSize = info.compCompHnd->getClassSize(structHnd);
break;
case GT_FIELD:
// Wrap it in a GT_OBJ, if needed.
structVal->gtType = structType;
if ((structType == TYP_STRUCT) || forceNormalization)
{
structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
}
break;
case GT_LCL_VAR:
case GT_LCL_FLD:
structLcl = structVal->AsLclVarCommon();
// Wrap it in a GT_OBJ.
structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
FALLTHROUGH;
case GT_OBJ:
case GT_BLK:
case GT_ASG:
// These should already have the appropriate type.
assert(structVal->gtType == structType);
alreadyNormalized = true;
break;
case GT_IND:
assert(structVal->gtType == structType);
structVal = gtNewObjNode(structHnd, structVal->gtGetOp1());
alreadyNormalized = true;
break;
#ifdef FEATURE_SIMD
case GT_SIMD:
assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType));
break;
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
assert(structVal->gtType == structType);
assert(varTypeIsSIMD(structVal) ||
HWIntrinsicInfo::IsMultiReg(structVal->AsHWIntrinsic()->GetHWIntrinsicId()));
break;
#endif
case GT_COMMA:
{
// The second thing could be a block node, a GT_FIELD, a GT_SIMD, or a GT_COMMA node.
GenTree* blockNode = structVal->AsOp()->gtOp2;
assert(blockNode->gtType == structType);
// Is this GT_COMMA(op1, GT_COMMA())?
GenTree* parent = structVal;
if (blockNode->OperGet() == GT_COMMA)
{
// Find the last node in the comma chain.
do
{
assert(blockNode->gtType == structType);
parent = blockNode;
blockNode = blockNode->AsOp()->gtOp2;
} while (blockNode->OperGet() == GT_COMMA);
}
if (blockNode->OperGet() == GT_FIELD)
{
// If we have a GT_FIELD then wrap it in a GT_OBJ.
blockNode = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, blockNode));
}
#ifdef FEATURE_SIMD
if (blockNode->OperIsSimdOrHWintrinsic())
{
parent->AsOp()->gtOp2 = impNormStructVal(blockNode, structHnd, curLevel, forceNormalization);
alreadyNormalized = true;
}
else
#endif
{
noway_assert(blockNode->OperIsBlk());
// Sink the GT_COMMA below the blockNode addr.
// That is, GT_COMMA(op1, op2=blockNode) is transformed into
// blockNode(GT_COMMA(TYP_BYREF, op1, op2's op1)).
//
// In case of a chained GT_COMMA case, we sink the last
// GT_COMMA below the blockNode addr.
GenTree* blockNodeAddr = blockNode->AsOp()->gtOp1;
assert(blockNodeAddr->gtType == TYP_BYREF);
GenTree* commaNode = parent;
commaNode->gtType = TYP_BYREF;
commaNode->AsOp()->gtOp2 = blockNodeAddr;
blockNode->AsOp()->gtOp1 = commaNode;
if (parent == structVal)
{
structVal = blockNode;
}
alreadyNormalized = true;
}
}
break;
default:
noway_assert(!"Unexpected node in impNormStructVal()");
break;
}
structVal->gtType = structType;
if (!alreadyNormalized || forceNormalization)
{
if (makeTemp)
{
unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
// The structVal is now the temp itself
structLcl = gtNewLclvNode(tmpNum, structType)->AsLclVarCommon();
structVal = structLcl;
}
if ((forceNormalization || (structType == TYP_STRUCT)) && !structVal->OperIsBlk())
{
// Wrap it in a GT_OBJ
structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
}
}
if (structLcl != nullptr)
{
// An OBJ on an ADDR(LCL_VAR) can never raise an exception,
// so we don't set GTF_EXCEPT here.
if (!lvaIsImplicitByRefLocal(structLcl->GetLclNum()))
{
structVal->gtFlags &= ~GTF_GLOB_REF;
}
}
else if (structVal->OperIsBlk())
{
// In general, an OBJ is an indirection and could raise an exception.
structVal->gtFlags |= GTF_EXCEPT;
}
return structVal;
}
/******************************************************************************/
// Given a type token, generate code that will evaluate to the correct
// handle representation of that token (type handle, field handle, or method handle)
//
// For most cases, the handle is determined at compile-time, and the code
// generated is simply an embedded handle.
//
// Run-time lookup is required if the enclosing method is shared between instantiations
// and the token refers to formal type parameters whose instantiation is not known
// at compile-time.
//
GenTree* Compiler::impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken,
bool* pRuntimeLookup /* = NULL */,
bool mustRestoreHandle /* = false */,
bool importParent /* = false */)
{
assert(!fgGlobalMorph);
CORINFO_GENERICHANDLE_RESULT embedInfo;
info.compCompHnd->embedGenericHandle(pResolvedToken, importParent, &embedInfo);
if (pRuntimeLookup)
{
*pRuntimeLookup = embedInfo.lookup.lookupKind.needsRuntimeLookup;
}
if (mustRestoreHandle && !embedInfo.lookup.lookupKind.needsRuntimeLookup)
{
switch (embedInfo.handleType)
{
case CORINFO_HANDLETYPE_CLASS:
info.compCompHnd->classMustBeLoadedBeforeCodeIsRun((CORINFO_CLASS_HANDLE)embedInfo.compileTimeHandle);
break;
case CORINFO_HANDLETYPE_METHOD:
info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun((CORINFO_METHOD_HANDLE)embedInfo.compileTimeHandle);
break;
case CORINFO_HANDLETYPE_FIELD:
info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
info.compCompHnd->getFieldClass((CORINFO_FIELD_HANDLE)embedInfo.compileTimeHandle));
break;
default:
break;
}
}
// Generate the full lookup tree. May be null if we're abandoning an inline attempt.
GenTree* result = impLookupToTree(pResolvedToken, &embedInfo.lookup, gtTokenToIconFlags(pResolvedToken->token),
embedInfo.compileTimeHandle);
// If we have a result and it requires runtime lookup, wrap it in a runtime lookup node.
if ((result != nullptr) && embedInfo.lookup.lookupKind.needsRuntimeLookup)
{
result = gtNewRuntimeLookup(embedInfo.compileTimeHandle, embedInfo.handleType, result);
}
return result;
}
GenTree* Compiler::impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_LOOKUP* pLookup,
GenTreeFlags handleFlags,
void* compileTimeHandle)
{
if (!pLookup->lookupKind.needsRuntimeLookup)
{
// No runtime lookup is required.
// Access is direct or memory-indirect (of a fixed address) reference
CORINFO_GENERIC_HANDLE handle = nullptr;
void* pIndirection = nullptr;
assert(pLookup->constLookup.accessType != IAT_PPVALUE && pLookup->constLookup.accessType != IAT_RELPVALUE);
if (pLookup->constLookup.accessType == IAT_VALUE)
{
handle = pLookup->constLookup.handle;
}
else if (pLookup->constLookup.accessType == IAT_PVALUE)
{
pIndirection = pLookup->constLookup.addr;
}
GenTree* addr = gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle);
#ifdef DEBUG
size_t handleToTrack;
if (handleFlags == GTF_ICON_TOKEN_HDL)
{
handleToTrack = 0;
}
else
{
handleToTrack = (size_t)compileTimeHandle;
}
if (handle != nullptr)
{
addr->AsIntCon()->gtTargetHandle = handleToTrack;
}
else
{
addr->gtGetOp1()->AsIntCon()->gtTargetHandle = handleToTrack;
}
#endif
return addr;
}
if (pLookup->lookupKind.runtimeLookupKind == CORINFO_LOOKUP_NOT_SUPPORTED)
{
// Runtime does not support inlining of all shapes of runtime lookups
// Inlining has to be aborted in such a case
assert(compIsForInlining());
compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
return nullptr;
}
// Need to use dictionary-based access which depends on the typeContext
// which is only available at runtime, not at compile-time.
return impRuntimeLookupToTree(pResolvedToken, pLookup, compileTimeHandle);
}
#ifdef FEATURE_READYTORUN
GenTree* Compiler::impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup,
GenTreeFlags handleFlags,
void* compileTimeHandle)
{
CORINFO_GENERIC_HANDLE handle = nullptr;
void* pIndirection = nullptr;
assert(pLookup->accessType != IAT_PPVALUE && pLookup->accessType != IAT_RELPVALUE);
if (pLookup->accessType == IAT_VALUE)
{
handle = pLookup->handle;
}
else if (pLookup->accessType == IAT_PVALUE)
{
pIndirection = pLookup->addr;
}
GenTree* addr = gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle);
#ifdef DEBUG
assert((handleFlags == GTF_ICON_CLASS_HDL) || (handleFlags == GTF_ICON_METHOD_HDL));
if (handle != nullptr)
{
addr->AsIntCon()->gtTargetHandle = (size_t)compileTimeHandle;
}
else
{
addr->gtGetOp1()->AsIntCon()->gtTargetHandle = (size_t)compileTimeHandle;
}
#endif // DEBUG
return addr;
}
//------------------------------------------------------------------------
// impIsCastHelperEligibleForClassProbe: Checks whether a tree is a cast helper eligible
// to be profiled and then optimized with PGO data
//
// Arguments:
// tree - the tree object to check
//
// Returns:
// true if the tree is a cast helper eligible to be profiled
//
bool Compiler::impIsCastHelperEligibleForClassProbe(GenTree* tree)
{
if (!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR) || (JitConfig.JitCastProfiling() != 1))
{
return false;
}
if (tree->IsCall() && tree->AsCall()->gtCallType == CT_HELPER)
{
const CorInfoHelpFunc helper = eeGetHelperNum(tree->AsCall()->gtCallMethHnd);
if ((helper == CORINFO_HELP_ISINSTANCEOFINTERFACE) || (helper == CORINFO_HELP_ISINSTANCEOFCLASS) ||
(helper == CORINFO_HELP_CHKCASTCLASS) || (helper == CORINFO_HELP_CHKCASTINTERFACE))
{
return true;
}
}
return false;
}
//------------------------------------------------------------------------
// impIsCastHelperMayHaveProfileData: Checks whether a tree is a cast helper that might
// have profile data
//
// Arguments:
// tree - the tree object to check
//
// Returns:
// true if the tree is a cast helper with potential profile data
//
bool Compiler::impIsCastHelperMayHaveProfileData(GenTree* tree)
{
if (!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBOPT) || (JitConfig.JitCastProfiling() != 1))
{
return false;
}
if (tree->IsCall() && tree->AsCall()->gtCallType == CT_HELPER)
{
const CorInfoHelpFunc helper = eeGetHelperNum(tree->AsCall()->gtCallMethHnd);
if ((helper == CORINFO_HELP_ISINSTANCEOFINTERFACE) || (helper == CORINFO_HELP_ISINSTANCEOFCLASS) ||
(helper == CORINFO_HELP_CHKCASTCLASS) || (helper == CORINFO_HELP_CHKCASTINTERFACE))
{
return true;
}
}
return false;
}
GenTreeCall* Compiler::impReadyToRunHelperToTree(
CORINFO_RESOLVED_TOKEN* pResolvedToken,
CorInfoHelpFunc helper,
var_types type,
GenTreeCall::Use* args /* = nullptr */,
CORINFO_LOOKUP_KIND* pGenericLookupKind /* =NULL. Only used with generics */)
{
CORINFO_CONST_LOOKUP lookup;
if (!info.compCompHnd->getReadyToRunHelper(pResolvedToken, pGenericLookupKind, helper, &lookup))
{
return nullptr;
}
GenTreeCall* op1 = gtNewHelperCallNode(helper, type, args);
op1->setEntryPoint(lookup);
return op1;
}
#endif
GenTree* Compiler::impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
{
GenTree* op1 = nullptr;
switch (pCallInfo->kind)
{
case CORINFO_CALL:
op1 = new (this, GT_FTN_ADDR) GenTreeFptrVal(TYP_I_IMPL, pCallInfo->hMethod);
#ifdef FEATURE_READYTORUN
if (opts.IsReadyToRun())
{
op1->AsFptrVal()->gtEntryPoint = pCallInfo->codePointerLookup.constLookup;
}
#endif
break;
case CORINFO_CALL_CODE_POINTER:
op1 = impLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, GTF_ICON_FTN_ADDR, pCallInfo->hMethod);
break;
default:
noway_assert(!"unknown call kind");
break;
}
return op1;
}
//------------------------------------------------------------------------
// getRuntimeContextTree: find pointer to context for runtime lookup.
//
// Arguments:
// kind - lookup kind.
//
// Return Value:
// Return GenTree pointer to generic shared context.
//
// Notes:
// Reports that the generic context is in use.
GenTree* Compiler::getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind)
{
GenTree* ctxTree = nullptr;
// Collectible types require that, for shared generic code, if we use the generic context parameter,
// we report it. (This is a conservative approach; we could detect some cases, particularly when the
// context parameter is 'this', where we don't need the eager reporting logic.)
lvaGenericsContextInUse = true;
Compiler* pRoot = impInlineRoot();
if (kind == CORINFO_LOOKUP_THISOBJ)
{
// this Object
ctxTree = gtNewLclvNode(pRoot->info.compThisArg, TYP_REF);
ctxTree->gtFlags |= GTF_VAR_CONTEXT;
// context is the method table pointer of the this object
ctxTree = gtNewMethodTableLookup(ctxTree);
}
else
{
assert(kind == CORINFO_LOOKUP_METHODPARAM || kind == CORINFO_LOOKUP_CLASSPARAM);
// Exact method descriptor as passed in
ctxTree = gtNewLclvNode(pRoot->info.compTypeCtxtArg, TYP_I_IMPL);
ctxTree->gtFlags |= GTF_VAR_CONTEXT;
}
return ctxTree;
}
/*****************************************************************************/
/* Import a dictionary lookup to access a handle in code shared between
generic instantiations.
The lookup depends on the typeContext which is only available at
runtime, and not at compile-time.
pLookup->token1 and pLookup->token2 specify the handle that is needed.
The cases are:
1. pLookup->indirections == CORINFO_USEHELPER : Call a helper passing it the
instantiation-specific handle, and the tokens to lookup the handle.
2. pLookup->indirections != CORINFO_USEHELPER :
2a. pLookup->testForNull == false : Dereference the instantiation-specific handle
to get the handle.
2b. pLookup->testForNull == true : Dereference the instantiation-specific handle.
If it is non-NULL, it is the handle required. Else, call a helper
to lookup the handle.
*/
GenTree* Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_LOOKUP* pLookup,
void* compileTimeHandle)
{
GenTree* ctxTree = getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind);
CORINFO_RUNTIME_LOOKUP* pRuntimeLookup = &pLookup->runtimeLookup;
// It's available only via the run-time helper function
if (pRuntimeLookup->indirections == CORINFO_USEHELPER)
{
#ifdef FEATURE_READYTORUN
if (opts.IsReadyToRun())
{
return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
gtNewCallArgs(ctxTree), &pLookup->lookupKind);
}
#endif
return gtNewRuntimeLookupHelperCallNode(pRuntimeLookup, ctxTree, compileTimeHandle);
}
// Slot pointer
GenTree* slotPtrTree = ctxTree;
if (pRuntimeLookup->testForNull)
{
slotPtrTree = impCloneExpr(ctxTree, &ctxTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("impRuntimeLookup slot"));
}
GenTree* indOffTree = nullptr;
GenTree* lastIndOfTree = nullptr;
// Apply repeated indirections
for (WORD i = 0; i < pRuntimeLookup->indirections; i++)
{
if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset))
{
indOffTree = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("impRuntimeLookup indirectOffset"));
}
// The last indirection could be subject to a size check (dynamic dictionary expansion)
bool isLastIndirectionWithSizeCheck =
((i == pRuntimeLookup->indirections - 1) && (pRuntimeLookup->sizeOffset != CORINFO_NO_SIZE_CHECK));
if (i != 0)
{
slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
if (!isLastIndirectionWithSizeCheck)
{
slotPtrTree->gtFlags |= GTF_IND_INVARIANT;
}
}
if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset))
{
slotPtrTree = gtNewOperNode(GT_ADD, TYP_I_IMPL, indOffTree, slotPtrTree);
}
if (pRuntimeLookup->offsets[i] != 0)
{
if (isLastIndirectionWithSizeCheck)
{
lastIndOfTree = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("impRuntimeLookup indirectOffset"));
}
slotPtrTree =
gtNewOperNode(GT_ADD, TYP_I_IMPL, slotPtrTree, gtNewIconNode(pRuntimeLookup->offsets[i], TYP_I_IMPL));
}
}
// No null test required
if (!pRuntimeLookup->testForNull)
{
if (pRuntimeLookup->indirections == 0)
{
return slotPtrTree;
}
slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
if (!pRuntimeLookup->testForFixup)
{
return slotPtrTree;
}
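// The slot may still hold an unresolved fixup, indicated by the lowest bit being set;
// in that case the real value is obtained by dereferencing (slot - 1). Effectively:
// slot = ((slot & 1) == 0) ? slot : *(slot - 1).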
impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark0"));
unsigned slotLclNum = lvaGrabTemp(true DEBUGARG("impRuntimeLookup test"));
impAssignTempGen(slotLclNum, slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr, impCurStmtDI);
GenTree* slot = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
// downcast the pointer to a TYP_INT on 64-bit targets
slot = impImplicitIorI4Cast(slot, TYP_INT);
// Use a GT_AND to check for the lowest bit and indirect if it is set
GenTree* test = gtNewOperNode(GT_AND, TYP_INT, slot, gtNewIconNode(1));
GenTree* relop = gtNewOperNode(GT_EQ, TYP_INT, test, gtNewIconNode(0));
// slot = GT_IND(slot - 1)
slot = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
GenTree* add = gtNewOperNode(GT_ADD, TYP_I_IMPL, slot, gtNewIconNode(-1, TYP_I_IMPL));
GenTree* indir = gtNewOperNode(GT_IND, TYP_I_IMPL, add);
indir->gtFlags |= GTF_IND_NONFAULTING;
indir->gtFlags |= GTF_IND_INVARIANT;
slot = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
GenTree* asg = gtNewAssignNode(slot, indir);
GenTreeColon* colon = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), asg);
GenTreeQmark* qmark = gtNewQmarkNode(TYP_VOID, relop, colon);
impAppendTree(qmark, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);
return gtNewLclvNode(slotLclNum, TYP_I_IMPL);
}
assert(pRuntimeLookup->indirections != 0);
impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark1"));
// Extract the handle
GenTree* handleForNullCheck = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
handleForNullCheck->gtFlags |= GTF_IND_NONFAULTING;
// Call the helper
// - Setup argNode with the pointer to the signature returned by the lookup
GenTree* argNode = gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_GLOBAL_PTR, compileTimeHandle);
GenTreeCall::Use* helperArgs = gtNewCallArgs(ctxTree, argNode);
GenTreeCall* helperCall = gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, helperArgs);
// Check for null and possibly call helper
GenTree* nullCheck = gtNewOperNode(GT_NE, TYP_INT, handleForNullCheck, gtNewIconNode(0, TYP_I_IMPL));
GenTree* handleForResult = gtCloneExpr(handleForNullCheck);
GenTree* result = nullptr;
if (pRuntimeLookup->sizeOffset != CORINFO_NO_SIZE_CHECK)
{
// Dynamic dictionary expansion support
assert((lastIndOfTree != nullptr) && (pRuntimeLookup->indirections > 0));
// sizeValue = dictionary[pRuntimeLookup->sizeOffset]
GenTreeIntCon* sizeOffset = gtNewIconNode(pRuntimeLookup->sizeOffset, TYP_I_IMPL);
GenTree* sizeValueOffset = gtNewOperNode(GT_ADD, TYP_I_IMPL, lastIndOfTree, sizeOffset);
GenTree* sizeValue = gtNewOperNode(GT_IND, TYP_I_IMPL, sizeValueOffset);
sizeValue->gtFlags |= GTF_IND_NONFAULTING;
// sizeCheck fails if sizeValue < pRuntimeLookup->offsets[i]
GenTree* offsetValue = gtNewIconNode(pRuntimeLookup->offsets[pRuntimeLookup->indirections - 1], TYP_I_IMPL);
GenTree* sizeCheck = gtNewOperNode(GT_LE, TYP_INT, sizeValue, offsetValue);
// Invert the null check condition.
nullCheck->ChangeOperUnchecked(GT_EQ);
// (sizeCheck fails || nullCheck fails) ? helperCall : handle.
// Add checks and the handle as call arguments, indirect call transformer will handle this.
helperCall->gtCallArgs = gtPrependNewCallArg(handleForResult, helperCall->gtCallArgs);
helperCall->gtCallArgs = gtPrependNewCallArg(sizeCheck, helperCall->gtCallArgs);
helperCall->gtCallArgs = gtPrependNewCallArg(nullCheck, helperCall->gtCallArgs);
result = helperCall;
addExpRuntimeLookupCandidate(helperCall);
}
else
{
GenTreeColon* colonNullCheck = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL, handleForResult, helperCall);
result = gtNewQmarkNode(TYP_I_IMPL, nullCheck, colonNullCheck);
}
unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling Runtime Lookup tree"));
impAssignTempGen(tmp, result, (unsigned)CHECK_SPILL_NONE);
return gtNewLclvNode(tmp, TYP_I_IMPL);
}
/******************************************************************************
* Spills the stack at verCurrentState.esStack[level] and replaces it with a temp.
* If tnum!=BAD_VAR_NUM, the temp var used to replace the tree is tnum,
* else, grab a new temp.
* For structs (which can be pushed on the stack using obj, etc),
* special handling is needed
*/
struct RecursiveGuard
{
public:
RecursiveGuard()
{
m_pAddress = nullptr;
}
~RecursiveGuard()
{
if (m_pAddress)
{
*m_pAddress = false;
}
}
void Init(bool* pAddress, bool bInitialize)
{
assert(pAddress && *pAddress == false && "Recursive guard violation");
m_pAddress = pAddress;
if (bInitialize)
{
*m_pAddress = true;
}
}
protected:
bool* m_pAddress;
};
bool Compiler::impSpillStackEntry(unsigned level,
unsigned tnum
#ifdef DEBUG
,
bool bAssertOnRecursion,
const char* reason
#endif
)
{
#ifdef DEBUG
RecursiveGuard guard;
guard.Init(&impNestedStackSpill, bAssertOnRecursion);
#endif
GenTree* tree = verCurrentState.esStack[level].val;
/* Allocate a temp if we haven't been asked to use a particular one */
if (tnum != BAD_VAR_NUM && (tnum >= lvaCount))
{
return false;
}
bool isNewTemp = false;
if (tnum == BAD_VAR_NUM)
{
tnum = lvaGrabTemp(true DEBUGARG(reason));
isNewTemp = true;
}
/* Assign the spilled entry to the temp */
impAssignTempGen(tnum, tree, verCurrentState.esStack[level].seTypeInfo.GetClassHandle(), level);
// If temp is newly introduced and a ref type, grab what type info we can.
if (isNewTemp && (lvaTable[tnum].lvType == TYP_REF))
{
assert(lvaTable[tnum].lvSingleDef == 0);
lvaTable[tnum].lvSingleDef = 1;
JITDUMP("Marked V%02u as a single def temp\n", tnum);
CORINFO_CLASS_HANDLE stkHnd = verCurrentState.esStack[level].seTypeInfo.GetClassHandle();
lvaSetClass(tnum, tree, stkHnd);
// If we're assigning a GT_RET_EXPR, note the temp over on the call,
// so the inliner can use it in case it needs a return spill temp.
if (tree->OperGet() == GT_RET_EXPR)
{
JITDUMP("\n*** see V%02u = GT_RET_EXPR, noting temp\n", tnum);
GenTree* call = tree->AsRetExpr()->gtInlineCandidate;
InlineCandidateInfo* ici = call->AsCall()->gtInlineCandidateInfo;
ici->preexistingSpillTemp = tnum;
}
}
// The tree type may be modified by impAssignTempGen, so use the type of the lclVar.
var_types type = genActualType(lvaTable[tnum].TypeGet());
GenTree* temp = gtNewLclvNode(tnum, type);
verCurrentState.esStack[level].val = temp;
return true;
}
/*****************************************************************************
*
* Ensure that the stack has only spilled values
*/
void Compiler::impSpillStackEnsure(bool spillLeaves)
{
assert(!spillLeaves || opts.compDbgCode);
for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
{
GenTree* tree = verCurrentState.esStack[level].val;
if (!spillLeaves && tree->OperIsLeaf())
{
continue;
}
// Temps introduced by the importer itself don't need to be spilled
bool isTempLcl =
(tree->OperGet() == GT_LCL_VAR) && (tree->AsLclVarCommon()->GetLclNum() >= info.compLocalsCount);
if (isTempLcl)
{
continue;
}
impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillStackEnsure"));
}
}
void Compiler::impSpillEvalStack()
{
for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
{
impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillEvalStack"));
}
}
/*****************************************************************************
*
* If the stack contains any trees with side effects in them, assign those
* trees to temps and append the assignments to the statement list.
* On return the stack is guaranteed to be empty.
*/
inline void Compiler::impEvalSideEffects()
{
impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("impEvalSideEffects"));
verCurrentState.esStackDepth = 0;
}
/*****************************************************************************
*
* If the stack contains any trees with side effects in them, assign those
* trees to temps and replace them on the stack with refs to their temps.
* [0..chkLevel) is the portion of the stack which will be checked and spilled.
*/
inline void Compiler::impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason))
{
assert(chkLevel != (unsigned)CHECK_SPILL_NONE);
/* Before we make any appends to the tree list we must spill the
* "special" side effects (GTF_ORDER_SIDEEFF on a GT_CATCH_ARG) */
impSpillSpecialSideEff();
if (chkLevel == (unsigned)CHECK_SPILL_ALL)
{
chkLevel = verCurrentState.esStackDepth;
}
assert(chkLevel <= verCurrentState.esStackDepth);
GenTreeFlags spillFlags = spillGlobEffects ? GTF_GLOB_EFFECT : GTF_SIDE_EFFECT;
for (unsigned i = 0; i < chkLevel; i++)
{
GenTree* tree = verCurrentState.esStack[i].val;
if ((tree->gtFlags & spillFlags) != 0 ||
(spillGlobEffects && // Only consider the following when spillGlobEffects == true
!impIsAddressInLocal(tree) && // No need to spill the GT_ADDR node on a local.
gtHasLocalsWithAddrOp(tree))) // Spill if we still see GT_LCL_VAR that contains lvHasLdAddrOp or
// lvAddrTaken flag.
{
impSpillStackEntry(i, BAD_VAR_NUM DEBUGARG(false) DEBUGARG(reason));
}
}
}
/*****************************************************************************
*
* If the stack contains any trees with special side effects in them, assign
* those trees to temps and replace them on the stack with refs to their temps.
*/
inline void Compiler::impSpillSpecialSideEff()
{
// Only exception objects need to be carefully handled
if (!compCurBB->bbCatchTyp)
{
return;
}
for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
{
GenTree* tree = verCurrentState.esStack[level].val;
// If there is an exception object anywhere in the sub tree, make sure we spill this stack entry.
if (gtHasCatchArg(tree))
{
impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillSpecialSideEff"));
}
}
}
/*****************************************************************************
*
* Spill all stack references to value classes (TYP_STRUCT nodes)
*/
void Compiler::impSpillValueClasses()
{
for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
{
GenTree* tree = verCurrentState.esStack[level].val;
if (fgWalkTreePre(&tree, impFindValueClasses) == WALK_ABORT)
{
// Tree walk was aborted, which means that we found a
// value class on the stack. Need to spill that
// stack entry.
impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillValueClasses"));
}
}
}
/*****************************************************************************
*
* Callback that checks if a tree node is TYP_STRUCT
*/
Compiler::fgWalkResult Compiler::impFindValueClasses(GenTree** pTree, fgWalkData* data)
{
fgWalkResult walkResult = WALK_CONTINUE;
if ((*pTree)->gtType == TYP_STRUCT)
{
// Abort the walk and indicate that we found a value class
walkResult = WALK_ABORT;
}
return walkResult;
}
/*****************************************************************************
*
* If the stack contains any trees with references to local #lclNum, assign
* those trees to temps and replace their place on the stack with refs to
* their temps.
*/
void Compiler::impSpillLclRefs(ssize_t lclNum)
{
/* Before we make any appends to the tree list we must spill the
* "special" side effects (GTF_ORDER_SIDEEFF) - GT_CATCH_ARG */
impSpillSpecialSideEff();
for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
{
GenTree* tree = verCurrentState.esStack[level].val;
/* If the tree may throw an exception, and the block has a handler,
then we need to spill assignments to the local if the local is
live on entry to the handler.
Just spill 'em all without considering the liveness */
bool xcptnCaught = ehBlockHasExnFlowDsc(compCurBB) && (tree->gtFlags & (GTF_CALL | GTF_EXCEPT));
/* Skip the tree if it doesn't have an affected reference,
unless xcptnCaught */
if (xcptnCaught || gtHasRef(tree, lclNum))
{
impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillLclRefs"));
}
}
}
/*****************************************************************************
*
* Push catch arg onto the stack.
* If there are jumps to the beginning of the handler, insert basic block
* and spill catch arg to a temp. Update the handler block if necessary.
*
* Returns the basic block of the actual handler.
*/
BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd, bool isSingleBlockFilter)
{
// Do not inject the basic block twice on reimport. This should be
// hit only under JIT stress. See if the block is the one we injected.
// Note that EH canonicalization can inject internal blocks here. We might
// be able to re-use such a block (but we don't, right now).
if ((hndBlk->bbFlags & (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE)) ==
(BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE))
{
Statement* stmt = hndBlk->firstStmt();
if (stmt != nullptr)
{
GenTree* tree = stmt->GetRootNode();
assert(tree != nullptr);
if ((tree->gtOper == GT_ASG) && (tree->AsOp()->gtOp1->gtOper == GT_LCL_VAR) &&
(tree->AsOp()->gtOp2->gtOper == GT_CATCH_ARG))
{
tree = gtNewLclvNode(tree->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum(), TYP_REF);
impPushOnStack(tree, typeInfo(TI_REF, clsHnd));
return hndBlk->bbNext;
}
}
// If we get here, it must have been some other kind of internal block. It's possible that
// someone prepended something to our injected block, but that's unlikely.
}
/* Push the exception address value on the stack */
GenTree* arg = new (this, GT_CATCH_ARG) GenTree(GT_CATCH_ARG, TYP_REF);
/* Mark the node as having a side-effect - i.e. cannot be
* moved around since it is tied to a fixed location (EAX) */
arg->gtFlags |= GTF_ORDER_SIDEEFF;
#if defined(JIT32_GCENCODER)
const bool forceInsertNewBlock = isSingleBlockFilter || compStressCompile(STRESS_CATCH_ARG, 5);
#else
const bool forceInsertNewBlock = compStressCompile(STRESS_CATCH_ARG, 5);
#endif // defined(JIT32_GCENCODER)
/* Spill GT_CATCH_ARG to a temp if there are jumps to the beginning of the handler */
if (hndBlk->bbRefs > 1 || forceInsertNewBlock)
{
if (hndBlk->bbRefs == 1)
{
hndBlk->bbRefs++;
}
/* Create extra basic block for the spill */
BasicBlock* newBlk = fgNewBBbefore(BBJ_NONE, hndBlk, /* extendRegion */ true);
newBlk->bbFlags |= BBF_IMPORTED | BBF_DONT_REMOVE;
newBlk->inheritWeight(hndBlk);
newBlk->bbCodeOffs = hndBlk->bbCodeOffs;
/* Account for the new link we are about to create */
hndBlk->bbRefs++;
// Spill into a temp.
unsigned tempNum = lvaGrabTemp(false DEBUGARG("SpillCatchArg"));
lvaTable[tempNum].lvType = TYP_REF;
GenTree* argAsg = gtNewTempAssign(tempNum, arg);
arg = gtNewLclvNode(tempNum, TYP_REF);
hndBlk->bbStkTempsIn = tempNum;
Statement* argStmt;
if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
{
// Report the debug info. impImportBlockCode won't treat the actual handler as an exception block and thus
// won't do it for us.
// TODO-DEBUGINFO: Previous code always set stack as non-empty
// here. Can we not just use impCurStmtOffsSet? Are we out of sync
// here with the stack?
impCurStmtDI = DebugInfo(compInlineContext, ILLocation(newBlk->bbCodeOffs, false, false));
argStmt = gtNewStmt(argAsg, impCurStmtDI);
}
else
{
argStmt = gtNewStmt(argAsg);
}
fgInsertStmtAtEnd(newBlk, argStmt);
}
impPushOnStack(arg, typeInfo(TI_REF, clsHnd));
return hndBlk;
}
/*****************************************************************************
*
* Given a tree, clone it. *pClone is set to the cloned tree.
* Returns the original tree if the cloning was easy,
 * else returns the temp to which the tree had to be spilled.
* If the tree has side-effects, it will be spilled to a temp.
*/
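// Typical usage (illustrative):
//
//     GenTree* clone;
//     tree = impCloneExpr(tree, &clone, structHnd, curLevel, nullptr DEBUGARG("reason"));
//     // 'tree' and 'clone' may now each be consumed exactly once.
//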
GenTree* Compiler::impCloneExpr(GenTree* tree,
GenTree** pClone,
CORINFO_CLASS_HANDLE structHnd,
unsigned curLevel,
Statement** pAfterStmt DEBUGARG(const char* reason))
{
if (!(tree->gtFlags & GTF_GLOB_EFFECT))
{
GenTree* clone = gtClone(tree, true);
if (clone)
{
*pClone = clone;
return tree;
}
}
/* Store the operand in a temp and return the temp */
unsigned temp = lvaGrabTemp(true DEBUGARG(reason));
// impAssignTempGen() may change tree->gtType to TYP_VOID for calls which
// return a struct type. It also may modify the struct type to a more
// specialized type (e.g. a SIMD type). So we will get the type from
// the lclVar AFTER calling impAssignTempGen().
impAssignTempGen(temp, tree, structHnd, curLevel, pAfterStmt, impCurStmtDI);
var_types type = genActualType(lvaTable[temp].TypeGet());
*pClone = gtNewLclvNode(temp, type);
return gtNewLclvNode(temp, type);
}
//------------------------------------------------------------------------
// impCreateDIWithCurrentStackInfo: Create a DebugInfo instance with the
// specified IL offset and 'is call' bit, using the current stack to determine
// whether to set the 'stack empty' bit.
//
// Arguments:
// offs - the IL offset for the DebugInfo
// isCall - whether the created DebugInfo should have the IsCall bit set
//
// Return Value:
// The DebugInfo instance.
//
DebugInfo Compiler::impCreateDIWithCurrentStackInfo(IL_OFFSET offs, bool isCall)
{
assert(offs != BAD_IL_OFFSET);
bool isStackEmpty = verCurrentState.esStackDepth <= 0;
return DebugInfo(compInlineContext, ILLocation(offs, isStackEmpty, isCall));
}
//------------------------------------------------------------------------
// impCurStmtOffsSet: Set the "current debug info" to attach to statements that
// we are generating next.
//
// Arguments:
// offs - the IL offset
//
// Remarks:
// This function will be called in the main IL processing loop when it is
// determined that we have reached a location in the IL stream for which we
// want to report debug information. This is the main way we determine which
// statements to report debug info for to the EE: for other statements, they
// will have no debug information attached.
//
inline void Compiler::impCurStmtOffsSet(IL_OFFSET offs)
{
if (offs == BAD_IL_OFFSET)
{
impCurStmtDI = DebugInfo(compInlineContext, ILLocation());
}
else
{
impCurStmtDI = impCreateDIWithCurrentStackInfo(offs, false);
}
}
//------------------------------------------------------------------------
// impCanSpillNow: check whether it is possible to spill all values from the evaluation stack to local variables.
//
// Arguments:
// prevOpcode - last importer opcode
//
// Return Value:
// true if it is legal, false if it could be a sequence that we do not want to divide.
bool Compiler::impCanSpillNow(OPCODE prevOpcode)
{
// Don't spill after ldtoken, newarr or newobj, because they could be part of the InitializeArray sequence.
// Avoid breaking up that sequence so that impInitializeArrayIntrinsic can still succeed.
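// For example (illustrative), a typical array-initialization sequence is
//     newarr <elemType>; dup; ldtoken <field>; call InitializeArray
// and a spill inserted right after the newarr or ldtoken would hide those nodes behind
// temps, defeating the tree pattern match in impInitializeArrayIntrinsic.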
return (prevOpcode != CEE_LDTOKEN) && (prevOpcode != CEE_NEWARR) && (prevOpcode != CEE_NEWOBJ);
}
/*****************************************************************************
*
* Remember the instr offset for the statements
*
* When we do impAppendTree(tree), we can't set stmt->SetLastILOffset(impCurOpcOffs),
* if the append was done because of a partial stack spill,
* as some of the trees corresponding to code up to impCurOpcOffs might
* still be sitting on the stack.
 * So we delay calling SetLastILOffset() until impNoteLastILoffs().
* This should be called when an opcode finally/explicitly causes
* impAppendTree(tree) to be called (as opposed to being called because of
* a spill caused by the opcode)
*/
#ifdef DEBUG
void Compiler::impNoteLastILoffs()
{
if (impLastILoffsStmt == nullptr)
{
// We should have added a statement for the current basic block
// Is this assert correct?
assert(impLastStmt);
impLastStmt->SetLastILOffset(compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs);
}
else
{
impLastILoffsStmt->SetLastILOffset(compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs);
impLastILoffsStmt = nullptr;
}
}
#endif // DEBUG
/*****************************************************************************
* We don't create any GenTree (excluding spills) for a branch.
* For debugging info, we need a placeholder so that we can note
* the IL offset in gtStmt.gtStmtOffs. So append an empty statement.
*/
void Compiler::impNoteBranchOffs()
{
if (opts.compDbgCode)
{
impAppendTree(gtNewNothingNode(), (unsigned)CHECK_SPILL_NONE, impCurStmtDI);
}
}
/*****************************************************************************
* Locate the next stmt boundary for which we need to record info.
* We will have to spill the stack at such boundaries if it is not
* already empty.
* Returns the next stmt boundary (after the start of the block)
*/
unsigned Compiler::impInitBlockLineInfo()
{
/* Assume the block does not correspond with any IL offset. This prevents
us from reporting extra offsets. Extra mappings can cause confusing
stepping, especially if the extra mapping is a jump-target, and the
debugger does not ignore extra mappings, but instead rewinds to the
nearest known offset */
impCurStmtOffsSet(BAD_IL_OFFSET);
IL_OFFSET blockOffs = compCurBB->bbCodeOffs;
if ((verCurrentState.esStackDepth == 0) && (info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES))
{
impCurStmtOffsSet(blockOffs);
}
/* Always report IL offset 0 or some tests get confused.
Probably a good idea anyway */
if (blockOffs == 0)
{
impCurStmtOffsSet(blockOffs);
}
if (!info.compStmtOffsetsCount)
{
return ~0;
}
/* Find the lowest explicit stmt boundary within the block */
/* Start looking at an entry that is based on our instr offset */
unsigned index = (info.compStmtOffsetsCount * blockOffs) / info.compILCodeSize;
if (index >= info.compStmtOffsetsCount)
{
index = info.compStmtOffsetsCount - 1;
}
/* If we've guessed too far, back up */
while (index > 0 && info.compStmtOffsets[index - 1] >= blockOffs)
{
index--;
}
/* If we guessed short, advance ahead */
while (info.compStmtOffsets[index] < blockOffs)
{
index++;
if (index == info.compStmtOffsetsCount)
{
return info.compStmtOffsetsCount;
}
}
assert(index < info.compStmtOffsetsCount);
if (info.compStmtOffsets[index] == blockOffs)
{
/* There is an explicit boundary for the start of this basic block.
So we will start with bbCodeOffs. Else we will wait until we
get to the next explicit boundary */
impCurStmtOffsSet(blockOffs);
index++;
}
return index;
}
/*****************************************************************************/
bool Compiler::impOpcodeIsCallOpcode(OPCODE opcode)
{
switch (opcode)
{
case CEE_CALL:
case CEE_CALLI:
case CEE_CALLVIRT:
return true;
default:
return false;
}
}
/*****************************************************************************/
static inline bool impOpcodeIsCallSiteBoundary(OPCODE opcode)
{
switch (opcode)
{
case CEE_CALL:
case CEE_CALLI:
case CEE_CALLVIRT:
case CEE_JMP:
case CEE_NEWOBJ:
case CEE_NEWARR:
return true;
default:
return false;
}
}
/*****************************************************************************/
// One might think it is worth caching these values, but results indicate
// that it isn't.
// In addition, caching them causes SuperPMI to be unable to completely
// encapsulate an individual method context.
CORINFO_CLASS_HANDLE Compiler::impGetRefAnyClass()
{
CORINFO_CLASS_HANDLE refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF);
assert(refAnyClass != (CORINFO_CLASS_HANDLE) nullptr);
return refAnyClass;
}
CORINFO_CLASS_HANDLE Compiler::impGetTypeHandleClass()
{
CORINFO_CLASS_HANDLE typeHandleClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPE_HANDLE);
assert(typeHandleClass != (CORINFO_CLASS_HANDLE) nullptr);
return typeHandleClass;
}
CORINFO_CLASS_HANDLE Compiler::impGetRuntimeArgumentHandle()
{
CORINFO_CLASS_HANDLE argIteratorClass = info.compCompHnd->getBuiltinClass(CLASSID_ARGUMENT_HANDLE);
assert(argIteratorClass != (CORINFO_CLASS_HANDLE) nullptr);
return argIteratorClass;
}
CORINFO_CLASS_HANDLE Compiler::impGetStringClass()
{
CORINFO_CLASS_HANDLE stringClass = info.compCompHnd->getBuiltinClass(CLASSID_STRING);
assert(stringClass != (CORINFO_CLASS_HANDLE) nullptr);
return stringClass;
}
CORINFO_CLASS_HANDLE Compiler::impGetObjectClass()
{
CORINFO_CLASS_HANDLE objectClass = info.compCompHnd->getBuiltinClass(CLASSID_SYSTEM_OBJECT);
assert(objectClass != (CORINFO_CLASS_HANDLE) nullptr);
return objectClass;
}
/*****************************************************************************
* "&var" can be used either as TYP_BYREF or TYP_I_IMPL, but we
 * set its type to TYP_BYREF when we create it. Whether it can be
 * changed to TYP_I_IMPL is known only at the point where we use it.
*/
/* static */
void Compiler::impBashVarAddrsToI(GenTree* tree1, GenTree* tree2)
{
if (tree1->IsLocalAddrExpr() != nullptr)
{
tree1->gtType = TYP_I_IMPL;
}
if (tree2 && (tree2->IsLocalAddrExpr() != nullptr))
{
tree2->gtType = TYP_I_IMPL;
}
}
/*****************************************************************************
* TYP_INT and TYP_I_IMPL can be used almost interchangeably, but we want
* to make that an explicit cast in our trees, so any implicit casts that
* exist in the IL (at least on 64-bit where TYP_I_IMPL != TYP_INT) are
* turned into explicit casts here.
* We also allow an implicit conversion of a ldnull into a TYP_I_IMPL(0)
*/
GenTree* Compiler::impImplicitIorI4Cast(GenTree* tree, var_types dstTyp)
{
var_types currType = genActualType(tree->gtType);
var_types wantedType = genActualType(dstTyp);
if (wantedType != currType)
{
// Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
if ((tree->OperGet() == GT_CNS_INT) && varTypeIsI(dstTyp))
{
if (!varTypeIsI(tree->gtType) || ((tree->gtType == TYP_REF) && (tree->AsIntCon()->gtIconVal == 0)))
{
tree->gtType = TYP_I_IMPL;
}
}
#ifdef TARGET_64BIT
else if (varTypeIsI(wantedType) && (currType == TYP_INT))
{
// Note that this allows TYP_INT to be cast to a TYP_I_IMPL when wantedType is a TYP_BYREF or TYP_REF
tree = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL);
}
else if ((wantedType == TYP_INT) && varTypeIsI(currType))
{
// Note that this allows TYP_BYREF or TYP_REF to be cast to a TYP_INT
tree = gtNewCastNode(TYP_INT, tree, false, TYP_INT);
}
#endif // TARGET_64BIT
}
return tree;
}
/*****************************************************************************
* TYP_FLOAT and TYP_DOUBLE can be used almost interchangeably in some cases,
* but we want to make that an explicit cast in our trees, so any implicit casts
* that exist in the IL are turned into explicit casts here.
*/
GenTree* Compiler::impImplicitR4orR8Cast(GenTree* tree, var_types dstTyp)
{
if (varTypeIsFloating(tree) && varTypeIsFloating(dstTyp) && (dstTyp != tree->gtType))
{
tree = gtNewCastNode(dstTyp, tree, false, dstTyp);
}
return tree;
}
//------------------------------------------------------------------------
// impInitializeArrayIntrinsic: Attempts to replace a call to InitializeArray
// with a GT_COPYBLK node.
//
// Arguments:
// sig - The InitializeArray signature.
//
// Return Value:
// A pointer to the newly created GT_COPYBLK node if the replacement succeeds or
// nullptr otherwise.
//
// Notes:
// The function recognizes the following IL pattern:
// ldc <length> or a list of ldc <lower bound>/<length>
// newarr or newobj
// dup
// ldtoken <field handle>
// call InitializeArray
// The lower bounds need not be constant except when the array rank is 1.
// The function recognizes all kinds of arrays, thus enabling a small runtime
// such as CoreRT to skip providing an implementation for InitializeArray.
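// As an illustration (typical Roslyn output, not guaranteed), a C# field initializer such as
//     static readonly int[] s_data = { 1, 2, 3, 4 };
// compiles to exactly this newarr/dup/ldtoken/call sequence, which this function then
// rewrites into a single block copy from the static initialization data blob.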
GenTree* Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig)
{
assert(sig->numArgs == 2);
GenTree* fieldTokenNode = impStackTop(0).val;
GenTree* arrayLocalNode = impStackTop(1).val;
//
// Verify that the field token is known and valid. Note that it's also
// possible for the token to come from reflection, in which case we cannot do
// the optimization and must therefore revert to calling the helper. You can
// see an example of this in bvt\DynIL\initarray2.exe (in Main).
//
// Check to see if the ldtoken helper call is what we see here.
if (fieldTokenNode->gtOper != GT_CALL || (fieldTokenNode->AsCall()->gtCallType != CT_HELPER) ||
(fieldTokenNode->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD)))
{
return nullptr;
}
// Strip helper call away
fieldTokenNode = fieldTokenNode->AsCall()->gtCallArgs->GetNode();
if (fieldTokenNode->gtOper == GT_IND)
{
fieldTokenNode = fieldTokenNode->AsOp()->gtOp1;
}
// Check for constant
if (fieldTokenNode->gtOper != GT_CNS_INT)
{
return nullptr;
}
CORINFO_FIELD_HANDLE fieldToken = (CORINFO_FIELD_HANDLE)fieldTokenNode->AsIntCon()->gtCompileTimeHandle;
if (!fieldTokenNode->IsIconHandle(GTF_ICON_FIELD_HDL) || (fieldToken == nullptr))
{
return nullptr;
}
//
// We need to get the number of elements in the array and the size of each element.
// We verify that the newarr statement is exactly what we expect it to be.
// If it's not then we just return NULL and we don't optimize this call
//
// It is possible that we don't have any statements in the block yet.
if (impLastStmt == nullptr)
{
return nullptr;
}
//
// We start by looking at the last statement, making sure it's an assignment, and
// that the target of the assignment is the array passed to InitializeArray.
//
GenTree* arrayAssignment = impLastStmt->GetRootNode();
if ((arrayAssignment->gtOper != GT_ASG) || (arrayAssignment->AsOp()->gtOp1->gtOper != GT_LCL_VAR) ||
(arrayLocalNode->gtOper != GT_LCL_VAR) || (arrayAssignment->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum() !=
arrayLocalNode->AsLclVarCommon()->GetLclNum()))
{
return nullptr;
}
//
// Make sure that the object being assigned is a helper call.
//
GenTree* newArrayCall = arrayAssignment->AsOp()->gtOp2;
if ((newArrayCall->gtOper != GT_CALL) || (newArrayCall->AsCall()->gtCallType != CT_HELPER))
{
return nullptr;
}
//
// Verify that it is one of the new array helpers.
//
bool isMDArray = false;
if (newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_DIRECT) &&
newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_OBJ) &&
newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_VC) &&
newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_ALIGN8)
#ifdef FEATURE_READYTORUN
&& newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1)
#endif
)
{
if (newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEW_MDARR))
{
return nullptr;
}
isMDArray = true;
}
CORINFO_CLASS_HANDLE arrayClsHnd = (CORINFO_CLASS_HANDLE)newArrayCall->AsCall()->compileTimeHelperArgumentHandle;
//
// Make sure we found a compile time handle to the array
//
if (!arrayClsHnd)
{
return nullptr;
}
unsigned rank = 0;
S_UINT32 numElements;
if (isMDArray)
{
rank = info.compCompHnd->getArrayRank(arrayClsHnd);
if (rank == 0)
{
return nullptr;
}
GenTreeCall::Use* tokenArg = newArrayCall->AsCall()->gtCallArgs;
assert(tokenArg != nullptr);
GenTreeCall::Use* numArgsArg = tokenArg->GetNext();
assert(numArgsArg != nullptr);
GenTreeCall::Use* argsArg = numArgsArg->GetNext();
assert(argsArg != nullptr);
//
// The number of arguments should be a constant between 1 and 64. The rank can't be 0
// so at least one length must be present and the rank can't exceed 32 so there can
// be at most 64 arguments - 32 lengths and 32 lower bounds.
//
if ((!numArgsArg->GetNode()->IsCnsIntOrI()) || (numArgsArg->GetNode()->AsIntCon()->IconValue() < 1) ||
(numArgsArg->GetNode()->AsIntCon()->IconValue() > 64))
{
return nullptr;
}
unsigned numArgs = static_cast<unsigned>(numArgsArg->GetNode()->AsIntCon()->IconValue());
bool lowerBoundsSpecified;
if (numArgs == rank * 2)
{
lowerBoundsSpecified = true;
}
else if (numArgs == rank)
{
lowerBoundsSpecified = false;
//
// If the rank is 1 and a lower bound isn't specified then the runtime creates
// an SDArray. Note that even if a lower bound is specified it can be 0 and then
// we get an SDArray as well, see the for loop below.
//
if (rank == 1)
{
isMDArray = false;
}
}
else
{
return nullptr;
}
//
// The rank is known to be at least 1 so we can start with numElements being 1
// to avoid the need to special case the first dimension.
//
numElements = S_UINT32(1);
struct Match
{
static bool IsArgsFieldInit(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
{
return (tree->OperGet() == GT_ASG) && IsArgsFieldIndir(tree->gtGetOp1(), index, lvaNewObjArrayArgs) &&
IsArgsAddr(tree->gtGetOp1()->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
}
static bool IsArgsFieldIndir(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
{
return (tree->OperGet() == GT_IND) && (tree->gtGetOp1()->OperGet() == GT_ADD) &&
(tree->gtGetOp1()->gtGetOp2()->IsIntegralConst(sizeof(INT32) * index)) &&
IsArgsAddr(tree->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
}
static bool IsArgsAddr(GenTree* tree, unsigned lvaNewObjArrayArgs)
{
return (tree->OperGet() == GT_ADDR) && (tree->gtGetOp1()->OperGet() == GT_LCL_VAR) &&
(tree->gtGetOp1()->AsLclVar()->GetLclNum() == lvaNewObjArrayArgs);
}
static bool IsComma(GenTree* tree)
{
return (tree != nullptr) && (tree->OperGet() == GT_COMMA);
}
};
unsigned argIndex = 0;
GenTree* comma;
for (comma = argsArg->GetNode(); Match::IsComma(comma); comma = comma->gtGetOp2())
{
if (lowerBoundsSpecified)
{
//
// In general lower bounds can be ignored because they're not needed to
// calculate the total number of elements. But for single dimensional arrays
// we need to know if the lower bound is 0 because in this case the runtime
// creates an SDArray and this affects the way the array data offset is calculated.
//
if (rank == 1)
{
GenTree* lowerBoundAssign = comma->gtGetOp1();
assert(Match::IsArgsFieldInit(lowerBoundAssign, argIndex, lvaNewObjArrayArgs));
GenTree* lowerBoundNode = lowerBoundAssign->gtGetOp2();
if (lowerBoundNode->IsIntegralConst(0))
{
isMDArray = false;
}
}
comma = comma->gtGetOp2();
argIndex++;
}
GenTree* lengthNodeAssign = comma->gtGetOp1();
assert(Match::IsArgsFieldInit(lengthNodeAssign, argIndex, lvaNewObjArrayArgs));
GenTree* lengthNode = lengthNodeAssign->gtGetOp2();
if (!lengthNode->IsCnsIntOrI())
{
return nullptr;
}
numElements *= S_SIZE_T(lengthNode->AsIntCon()->IconValue());
argIndex++;
}
assert((comma != nullptr) && Match::IsArgsAddr(comma, lvaNewObjArrayArgs));
if (argIndex != numArgs)
{
return nullptr;
}
}
else
{
//
// Make sure there are exactly two arguments: the array class and
// the number of elements.
//
GenTree* arrayLengthNode;
GenTreeCall::Use* args = newArrayCall->AsCall()->gtCallArgs;
#ifdef FEATURE_READYTORUN
if (newArrayCall->AsCall()->gtCallMethHnd == eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1))
{
// Array length is 1st argument for readytorun helper
arrayLengthNode = args->GetNode();
}
else
#endif
{
// Array length is 2nd argument for regular helper
arrayLengthNode = args->GetNext()->GetNode();
}
//
// This optimization is only valid for a constant array size.
//
if (arrayLengthNode->gtOper != GT_CNS_INT)
{
return nullptr;
}
numElements = S_SIZE_T(arrayLengthNode->AsIntCon()->gtIconVal);
if (!info.compCompHnd->isSDArray(arrayClsHnd))
{
return nullptr;
}
}
CORINFO_CLASS_HANDLE elemClsHnd;
var_types elementType = JITtype2varType(info.compCompHnd->getChildType(arrayClsHnd, &elemClsHnd));
//
// Note that genTypeSize will return zero for non primitive types, which is exactly
// what we want (size will then be 0, and we will catch this in the conditional below).
// Note that we don't expect this to fail for valid binaries, so we assert in the
// non-verification case (the verification case should not assert but rather correctly
// handle bad binaries). This assert is not guarding any specific invariant, but rather
// saying that we don't expect this to happen, and if it is hit, we need to investigate
// why.
//
S_UINT32 elemSize(genTypeSize(elementType));
S_UINT32 size = elemSize * S_UINT32(numElements);
if (size.IsOverflow())
{
return nullptr;
}
if ((size.Value() == 0) || (varTypeIsGC(elementType)))
{
return nullptr;
}
void* initData = info.compCompHnd->getArrayInitializationData(fieldToken, size.Value());
if (!initData)
{
return nullptr;
}
//
// At this point we are ready to commit to implementing the InitializeArray
// intrinsic using a struct assignment. Pop the arguments from the stack and
// return the struct assignment node.
//
impPopStack();
impPopStack();
const unsigned blkSize = size.Value();
unsigned dataOffset;
if (isMDArray)
{
dataOffset = eeGetMDArrayDataOffset(rank);
}
else
{
dataOffset = eeGetArrayDataOffset();
}
GenTree* dstAddr = gtNewOperNode(GT_ADD, TYP_BYREF, arrayLocalNode, gtNewIconNode(dataOffset, TYP_I_IMPL));
GenTree* dst = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, dstAddr, typGetBlkLayout(blkSize));
GenTree* src = gtNewIndOfIconHandleNode(TYP_STRUCT, (size_t)initData, GTF_ICON_CONST_PTR, true);
#ifdef DEBUG
src->gtGetOp1()->AsIntCon()->gtTargetHandle = THT_IntializeArrayIntrinsics;
#endif
return gtNewBlkOpNode(dst, // dst
src, // src
false, // volatile
true); // copyBlock
}
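//------------------------------------------------------------------------
// impCreateSpanIntrinsic: Attempts to replace a call to RuntimeHelpers.CreateSpan<T>
//    with a ReadOnlySpan<T> constructed directly over the field's static
//    initialization data.
//
// Arguments:
//    sig - The CreateSpan signature; the single method type argument is the
//          span element type.
//
// Return Value:
//    A tree that produces the ReadOnlySpan<T> local if the replacement succeeds,
//    or nullptr otherwise (in which case the ordinary call is used).
//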
GenTree* Compiler::impCreateSpanIntrinsic(CORINFO_SIG_INFO* sig)
{
assert(sig->numArgs == 1);
assert(sig->sigInst.methInstCount == 1);
GenTree* fieldTokenNode = impStackTop(0).val;
//
// Verify that the field token is known and valid. Note that it's also
// possible for the token to come from reflection, in which case we cannot do
// the optimization and must therefore revert to calling the helper. You can
// see an example of this in bvt\DynIL\initarray2.exe (in Main).
//
// Check to see if the ldtoken helper call is what we see here.
if (fieldTokenNode->gtOper != GT_CALL || (fieldTokenNode->AsCall()->gtCallType != CT_HELPER) ||
(fieldTokenNode->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD)))
{
return nullptr;
}
// Strip helper call away
fieldTokenNode = fieldTokenNode->AsCall()->gtCallArgs->GetNode();
if (fieldTokenNode->gtOper == GT_IND)
{
fieldTokenNode = fieldTokenNode->AsOp()->gtOp1;
}
// Check for constant
if (fieldTokenNode->gtOper != GT_CNS_INT)
{
return nullptr;
}
CORINFO_FIELD_HANDLE fieldToken = (CORINFO_FIELD_HANDLE)fieldTokenNode->AsIntCon()->gtCompileTimeHandle;
if (!fieldTokenNode->IsIconHandle(GTF_ICON_FIELD_HDL) || (fieldToken == nullptr))
{
return nullptr;
}
CORINFO_CLASS_HANDLE fieldOwnerHnd = info.compCompHnd->getFieldClass(fieldToken);
CORINFO_CLASS_HANDLE fieldClsHnd;
var_types fieldElementType =
JITtype2varType(info.compCompHnd->getFieldType(fieldToken, &fieldClsHnd, fieldOwnerHnd));
unsigned totalFieldSize;
// Most static initialization data fields are of some structure, but it is possible for them to be of various
// primitive types as well
if (fieldElementType == var_types::TYP_STRUCT)
{
totalFieldSize = info.compCompHnd->getClassSize(fieldClsHnd);
}
else
{
totalFieldSize = genTypeSize(fieldElementType);
}
// Limit to primitive or enum type - see ArrayNative::GetSpanDataFrom()
CORINFO_CLASS_HANDLE targetElemHnd = sig->sigInst.methInst[0];
if (info.compCompHnd->getTypeForPrimitiveValueClass(targetElemHnd) == CORINFO_TYPE_UNDEF)
{
return nullptr;
}
const unsigned targetElemSize = info.compCompHnd->getClassSize(targetElemHnd);
assert(targetElemSize != 0);
const unsigned count = totalFieldSize / targetElemSize;
if (count == 0)
{
return nullptr;
}
void* data = info.compCompHnd->getArrayInitializationData(fieldToken, totalFieldSize);
if (!data)
{
return nullptr;
}
//
// Ready to commit to the work
//
impPopStack();
// Turn count and pointer value into constants.
GenTree* lengthValue = gtNewIconNode(count, TYP_INT);
GenTree* pointerValue = gtNewIconHandleNode((size_t)data, GTF_ICON_CONST_PTR);
// Construct ReadOnlySpan<T> to return.
CORINFO_CLASS_HANDLE spanHnd = sig->retTypeClass;
unsigned spanTempNum = lvaGrabTemp(true DEBUGARG("ReadOnlySpan<T> for CreateSpan<T>"));
lvaSetStruct(spanTempNum, spanHnd, false);
CORINFO_FIELD_HANDLE pointerFieldHnd = info.compCompHnd->getFieldInClass(spanHnd, 0);
CORINFO_FIELD_HANDLE lengthFieldHnd = info.compCompHnd->getFieldInClass(spanHnd, 1);
GenTreeLclFld* pointerField = gtNewLclFldNode(spanTempNum, TYP_BYREF, 0);
pointerField->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(pointerFieldHnd));
GenTree* pointerFieldAsg = gtNewAssignNode(pointerField, pointerValue);
GenTreeLclFld* lengthField = gtNewLclFldNode(spanTempNum, TYP_INT, TARGET_POINTER_SIZE);
lengthField->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(lengthFieldHnd));
GenTree* lengthFieldAsg = gtNewAssignNode(lengthField, lengthValue);
// Now append a few statements to initialize the span
impAppendTree(lengthFieldAsg, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);
impAppendTree(pointerFieldAsg, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);
// And finally create a tree that points at the span.
return impCreateLocalNode(spanTempNum DEBUGARG(0));
}
//------------------------------------------------------------------------
// impIntrinsic: possibly expand intrinsic call into alternate IR sequence
//
// Arguments:
// newobjThis - for constructor calls, the tree for the newly allocated object
// clsHnd - handle for the intrinsic method's class
// method - handle for the intrinsic method
// sig - signature of the intrinsic method
// methodFlags - CORINFO_FLG_XXX flags of the intrinsic method
// memberRef - the token for the intrinsic method
// readonlyCall - true if call has a readonly prefix
// tailCall - true if call is in tail position
// pConstrainedResolvedToken -- resolved token for constrained call, or nullptr
// if call is not constrained
// constraintCallThisTransform -- this transform to apply for a constrained call
// pIntrinsicName [OUT] -- intrinsic name (see enumeration in namedintrinsiclist.h)
// for "traditional" jit intrinsics
// isSpecialIntrinsic [OUT] -- set true if intrinsic expansion is a call
// that is amenable to special downstream optimization opportunities
//
// Returns:
// IR tree to use in place of the call, or nullptr if the jit should treat
// the intrinsic call like a normal call.
//
// pIntrinsicName set to non-illegal value if the call is recognized as a
// traditional jit intrinsic, even if the intrinsic is not expanded.
//
// isSpecial set true if the expansion is subject to special
// optimizations later in the jit processing
//
// Notes:
// On success the IR tree may be a call to a different method or an inline
// sequence. If it is a call, then the intrinsic processing here is responsible
// for handling all the special cases, as upon return to impImportCall
// expanded intrinsics bypass most of the normal call processing.
//
// Intrinsics are generally not recognized in minopts and debug codegen.
//
// However, certain traditional intrinsics are identified as "must expand"
// if there is no fallback implementation to invoke; these must be handled
// in all codegen modes.
//
// New style intrinsics (where the fallback implementation is in IL) are
// identified as "must expand" if they are invoked from within their
// own method bodies.
//
GenTree* Compiler::impIntrinsic(GenTree* newobjThis,
CORINFO_CLASS_HANDLE clsHnd,
CORINFO_METHOD_HANDLE method,
CORINFO_SIG_INFO* sig,
unsigned methodFlags,
int memberRef,
bool readonlyCall,
bool tailCall,
CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
CORINFO_THIS_TRANSFORM constraintCallThisTransform,
NamedIntrinsic* pIntrinsicName,
bool* isSpecialIntrinsic)
{
assert((methodFlags & CORINFO_FLG_INTRINSIC) != 0);
bool mustExpand = false;
bool isSpecial = false;
NamedIntrinsic ni = NI_Illegal;
if ((methodFlags & CORINFO_FLG_INTRINSIC) != 0)
{
// Recursive non-virtual calls to JIT intrinsics are must-expand by convention.
mustExpand = mustExpand || (gtIsRecursiveCall(method) && !(methodFlags & CORINFO_FLG_VIRTUAL));
ni = lookupNamedIntrinsic(method);
// We specially support the following on all platforms to allow for dead
// code optimization and to more generally support recursive intrinsics.
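// For example (illustrative), a get_IsSupported query for an ISA that the target lacks folds to
// the constant 'false' below, which lets the guarded code be eliminated as dead code.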
if (ni == NI_IsSupported_True)
{
assert(sig->numArgs == 0);
return gtNewIconNode(true);
}
if (ni == NI_IsSupported_False)
{
assert(sig->numArgs == 0);
return gtNewIconNode(false);
}
if (ni == NI_Throw_PlatformNotSupportedException)
{
return impUnsupportedNamedIntrinsic(CORINFO_HELP_THROW_PLATFORM_NOT_SUPPORTED, method, sig, mustExpand);
}
#ifdef FEATURE_HW_INTRINSICS
if ((ni > NI_HW_INTRINSIC_START) && (ni < NI_HW_INTRINSIC_END))
{
GenTree* hwintrinsic = impHWIntrinsic(ni, clsHnd, method, sig, mustExpand);
if (mustExpand && (hwintrinsic == nullptr))
{
return impUnsupportedNamedIntrinsic(CORINFO_HELP_THROW_NOT_IMPLEMENTED, method, sig, mustExpand);
}
return hwintrinsic;
}
if ((ni > NI_SIMD_AS_HWINTRINSIC_START) && (ni < NI_SIMD_AS_HWINTRINSIC_END))
{
// These intrinsics aren't defined recursively and so they will never be mustExpand.
// They provide software fallbacks that will be executed instead.
assert(!mustExpand);
return impSimdAsHWIntrinsic(ni, clsHnd, method, sig, newobjThis);
}
#endif // FEATURE_HW_INTRINSICS
}
*pIntrinsicName = ni;
if (ni == NI_System_StubHelpers_GetStubContext)
{
// must be done regardless of DbgCode and MinOpts
return gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL);
}
if (ni == NI_System_StubHelpers_NextCallReturnAddress)
{
// For now we just avoid inlining anything into these methods since
// this intrinsic is only rarely used. We could do this better if we
// wanted to by trying to match which call is the one we need to get
// the return address of.
info.compHasNextCallRetAddr = true;
return new (this, GT_LABEL) GenTree(GT_LABEL, TYP_I_IMPL);
}
switch (ni)
{
// CreateSpan must be expanded for NativeAOT
case NI_System_Runtime_CompilerServices_RuntimeHelpers_CreateSpan:
case NI_System_Runtime_CompilerServices_RuntimeHelpers_InitializeArray:
mustExpand |= IsTargetAbi(CORINFO_CORERT_ABI);
break;
case NI_System_ByReference_ctor:
case NI_System_ByReference_get_Value:
case NI_System_Activator_AllocatorOf:
case NI_System_Activator_DefaultConstructorOf:
case NI_System_Object_MethodTableOf:
case NI_System_EETypePtr_EETypePtrOf:
mustExpand = true;
break;
default:
break;
}
GenTree* retNode = nullptr;
// Under debug and minopts, only expand what is required.
// NextCallReturnAddress intrinsic returns the return address of the next call.
// If that call is an intrinsic and is expanded, codegen for NextCallReturnAddress will fail.
// To avoid that we conservatively expand only required intrinsics in methods that call
// the NextCallReturnAddress intrinsic.
if (!mustExpand && (opts.OptimizationDisabled() || info.compHasNextCallRetAddr))
{
*pIntrinsicName = NI_Illegal;
return retNode;
}
CorInfoType callJitType = sig->retType;
var_types callType = JITtype2varType(callJitType);
/* First do the intrinsics which are always smaller than a call */
if (ni != NI_Illegal)
{
assert(retNode == nullptr);
switch (ni)
{
case NI_Array_Address:
case NI_Array_Get:
case NI_Array_Set:
retNode = impArrayAccessIntrinsic(clsHnd, sig, memberRef, readonlyCall, ni);
break;
case NI_System_String_Equals:
{
retNode = impStringEqualsOrStartsWith(/*startsWith:*/ false, sig, methodFlags);
break;
}
case NI_System_MemoryExtensions_Equals:
case NI_System_MemoryExtensions_SequenceEqual:
{
retNode = impSpanEqualsOrStartsWith(/*startsWith:*/ false, sig, methodFlags);
break;
}
case NI_System_String_StartsWith:
{
retNode = impStringEqualsOrStartsWith(/*startsWith:*/ true, sig, methodFlags);
break;
}
case NI_System_MemoryExtensions_StartsWith:
{
retNode = impSpanEqualsOrStartsWith(/*startsWith:*/ true, sig, methodFlags);
break;
}
case NI_System_MemoryExtensions_AsSpan:
case NI_System_String_op_Implicit:
{
assert(sig->numArgs == 1);
isSpecial = impStackTop().val->OperIs(GT_CNS_STR);
break;
}
case NI_System_String_get_Chars:
{
GenTree* op2 = impPopStack().val;
GenTree* op1 = impPopStack().val;
retNode = gtNewIndexRef(TYP_USHORT, op1, op2);
retNode->gtFlags |= GTF_INX_STRING_LAYOUT;
break;
}
case NI_System_String_get_Length:
{
GenTree* op1 = impPopStack().val;
if (op1->OperIs(GT_CNS_STR))
{
// Optimize `ldstr + String::get_Length()` to CNS_INT
// e.g. "Hello".Length => 5
GenTreeIntCon* iconNode = gtNewStringLiteralLength(op1->AsStrCon());
if (iconNode != nullptr)
{
retNode = iconNode;
break;
}
}
GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, op1, OFFSETOF__CORINFO_String__stringLen, compCurBB);
op1 = arrLen;
// Getting the length of a null string should throw
op1->gtFlags |= GTF_EXCEPT;
retNode = op1;
break;
}
// Implement ByReference Ctor. This wraps the assignment of the ref into a byref-like field
// in a value type. The canonical example of this is Span<T>. In effect this is just a
// substitution. The parameter byref will be assigned into the newly allocated object.
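// For example (illustrative), "new ByReference<T>(ref value)" becomes a direct store of the
// incoming byref into the struct's single field; no constructor call is emitted.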
case NI_System_ByReference_ctor:
{
// Remove call to constructor and directly assign the byref passed
// to the call to the first slot of the ByReference struct.
GenTree* op1 = impPopStack().val;
GenTree* thisptr = newobjThis;
CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0);
GenTree* field = gtNewFieldRef(TYP_BYREF, fldHnd, thisptr, 0);
GenTree* assign = gtNewAssignNode(field, op1);
GenTree* byReferenceStruct = gtCloneExpr(thisptr->gtGetOp1());
assert(byReferenceStruct != nullptr);
impPushOnStack(byReferenceStruct, typeInfo(TI_STRUCT, clsHnd));
retNode = assign;
break;
}
// Implement ptr value getter for ByReference struct.
case NI_System_ByReference_get_Value:
{
GenTree* op1 = impPopStack().val;
CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0);
GenTree* field = gtNewFieldRef(TYP_BYREF, fldHnd, op1, 0);
retNode = field;
break;
}
case NI_System_Runtime_CompilerServices_RuntimeHelpers_CreateSpan:
{
retNode = impCreateSpanIntrinsic(sig);
break;
}
case NI_System_Runtime_CompilerServices_RuntimeHelpers_InitializeArray:
{
retNode = impInitializeArrayIntrinsic(sig);
break;
}
case NI_System_Runtime_CompilerServices_RuntimeHelpers_IsKnownConstant:
{
GenTree* op1 = impPopStack().val;
if (op1->OperIsConst())
{
// op1 is a known constant, replace with 'true'.
retNode = gtNewIconNode(1);
JITDUMP("\nExpanding RuntimeHelpers.IsKnownConstant to true early\n");
// We can also consider FTN_ADDR and typeof(T) here
}
else
{
// op1 is not a known constant, we'll do the expansion in morph
retNode = new (this, GT_INTRINSIC) GenTreeIntrinsic(TYP_INT, op1, ni, method);
JITDUMP("\nConverting RuntimeHelpers.IsKnownConstant to:\n");
DISPTREE(retNode);
}
break;
}
case NI_System_Activator_AllocatorOf:
case NI_System_Activator_DefaultConstructorOf:
case NI_System_Object_MethodTableOf:
case NI_System_EETypePtr_EETypePtrOf:
{
assert(IsTargetAbi(CORINFO_CORERT_ABI)); // Only CoreRT supports it.
CORINFO_RESOLVED_TOKEN resolvedToken;
resolvedToken.tokenContext = impTokenLookupContextHandle;
resolvedToken.tokenScope = info.compScopeHnd;
resolvedToken.token = memberRef;
resolvedToken.tokenType = CORINFO_TOKENKIND_Method;
CORINFO_GENERICHANDLE_RESULT embedInfo;
info.compCompHnd->expandRawHandleIntrinsic(&resolvedToken, &embedInfo);
GenTree* rawHandle = impLookupToTree(&resolvedToken, &embedInfo.lookup, gtTokenToIconFlags(memberRef),
embedInfo.compileTimeHandle);
if (rawHandle == nullptr)
{
return nullptr;
}
noway_assert(genTypeSize(rawHandle->TypeGet()) == genTypeSize(TYP_I_IMPL));
unsigned rawHandleSlot = lvaGrabTemp(true DEBUGARG("rawHandle"));
impAssignTempGen(rawHandleSlot, rawHandle, clsHnd, (unsigned)CHECK_SPILL_NONE);
GenTree* lclVar = gtNewLclvNode(rawHandleSlot, TYP_I_IMPL);
GenTree* lclVarAddr = gtNewOperNode(GT_ADDR, TYP_I_IMPL, lclVar);
var_types resultType = JITtype2varType(sig->retType);
retNode = gtNewOperNode(GT_IND, resultType, lclVarAddr);
break;
}
case NI_System_Span_get_Item:
case NI_System_ReadOnlySpan_get_Item:
{
// We have the index and a byref pointer to the Span<T> 's' on the stack. Expand to:
//
// For Span<T>
// Comma
// BoundsCheck(index, s->_length)
// s->_pointer + index * sizeof(T)
//
// For ReadOnlySpan<T> -- same expansion, as it now returns a readonly ref
//
// Signature should show one class type parameter, which
// we need to examine.
assert(sig->sigInst.classInstCount == 1);
assert(sig->numArgs == 1);
CORINFO_CLASS_HANDLE spanElemHnd = sig->sigInst.classInst[0];
const unsigned elemSize = info.compCompHnd->getClassSize(spanElemHnd);
assert(elemSize > 0);
const bool isReadOnly = (ni == NI_System_ReadOnlySpan_get_Item);
JITDUMP("\nimpIntrinsic: Expanding %sSpan<T>.get_Item, T=%s, sizeof(T)=%u\n",
isReadOnly ? "ReadOnly" : "", eeGetClassName(spanElemHnd), elemSize);
GenTree* index = impPopStack().val;
GenTree* ptrToSpan = impPopStack().val;
GenTree* indexClone = nullptr;
GenTree* ptrToSpanClone = nullptr;
assert(genActualType(index) == TYP_INT);
assert(ptrToSpan->TypeGet() == TYP_BYREF);
#if defined(DEBUG)
if (verbose)
{
printf("with ptr-to-span\n");
gtDispTree(ptrToSpan);
printf("and index\n");
gtDispTree(index);
}
#endif // defined(DEBUG)
// We need to use both index and ptr-to-span twice, so clone or spill.
index = impCloneExpr(index, &indexClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Span.get_Item index"));
ptrToSpan = impCloneExpr(ptrToSpan, &ptrToSpanClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Span.get_Item ptrToSpan"));
// Bounds check
CORINFO_FIELD_HANDLE lengthHnd = info.compCompHnd->getFieldInClass(clsHnd, 1);
const unsigned lengthOffset = info.compCompHnd->getFieldOffset(lengthHnd);
GenTree* length = gtNewFieldRef(TYP_INT, lengthHnd, ptrToSpan, lengthOffset);
GenTree* boundsCheck = new (this, GT_BOUNDS_CHECK) GenTreeBoundsChk(index, length, SCK_RNGCHK_FAIL);
// Element access
index = indexClone;
#ifdef TARGET_64BIT
if (index->OperGet() == GT_CNS_INT)
{
index->gtType = TYP_I_IMPL;
}
else
{
index = gtNewCastNode(TYP_I_IMPL, index, true, TYP_I_IMPL);
}
#endif
if (elemSize != 1)
{
GenTree* sizeofNode = gtNewIconNode(static_cast<ssize_t>(elemSize), TYP_I_IMPL);
index = gtNewOperNode(GT_MUL, TYP_I_IMPL, index, sizeofNode);
}
CORINFO_FIELD_HANDLE ptrHnd = info.compCompHnd->getFieldInClass(clsHnd, 0);
const unsigned ptrOffset = info.compCompHnd->getFieldOffset(ptrHnd);
GenTree* data = gtNewFieldRef(TYP_BYREF, ptrHnd, ptrToSpanClone, ptrOffset);
GenTree* result = gtNewOperNode(GT_ADD, TYP_BYREF, data, index);
// Prepare result
var_types resultType = JITtype2varType(sig->retType);
assert(resultType == result->TypeGet());
retNode = gtNewOperNode(GT_COMMA, resultType, boundsCheck, result);
break;
}
case NI_System_RuntimeTypeHandle_GetValueInternal:
{
GenTree* op1 = impStackTop(0).val;
if (op1->gtOper == GT_CALL && (op1->AsCall()->gtCallType == CT_HELPER) &&
gtIsTypeHandleToRuntimeTypeHandleHelper(op1->AsCall()))
{
// Old tree
// Helper-RuntimeTypeHandle -> TreeToGetNativeTypeHandle
//
// New tree
// TreeToGetNativeTypeHandle
// Remove call to helper and return the native TypeHandle pointer that was the parameter
// to that helper.
op1 = impPopStack().val;
// Get native TypeHandle argument to old helper
GenTreeCall::Use* arg = op1->AsCall()->gtCallArgs;
assert(arg->GetNext() == nullptr);
op1 = arg->GetNode();
retNode = op1;
}
// Call the regular function.
break;
}
case NI_System_Type_GetTypeFromHandle:
{
GenTree* op1 = impStackTop(0).val;
CorInfoHelpFunc typeHandleHelper;
if (op1->gtOper == GT_CALL && (op1->AsCall()->gtCallType == CT_HELPER) &&
gtIsTypeHandleToRuntimeTypeHandleHelper(op1->AsCall(), &typeHandleHelper))
{
op1 = impPopStack().val;
// Replace helper with a more specialized helper that returns RuntimeType
if (typeHandleHelper == CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE)
{
typeHandleHelper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE;
}
else
{
assert(typeHandleHelper == CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE_MAYBENULL);
typeHandleHelper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL;
}
assert(op1->AsCall()->gtCallArgs->GetNext() == nullptr);
op1 = gtNewHelperCallNode(typeHandleHelper, TYP_REF, op1->AsCall()->gtCallArgs);
op1->gtType = TYP_REF;
retNode = op1;
}
break;
}
case NI_System_Type_op_Equality:
case NI_System_Type_op_Inequality:
{
JITDUMP("Importing Type.op_*Equality intrinsic\n");
GenTree* op1 = impStackTop(1).val;
GenTree* op2 = impStackTop(0).val;
GenTree* optTree = gtFoldTypeEqualityCall(ni == NI_System_Type_op_Equality, op1, op2);
if (optTree != nullptr)
{
// Success, clean up the evaluation stack.
impPopStack();
impPopStack();
// See if we can optimize even further, to a handle compare.
optTree = gtFoldTypeCompare(optTree);
// See if we can now fold a handle compare to a constant.
optTree = gtFoldExpr(optTree);
retNode = optTree;
}
else
{
// Retry optimizing these later
isSpecial = true;
}
break;
}
case NI_System_Enum_HasFlag:
{
GenTree* thisOp = impStackTop(1).val;
GenTree* flagOp = impStackTop(0).val;
GenTree* optTree = gtOptimizeEnumHasFlag(thisOp, flagOp);
if (optTree != nullptr)
{
// Optimization successful. Pop the stack for real.
impPopStack();
impPopStack();
retNode = optTree;
}
else
{
// Retry optimizing this during morph.
isSpecial = true;
}
break;
}
case NI_System_Type_IsAssignableFrom:
{
GenTree* typeTo = impStackTop(1).val;
GenTree* typeFrom = impStackTop(0).val;
retNode = impTypeIsAssignable(typeTo, typeFrom);
break;
}
case NI_System_Type_IsAssignableTo:
{
GenTree* typeTo = impStackTop(0).val;
GenTree* typeFrom = impStackTop(1).val;
retNode = impTypeIsAssignable(typeTo, typeFrom);
break;
}
case NI_System_Type_get_IsValueType:
{
// Optimize
//
// call Type.GetTypeFromHandle (which is replaced with CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE)
// call Type.IsValueType
//
// to `true` or `false`
// e.g. `typeof(int).IsValueType` => `true`
if (impStackTop().val->IsCall())
{
GenTreeCall* call = impStackTop().val->AsCall();
if (call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE))
{
CORINFO_CLASS_HANDLE hClass = gtGetHelperArgClassHandle(call->gtCallArgs->GetNode());
if (hClass != NO_CLASS_HANDLE)
{
retNode =
gtNewIconNode((eeIsValueClass(hClass) &&
// pointers are not value types (e.g. typeof(int*).IsValueType is false)
info.compCompHnd->asCorInfoType(hClass) != CORINFO_TYPE_PTR)
? 1
: 0);
impPopStack(); // drop CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE call
}
}
}
break;
}
case NI_System_Threading_Thread_get_ManagedThreadId:
{
if (impStackTop().val->OperIs(GT_RET_EXPR))
{
GenTreeCall* call = impStackTop().val->AsRetExpr()->gtInlineCandidate->AsCall();
if (call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC)
{
if (lookupNamedIntrinsic(call->gtCallMethHnd) == NI_System_Threading_Thread_get_CurrentThread)
{
// drop get_CurrentThread() call
impPopStack();
call->ReplaceWith(gtNewNothingNode(), this);
retNode = gtNewHelperCallNode(CORINFO_HELP_GETCURRENTMANAGEDTHREADID, TYP_INT);
}
}
}
break;
}
#ifdef TARGET_ARM64
// Intrinsify Interlocked.Or and Interlocked.And only for arm64-v8.1 (and newer)
// TODO-CQ: Implement for XArch (https://github.com/dotnet/runtime/issues/32239).
case NI_System_Threading_Interlocked_Or:
case NI_System_Threading_Interlocked_And:
{
if (compOpportunisticallyDependsOn(InstructionSet_Atomics))
{
assert(sig->numArgs == 2);
GenTree* op2 = impPopStack().val;
GenTree* op1 = impPopStack().val;
genTreeOps op = (ni == NI_System_Threading_Interlocked_Or) ? GT_XORR : GT_XAND;
retNode = gtNewOperNode(op, genActualType(callType), op1, op2);
retNode->gtFlags |= GTF_GLOB_REF | GTF_ASG;
}
break;
}
#endif // TARGET_ARM64
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
// TODO-ARM-CQ: reenable treating InterlockedCmpXchg32 operation as intrinsic
case NI_System_Threading_Interlocked_CompareExchange:
{
var_types retType = JITtype2varType(sig->retType);
if ((retType == TYP_LONG) && (TARGET_POINTER_SIZE == 4))
{
break;
}
if ((retType != TYP_INT) && (retType != TYP_LONG))
{
break;
}
assert(callType != TYP_STRUCT);
assert(sig->numArgs == 3);
GenTree* op3 = impPopStack().val; // comparand
GenTree* op2 = impPopStack().val; // value
GenTree* op1 = impPopStack().val; // location
GenTree* node = new (this, GT_CMPXCHG) GenTreeCmpXchg(genActualType(callType), op1, op2, op3);
node->AsCmpXchg()->gtOpLocation->gtFlags |= GTF_DONT_CSE;
retNode = node;
break;
}
case NI_System_Threading_Interlocked_Exchange:
case NI_System_Threading_Interlocked_ExchangeAdd:
{
assert(callType != TYP_STRUCT);
assert(sig->numArgs == 2);
var_types retType = JITtype2varType(sig->retType);
if ((retType == TYP_LONG) && (TARGET_POINTER_SIZE == 4))
{
break;
}
if ((retType != TYP_INT) && (retType != TYP_LONG))
{
break;
}
GenTree* op2 = impPopStack().val;
GenTree* op1 = impPopStack().val;
// This creates:
// val
// XAdd
// addr
// field (for example)
//
// In the case where the first argument is the address of a local, we might
// want to make this *not* make the var address-taken -- but atomic instructions
// on a local are probably pretty useless anyway, so we probably don't care.
op1 = gtNewOperNode(ni == NI_System_Threading_Interlocked_ExchangeAdd ? GT_XADD : GT_XCHG,
genActualType(callType), op1, op2);
op1->gtFlags |= GTF_GLOB_REF | GTF_ASG;
retNode = op1;
break;
}
#endif // defined(TARGET_XARCH) || defined(TARGET_ARM64)
case NI_System_Threading_Interlocked_MemoryBarrier:
case NI_System_Threading_Interlocked_ReadMemoryBarrier:
{
assert(sig->numArgs == 0);
GenTree* op1 = new (this, GT_MEMORYBARRIER) GenTree(GT_MEMORYBARRIER, TYP_VOID);
op1->gtFlags |= GTF_GLOB_REF | GTF_ASG;
// On XARCH `NI_System_Threading_Interlocked_ReadMemoryBarrier` fences need not be emitted.
// However, we still need to capture the effect on reordering.
if (ni == NI_System_Threading_Interlocked_ReadMemoryBarrier)
{
op1->gtFlags |= GTF_MEMORYBARRIER_LOAD;
}
retNode = op1;
break;
}
#ifdef FEATURE_HW_INTRINSICS
case NI_System_Math_FusedMultiplyAdd:
{
#ifdef TARGET_XARCH
if (compExactlyDependsOn(InstructionSet_FMA))
{
assert(varTypeIsFloating(callType));
// We are constructing a chain of intrinsics similar to:
// return FMA.MultiplyAddScalar(
// Vector128.CreateScalarUnsafe(x),
// Vector128.CreateScalarUnsafe(y),
// Vector128.CreateScalarUnsafe(z)
// ).ToScalar();
GenTree* op3 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, impPopStack().val,
NI_Vector128_CreateScalarUnsafe, callJitType, 16);
GenTree* op2 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, impPopStack().val,
NI_Vector128_CreateScalarUnsafe, callJitType, 16);
GenTree* op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, impPopStack().val,
NI_Vector128_CreateScalarUnsafe, callJitType, 16);
GenTree* res =
gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, op2, op3, NI_FMA_MultiplyAddScalar, callJitType, 16);
retNode = gtNewSimdHWIntrinsicNode(callType, res, NI_Vector128_ToScalar, callJitType, 16);
break;
}
#elif defined(TARGET_ARM64)
if (compExactlyDependsOn(InstructionSet_AdvSimd))
{
assert(varTypeIsFloating(callType));
// We are constructing a chain of intrinsics similar to:
// return AdvSimd.FusedMultiplyAddScalar(
// Vector64.Create{ScalarUnsafe}(z),
// Vector64.Create{ScalarUnsafe}(y),
// Vector64.Create{ScalarUnsafe}(x)
// ).ToScalar();
NamedIntrinsic createVector64 =
(callType == TYP_DOUBLE) ? NI_Vector64_Create : NI_Vector64_CreateScalarUnsafe;
constexpr unsigned int simdSize = 8;
GenTree* op3 =
gtNewSimdHWIntrinsicNode(TYP_SIMD8, impPopStack().val, createVector64, callJitType, simdSize);
GenTree* op2 =
gtNewSimdHWIntrinsicNode(TYP_SIMD8, impPopStack().val, createVector64, callJitType, simdSize);
GenTree* op1 =
gtNewSimdHWIntrinsicNode(TYP_SIMD8, impPopStack().val, createVector64, callJitType, simdSize);
// Note that AdvSimd.FusedMultiplyAddScalar(op1,op2,op3) corresponds to op1 + op2 * op3
// while Math{F}.FusedMultiplyAddScalar(op1,op2,op3) corresponds to op1 * op2 + op3
retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op3, op2, op1, NI_AdvSimd_FusedMultiplyAddScalar,
callJitType, simdSize);
retNode = gtNewSimdHWIntrinsicNode(callType, retNode, NI_Vector64_ToScalar, callJitType, simdSize);
break;
}
#endif
// TODO-CQ-XArch: Ideally we would create a GT_INTRINSIC node for fma, however, that currently
// requires more extensive changes to valuenum to support methods with 3 operands.
// We want to generate a GT_INTRINSIC node in the case the call can't be treated as
// a target intrinsic so that we can still benefit from CSE and constant folding.
break;
}
#endif // FEATURE_HW_INTRINSICS
case NI_System_Math_Abs:
case NI_System_Math_Acos:
case NI_System_Math_Acosh:
case NI_System_Math_Asin:
case NI_System_Math_Asinh:
case NI_System_Math_Atan:
case NI_System_Math_Atanh:
case NI_System_Math_Atan2:
case NI_System_Math_Cbrt:
case NI_System_Math_Ceiling:
case NI_System_Math_Cos:
case NI_System_Math_Cosh:
case NI_System_Math_Exp:
case NI_System_Math_Floor:
case NI_System_Math_FMod:
case NI_System_Math_ILogB:
case NI_System_Math_Log:
case NI_System_Math_Log2:
case NI_System_Math_Log10:
#ifdef TARGET_ARM64
// ARM64 has fmax/fmin which are IEEE754:2019 minimum/maximum compatible
// TODO-XARCH-CQ: Enable this for XARCH when one of the arguments is a constant
// so we can then emit maxss/minss and avoid NaN/-0.0 handling
case NI_System_Math_Max:
case NI_System_Math_Min:
#endif
case NI_System_Math_Pow:
case NI_System_Math_Round:
case NI_System_Math_Sin:
case NI_System_Math_Sinh:
case NI_System_Math_Sqrt:
case NI_System_Math_Tan:
case NI_System_Math_Tanh:
case NI_System_Math_Truncate:
{
retNode = impMathIntrinsic(method, sig, callType, ni, tailCall);
break;
}
case NI_System_Array_Clone:
case NI_System_Collections_Generic_Comparer_get_Default:
case NI_System_Collections_Generic_EqualityComparer_get_Default:
case NI_System_Object_MemberwiseClone:
case NI_System_Threading_Thread_get_CurrentThread:
{
// Flag for later handling.
isSpecial = true;
break;
}
case NI_System_Object_GetType:
{
JITDUMP("\n impIntrinsic: call to Object.GetType\n");
GenTree* op1 = impStackTop(0).val;
// If we're calling GetType on a boxed value, just get the type directly.
if (op1->IsBoxedValue())
{
JITDUMP("Attempting to optimize box(...).getType() to direct type construction\n");
// Try and clean up the box. Obtain the handle we
// were going to pass to the newobj.
GenTree* boxTypeHandle = gtTryRemoveBoxUpstreamEffects(op1, BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE);
if (boxTypeHandle != nullptr)
{
// Note we don't need to play the TYP_STRUCT games here like we
// do for LDTOKEN since the return value of this operator is Type,
// not RuntimeTypeHandle.
impPopStack();
GenTreeCall::Use* helperArgs = gtNewCallArgs(boxTypeHandle);
GenTree* runtimeType =
gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE, TYP_REF, helperArgs);
retNode = runtimeType;
}
}
// If we have a constrained callvirt with a "box this" transform
// we know we have a value class and hence an exact type.
//
// If so, instead of boxing and then extracting the type, just
// construct the type directly.
if ((retNode == nullptr) && (pConstrainedResolvedToken != nullptr) &&
(constraintCallThisTransform == CORINFO_BOX_THIS))
{
// Ensure this is one of the simple box cases (in particular, rule out nullables).
const CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pConstrainedResolvedToken->hClass);
const bool isSafeToOptimize = (boxHelper == CORINFO_HELP_BOX);
if (isSafeToOptimize)
{
JITDUMP("Optimizing constrained box-this obj.getType() to direct type construction\n");
impPopStack();
GenTree* typeHandleOp =
impTokenToHandle(pConstrainedResolvedToken, nullptr, true /* mustRestoreHandle */);
if (typeHandleOp == nullptr)
{
assert(compDonotInline());
return nullptr;
}
GenTreeCall::Use* helperArgs = gtNewCallArgs(typeHandleOp);
GenTree* runtimeType =
gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE, TYP_REF, helperArgs);
retNode = runtimeType;
}
}
#ifdef DEBUG
if (retNode != nullptr)
{
JITDUMP("Optimized result for call to GetType is\n");
if (verbose)
{
gtDispTree(retNode);
}
}
#endif
// Else expand as an intrinsic, unless the call is constrained,
// in which case we defer expansion to allow impImportCall do the
// special constraint processing.
if ((retNode == nullptr) && (pConstrainedResolvedToken == nullptr))
{
JITDUMP("Expanding as special intrinsic\n");
impPopStack();
op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, ni, method);
// Set the CALL flag to indicate that the operator is implemented by a call.
// Set also the EXCEPTION flag because the native implementation of
// NI_System_Object_GetType intrinsic can throw NullReferenceException.
op1->gtFlags |= (GTF_CALL | GTF_EXCEPT);
retNode = op1;
// Might be further optimizable, so arrange to leave a mark behind
isSpecial = true;
}
if (retNode == nullptr)
{
JITDUMP("Leaving as normal call\n");
// Might be further optimizable, so arrange to leave a mark behind
isSpecial = true;
}
break;
}
case NI_System_Array_GetLength:
case NI_System_Array_GetLowerBound:
case NI_System_Array_GetUpperBound:
{
// System.Array.GetLength(Int32) method:
// public int GetLength(int dimension)
// System.Array.GetLowerBound(Int32) method:
// public int GetLowerBound(int dimension)
// System.Array.GetUpperBound(Int32) method:
// public int GetUpperBound(int dimension)
//
// Only implement these as intrinsics for multi-dimensional arrays.
// Only handle constant dimension arguments.
GenTree* gtDim = impStackTop().val;
GenTree* gtArr = impStackTop(1).val;
if (gtDim->IsIntegralConst())
{
bool isExact = false;
bool isNonNull = false;
CORINFO_CLASS_HANDLE arrCls = gtGetClassHandle(gtArr, &isExact, &isNonNull);
if (arrCls != NO_CLASS_HANDLE)
{
unsigned rank = info.compCompHnd->getArrayRank(arrCls);
if ((rank > 1) && !info.compCompHnd->isSDArray(arrCls))
{
// `rank` is guaranteed to be <=32 (see MAX_RANK in vm\array.h). Any constant argument
// is `int` sized.
INT64 dimValue = gtDim->AsIntConCommon()->IntegralValue();
assert((unsigned int)dimValue == dimValue);
unsigned dim = (unsigned int)dimValue;
if (dim < rank)
{
// This is now known to be a multi-dimension array with a constant dimension
// that is in range; we can expand it as an intrinsic.
impPopStack().val; // Pop the dim and array object; we already have a pointer to them.
impPopStack().val;
// Make sure there are no global effects in the array (such as it being a function
// call), so we can mark the generated indirection with GTF_IND_INVARIANT. In the
// GetUpperBound case we need the cloned object, since we refer to the array
// object twice. In the other cases, we don't need to clone.
GenTree* gtArrClone = nullptr;
if (((gtArr->gtFlags & GTF_GLOB_EFFECT) != 0) || (ni == NI_System_Array_GetUpperBound))
{
gtArr = impCloneExpr(gtArr, &gtArrClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("MD intrinsics array"));
}
switch (ni)
{
case NI_System_Array_GetLength:
{
// Generate *(array + offset-to-length-array + sizeof(int) * dim)
unsigned offs = eeGetMDArrayLengthOffset(rank, dim);
GenTree* gtOffs = gtNewIconNode(offs, TYP_I_IMPL);
GenTree* gtAddr = gtNewOperNode(GT_ADD, TYP_BYREF, gtArr, gtOffs);
retNode = gtNewIndir(TYP_INT, gtAddr);
retNode->gtFlags |= GTF_IND_INVARIANT;
break;
}
case NI_System_Array_GetLowerBound:
{
// Generate *(array + offset-to-bounds-array + sizeof(int) * dim)
unsigned offs = eeGetMDArrayLowerBoundOffset(rank, dim);
GenTree* gtOffs = gtNewIconNode(offs, TYP_I_IMPL);
GenTree* gtAddr = gtNewOperNode(GT_ADD, TYP_BYREF, gtArr, gtOffs);
retNode = gtNewIndir(TYP_INT, gtAddr);
retNode->gtFlags |= GTF_IND_INVARIANT;
break;
}
case NI_System_Array_GetUpperBound:
{
assert(gtArrClone != nullptr);
// Generate:
// *(array + offset-to-length-array + sizeof(int) * dim) +
// *(array + offset-to-bounds-array + sizeof(int) * dim) - 1
unsigned offs = eeGetMDArrayLowerBoundOffset(rank, dim);
GenTree* gtOffs = gtNewIconNode(offs, TYP_I_IMPL);
GenTree* gtAddr = gtNewOperNode(GT_ADD, TYP_BYREF, gtArr, gtOffs);
GenTree* gtLowerBound = gtNewIndir(TYP_INT, gtAddr);
gtLowerBound->gtFlags |= GTF_IND_INVARIANT;
offs = eeGetMDArrayLengthOffset(rank, dim);
gtOffs = gtNewIconNode(offs, TYP_I_IMPL);
gtAddr = gtNewOperNode(GT_ADD, TYP_BYREF, gtArrClone, gtOffs);
GenTree* gtLength = gtNewIndir(TYP_INT, gtAddr);
gtLength->gtFlags |= GTF_IND_INVARIANT;
GenTree* gtSum = gtNewOperNode(GT_ADD, TYP_INT, gtLowerBound, gtLength);
GenTree* gtOne = gtNewIconNode(1, TYP_INT);
retNode = gtNewOperNode(GT_SUB, TYP_INT, gtSum, gtOne);
break;
}
default:
unreached();
}
}
}
}
}
break;
}
case NI_System_Buffers_Binary_BinaryPrimitives_ReverseEndianness:
{
assert(sig->numArgs == 1);
// We expect the return type of the ReverseEndianness routine to match the type of the
// one and only argument to the method. We use a special instruction for 16-bit
// BSWAPs since on x86 processors this is implemented as ROR <16-bit reg>, 8. Additionally,
// we only emit 64-bit BSWAP instructions on 64-bit archs; if we're asked to perform a
// 64-bit byte swap on a 32-bit arch, we'll fall back to the default case in the switch block below.
switch (sig->retType)
{
case CorInfoType::CORINFO_TYPE_SHORT:
case CorInfoType::CORINFO_TYPE_USHORT:
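// GT_BSWAP16 produces its result as TYP_INT, so cast back down to the 16-bit return type.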
retNode = gtNewCastNode(TYP_INT, gtNewOperNode(GT_BSWAP16, TYP_INT, impPopStack().val), false,
callType);
break;
case CorInfoType::CORINFO_TYPE_INT:
case CorInfoType::CORINFO_TYPE_UINT:
#ifdef TARGET_64BIT
case CorInfoType::CORINFO_TYPE_LONG:
case CorInfoType::CORINFO_TYPE_ULONG:
#endif // TARGET_64BIT
retNode = gtNewOperNode(GT_BSWAP, callType, impPopStack().val);
break;
default:
// This default case gets hit on 32-bit archs when a call to a 64-bit overload
// of ReverseEndianness is encountered. In that case we'll let the JIT treat this as a standard
// method call, where the implementation decomposes the operation into two 32-bit
// bswap routines. If the input to the 64-bit function is a constant, then we rely
// on inlining + constant folding of 32-bit bswaps to effectively constant fold
// the 64-bit call site.
break;
}
break;
}
// Fold PopCount for constant input
case NI_System_Numerics_BitOperations_PopCount:
{
assert(sig->numArgs == 1);
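// If the operand is a constant, fold the pop count at JIT time; otherwise leave
// the call to be imported normally.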
if (impStackTop().val->IsIntegralConst())
{
typeInfo argType = verParseArgSigToTypeInfo(sig, sig->args).NormaliseForStack();
INT64 cns = impPopStack().val->AsIntConCommon()->IntegralValue();
if (argType.IsType(TI_LONG))
{
retNode = gtNewIconNode(genCountBits(cns), callType);
}
else
{
assert(argType.IsType(TI_INT));
retNode = gtNewIconNode(genCountBits(static_cast<unsigned>(cns)), callType);
}
}
break;
}
case NI_System_GC_KeepAlive:
{
retNode = impKeepAliveIntrinsic(impPopStack().val);
break;
}
default:
break;
}
}
if (mustExpand && (retNode == nullptr))
{
assert(!"Unhandled must expand intrinsic, throwing PlatformNotSupportedException");
return impUnsupportedNamedIntrinsic(CORINFO_HELP_THROW_PLATFORM_NOT_SUPPORTED, method, sig, mustExpand);
}
// Optionally report if this intrinsic is special
// (that is, potentially re-optimizable during morph).
if (isSpecialIntrinsic != nullptr)
{
*isSpecialIntrinsic = isSpecial;
}
return retNode;
}
GenTree* Compiler::impTypeIsAssignable(GenTree* typeTo, GenTree* typeFrom)
{
// Optimize patterns like:
//
// typeof(TTo).IsAssignableFrom(typeof(TTFrom))
// valueTypeVar.GetType().IsAssignableFrom(typeof(TTFrom))
// typeof(TTFrom).IsAssignableTo(typeof(TTo))
// typeof(TTFrom).IsAssignableTo(valueTypeVar.GetType())
//
// to true/false
if (typeTo->IsCall() && typeFrom->IsCall())
{
// make sure both arguments are `typeof()`
CORINFO_METHOD_HANDLE hTypeof = eeFindHelper(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE);
if ((typeTo->AsCall()->gtCallMethHnd == hTypeof) && (typeFrom->AsCall()->gtCallMethHnd == hTypeof))
{
CORINFO_CLASS_HANDLE hClassTo = gtGetHelperArgClassHandle(typeTo->AsCall()->gtCallArgs->GetNode());
CORINFO_CLASS_HANDLE hClassFrom = gtGetHelperArgClassHandle(typeFrom->AsCall()->gtCallArgs->GetNode());
if (hClassTo == NO_CLASS_HANDLE || hClassFrom == NO_CLASS_HANDLE)
{
return nullptr;
}
TypeCompareState castResult = info.compCompHnd->compareTypesForCast(hClassFrom, hClassTo);
if (castResult == TypeCompareState::May)
{
// requires runtime check
// e.g. __Canon, COMObjects, Nullable
return nullptr;
}
GenTreeIntCon* retNode = gtNewIconNode((castResult == TypeCompareState::Must) ? 1 : 0);
impPopStack(); // drop both CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE calls
impPopStack();
return retNode;
}
}
return nullptr;
}
GenTree* Compiler::impMathIntrinsic(CORINFO_METHOD_HANDLE method,
CORINFO_SIG_INFO* sig,
var_types callType,
NamedIntrinsic intrinsicName,
bool tailCall)
{
GenTree* op1;
GenTree* op2;
assert(callType != TYP_STRUCT);
assert(IsMathIntrinsic(intrinsicName));
op1 = nullptr;
#if !defined(TARGET_X86)
// Intrinsics that are not implemented directly by target instructions will
// be re-materialized as user calls in rationalizer. For prefixed tail calls,
// don't do this optimization, because
// a) For back compatibility reasons on desktop .NET Framework 4.6 / 4.6.1
//  b) It will be a non-trivial task or too late to re-materialize a surviving
//     tail prefixed GT_INTRINSIC as a tail call in rationalizer.
if (!IsIntrinsicImplementedByUserCall(intrinsicName) || !tailCall)
#else
// On x86 RyuJIT, importing intrinsics that are implemented as user calls can cause incorrect calculation
// of the depth of the stack if these intrinsics are used as arguments to another call. This causes bad
// code generation for certain EH constructs.
if (!IsIntrinsicImplementedByUserCall(intrinsicName))
#endif
{
CORINFO_CLASS_HANDLE tmpClass;
CORINFO_ARG_LIST_HANDLE arg;
var_types op1Type;
var_types op2Type;
switch (sig->numArgs)
{
case 1:
op1 = impPopStack().val;
arg = sig->args;
op1Type = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg, &tmpClass)));
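// The stack entry may hold the argument at a different floating-point width than the
// signature declares; if so, normalize it to the call's type with a cast.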
if (op1->TypeGet() != genActualType(op1Type))
{
assert(varTypeIsFloating(op1));
op1 = gtNewCastNode(callType, op1, false, callType);
}
op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, intrinsicName, method);
break;
case 2:
op2 = impPopStack().val;
op1 = impPopStack().val;
arg = sig->args;
op1Type = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg, &tmpClass)));
if (op1->TypeGet() != genActualType(op1Type))
{
assert(varTypeIsFloating(op1));
op1 = gtNewCastNode(callType, op1, false, callType);
}
arg = info.compCompHnd->getArgNext(arg);
op2Type = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg, &tmpClass)));
if (op2->TypeGet() != genActualType(op2Type))
{
assert(varTypeIsFloating(op2));
op2 = gtNewCastNode(callType, op2, false, callType);
}
op1 =
new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, op2, intrinsicName, method);
break;
default:
NO_WAY("Unsupported number of args for Math Intrinsic");
}
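// If this intrinsic will ultimately be expanded as a user call, flag the node with
// GTF_CALL so its side effects are modeled correctly.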
if (IsIntrinsicImplementedByUserCall(intrinsicName))
{
op1->gtFlags |= GTF_CALL;
}
}
return op1;
}
//------------------------------------------------------------------------
// lookupNamedIntrinsic: map method to jit named intrinsic value
//
// Arguments:
// method -- method handle for method
//
// Return Value:
// Id for the named intrinsic, or Illegal if none.
//
// Notes:
// method should have CORINFO_FLG_INTRINSIC set in its attributes,
// otherwise it is not a named jit intrinsic.
//
NamedIntrinsic Compiler::lookupNamedIntrinsic(CORINFO_METHOD_HANDLE method)
{
const char* className = nullptr;
const char* namespaceName = nullptr;
const char* enclosingClassName = nullptr;
const char* methodName =
info.compCompHnd->getMethodNameFromMetadata(method, &className, &namespaceName, &enclosingClassName);
JITDUMP("Named Intrinsic ");
if (namespaceName != nullptr)
{
JITDUMP("%s.", namespaceName);
}
if (enclosingClassName != nullptr)
{
JITDUMP("%s.", enclosingClassName);
}
if (className != nullptr)
{
JITDUMP("%s.", className);
}
if (methodName != nullptr)
{
JITDUMP("%s", methodName);
}
if ((namespaceName == nullptr) || (className == nullptr) || (methodName == nullptr))
{
// Check if we are dealing with an MD array's known runtime method
CorInfoArrayIntrinsic arrayFuncIndex = info.compCompHnd->getArrayIntrinsicID(method);
switch (arrayFuncIndex)
{
case CorInfoArrayIntrinsic::GET:
JITDUMP("ARRAY_FUNC_GET: Recognized\n");
return NI_Array_Get;
case CorInfoArrayIntrinsic::SET:
JITDUMP("ARRAY_FUNC_SET: Recognized\n");
return NI_Array_Set;
case CorInfoArrayIntrinsic::ADDRESS:
JITDUMP("ARRAY_FUNC_ADDRESS: Recognized\n");
return NI_Array_Address;
default:
break;
}
JITDUMP(": Not recognized, not enough metadata\n");
return NI_Illegal;
}
JITDUMP(": ");
NamedIntrinsic result = NI_Illegal;
if (strcmp(namespaceName, "System") == 0)
{
if ((strcmp(className, "Enum") == 0) && (strcmp(methodName, "HasFlag") == 0))
{
result = NI_System_Enum_HasFlag;
}
else if (strcmp(className, "Activator") == 0)
{
if (strcmp(methodName, "AllocatorOf") == 0)
{
result = NI_System_Activator_AllocatorOf;
}
else if (strcmp(methodName, "DefaultConstructorOf") == 0)
{
result = NI_System_Activator_DefaultConstructorOf;
}
}
else if (strcmp(className, "ByReference`1") == 0)
{
if (strcmp(methodName, ".ctor") == 0)
{
result = NI_System_ByReference_ctor;
}
else if (strcmp(methodName, "get_Value") == 0)
{
result = NI_System_ByReference_get_Value;
}
}
else if (strcmp(className, "Math") == 0 || strcmp(className, "MathF") == 0)
{
if (strcmp(methodName, "Abs") == 0)
{
result = NI_System_Math_Abs;
}
else if (strcmp(methodName, "Acos") == 0)
{
result = NI_System_Math_Acos;
}
else if (strcmp(methodName, "Acosh") == 0)
{
result = NI_System_Math_Acosh;
}
else if (strcmp(methodName, "Asin") == 0)
{
result = NI_System_Math_Asin;
}
else if (strcmp(methodName, "Asinh") == 0)
{
result = NI_System_Math_Asinh;
}
else if (strcmp(methodName, "Atan") == 0)
{
result = NI_System_Math_Atan;
}
else if (strcmp(methodName, "Atanh") == 0)
{
result = NI_System_Math_Atanh;
}
else if (strcmp(methodName, "Atan2") == 0)
{
result = NI_System_Math_Atan2;
}
else if (strcmp(methodName, "Cbrt") == 0)
{
result = NI_System_Math_Cbrt;
}
else if (strcmp(methodName, "Ceiling") == 0)
{
result = NI_System_Math_Ceiling;
}
else if (strcmp(methodName, "Cos") == 0)
{
result = NI_System_Math_Cos;
}
else if (strcmp(methodName, "Cosh") == 0)
{
result = NI_System_Math_Cosh;
}
else if (strcmp(methodName, "Exp") == 0)
{
result = NI_System_Math_Exp;
}
else if (strcmp(methodName, "Floor") == 0)
{
result = NI_System_Math_Floor;
}
else if (strcmp(methodName, "FMod") == 0)
{
result = NI_System_Math_FMod;
}
else if (strcmp(methodName, "FusedMultiplyAdd") == 0)
{
result = NI_System_Math_FusedMultiplyAdd;
}
else if (strcmp(methodName, "ILogB") == 0)
{
result = NI_System_Math_ILogB;
}
else if (strcmp(methodName, "Log") == 0)
{
result = NI_System_Math_Log;
}
else if (strcmp(methodName, "Log2") == 0)
{
result = NI_System_Math_Log2;
}
else if (strcmp(methodName, "Log10") == 0)
{
result = NI_System_Math_Log10;
}
else if (strcmp(methodName, "Max") == 0)
{
result = NI_System_Math_Max;
}
else if (strcmp(methodName, "Min") == 0)
{
result = NI_System_Math_Min;
}
else if (strcmp(methodName, "Pow") == 0)
{
result = NI_System_Math_Pow;
}
else if (strcmp(methodName, "Round") == 0)
{
result = NI_System_Math_Round;
}
else if (strcmp(methodName, "Sin") == 0)
{
result = NI_System_Math_Sin;
}
else if (strcmp(methodName, "Sinh") == 0)
{
result = NI_System_Math_Sinh;
}
else if (strcmp(methodName, "Sqrt") == 0)
{
result = NI_System_Math_Sqrt;
}
else if (strcmp(methodName, "Tan") == 0)
{
result = NI_System_Math_Tan;
}
else if (strcmp(methodName, "Tanh") == 0)
{
result = NI_System_Math_Tanh;
}
else if (strcmp(methodName, "Truncate") == 0)
{
result = NI_System_Math_Truncate;
}
}
else if (strcmp(className, "GC") == 0)
{
if (strcmp(methodName, "KeepAlive") == 0)
{
result = NI_System_GC_KeepAlive;
}
}
else if (strcmp(className, "Array") == 0)
{
if (strcmp(methodName, "Clone") == 0)
{
result = NI_System_Array_Clone;
}
else if (strcmp(methodName, "GetLength") == 0)
{
result = NI_System_Array_GetLength;
}
else if (strcmp(methodName, "GetLowerBound") == 0)
{
result = NI_System_Array_GetLowerBound;
}
else if (strcmp(methodName, "GetUpperBound") == 0)
{
result = NI_System_Array_GetUpperBound;
}
}
else if (strcmp(className, "Object") == 0)
{
if (strcmp(methodName, "MemberwiseClone") == 0)
{
result = NI_System_Object_MemberwiseClone;
}
else if (strcmp(methodName, "GetType") == 0)
{
result = NI_System_Object_GetType;
}
else if (strcmp(methodName, "MethodTableOf") == 0)
{
result = NI_System_Object_MethodTableOf;
}
}
else if (strcmp(className, "RuntimeTypeHandle") == 0)
{
if (strcmp(methodName, "GetValueInternal") == 0)
{
result = NI_System_RuntimeTypeHandle_GetValueInternal;
}
}
else if (strcmp(className, "Type") == 0)
{
if (strcmp(methodName, "get_IsValueType") == 0)
{
result = NI_System_Type_get_IsValueType;
}
else if (strcmp(methodName, "IsAssignableFrom") == 0)
{
result = NI_System_Type_IsAssignableFrom;
}
else if (strcmp(methodName, "IsAssignableTo") == 0)
{
result = NI_System_Type_IsAssignableTo;
}
else if (strcmp(methodName, "op_Equality") == 0)
{
result = NI_System_Type_op_Equality;
}
else if (strcmp(methodName, "op_Inequality") == 0)
{
result = NI_System_Type_op_Inequality;
}
else if (strcmp(methodName, "GetTypeFromHandle") == 0)
{
result = NI_System_Type_GetTypeFromHandle;
}
}
else if (strcmp(className, "String") == 0)
{
if (strcmp(methodName, "Equals") == 0)
{
result = NI_System_String_Equals;
}
else if (strcmp(methodName, "get_Chars") == 0)
{
result = NI_System_String_get_Chars;
}
else if (strcmp(methodName, "get_Length") == 0)
{
result = NI_System_String_get_Length;
}
else if (strcmp(methodName, "op_Implicit") == 0)
{
result = NI_System_String_op_Implicit;
}
else if (strcmp(methodName, "StartsWith") == 0)
{
result = NI_System_String_StartsWith;
}
}
else if (strcmp(className, "MemoryExtensions") == 0)
{
if (strcmp(methodName, "AsSpan") == 0)
{
result = NI_System_MemoryExtensions_AsSpan;
}
if (strcmp(methodName, "SequenceEqual") == 0)
{
result = NI_System_MemoryExtensions_SequenceEqual;
}
else if (strcmp(methodName, "Equals") == 0)
{
result = NI_System_MemoryExtensions_Equals;
}
else if (strcmp(methodName, "StartsWith") == 0)
{
result = NI_System_MemoryExtensions_StartsWith;
}
}
else if (strcmp(className, "Span`1") == 0)
{
if (strcmp(methodName, "get_Item") == 0)
{
result = NI_System_Span_get_Item;
}
}
else if (strcmp(className, "ReadOnlySpan`1") == 0)
{
if (strcmp(methodName, "get_Item") == 0)
{
result = NI_System_ReadOnlySpan_get_Item;
}
}
else if (strcmp(className, "EETypePtr") == 0)
{
if (strcmp(methodName, "EETypePtrOf") == 0)
{
result = NI_System_EETypePtr_EETypePtrOf;
}
}
}
else if (strcmp(namespaceName, "System.Threading") == 0)
{
if (strcmp(className, "Thread") == 0)
{
if (strcmp(methodName, "get_CurrentThread") == 0)
{
result = NI_System_Threading_Thread_get_CurrentThread;
}
else if (strcmp(methodName, "get_ManagedThreadId") == 0)
{
result = NI_System_Threading_Thread_get_ManagedThreadId;
}
}
else if (strcmp(className, "Interlocked") == 0)
{
#ifndef TARGET_ARM64
// TODO-CQ: Implement for XArch (https://github.com/dotnet/runtime/issues/32239).
if (strcmp(methodName, "And") == 0)
{
result = NI_System_Threading_Interlocked_And;
}
else if (strcmp(methodName, "Or") == 0)
{
result = NI_System_Threading_Interlocked_Or;
}
#endif
if (strcmp(methodName, "CompareExchange") == 0)
{
result = NI_System_Threading_Interlocked_CompareExchange;
}
else if (strcmp(methodName, "Exchange") == 0)
{
result = NI_System_Threading_Interlocked_Exchange;
}
else if (strcmp(methodName, "ExchangeAdd") == 0)
{
result = NI_System_Threading_Interlocked_ExchangeAdd;
}
else if (strcmp(methodName, "MemoryBarrier") == 0)
{
result = NI_System_Threading_Interlocked_MemoryBarrier;
}
else if (strcmp(methodName, "ReadMemoryBarrier") == 0)
{
result = NI_System_Threading_Interlocked_ReadMemoryBarrier;
}
}
}
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
else if (strcmp(namespaceName, "System.Buffers.Binary") == 0)
{
if ((strcmp(className, "BinaryPrimitives") == 0) && (strcmp(methodName, "ReverseEndianness") == 0))
{
result = NI_System_Buffers_Binary_BinaryPrimitives_ReverseEndianness;
}
}
#endif // defined(TARGET_XARCH) || defined(TARGET_ARM64)
else if (strcmp(namespaceName, "System.Collections.Generic") == 0)
{
if ((strcmp(className, "EqualityComparer`1") == 0) && (strcmp(methodName, "get_Default") == 0))
{
result = NI_System_Collections_Generic_EqualityComparer_get_Default;
}
else if ((strcmp(className, "Comparer`1") == 0) && (strcmp(methodName, "get_Default") == 0))
{
result = NI_System_Collections_Generic_Comparer_get_Default;
}
}
else if ((strcmp(namespaceName, "System.Numerics") == 0) && (strcmp(className, "BitOperations") == 0))
{
if (strcmp(methodName, "PopCount") == 0)
{
result = NI_System_Numerics_BitOperations_PopCount;
}
}
#ifdef FEATURE_HW_INTRINSICS
else if (strcmp(namespaceName, "System.Numerics") == 0)
{
CORINFO_SIG_INFO sig;
info.compCompHnd->getMethodSig(method, &sig);
int sizeOfVectorT = getSIMDVectorRegisterByteLength();
result = SimdAsHWIntrinsicInfo::lookupId(&sig, className, methodName, enclosingClassName, sizeOfVectorT);
}
#endif // FEATURE_HW_INTRINSICS
else if ((strcmp(namespaceName, "System.Runtime.CompilerServices") == 0) &&
(strcmp(className, "RuntimeHelpers") == 0))
{
if (strcmp(methodName, "CreateSpan") == 0)
{
result = NI_System_Runtime_CompilerServices_RuntimeHelpers_CreateSpan;
}
else if (strcmp(methodName, "InitializeArray") == 0)
{
result = NI_System_Runtime_CompilerServices_RuntimeHelpers_InitializeArray;
}
else if (strcmp(methodName, "IsKnownConstant") == 0)
{
result = NI_System_Runtime_CompilerServices_RuntimeHelpers_IsKnownConstant;
}
}
else if (strncmp(namespaceName, "System.Runtime.Intrinsics", 25) == 0)
{
// We go down this path even when FEATURE_HW_INTRINSICS isn't enabled
// so we can specially handle IsSupported and recursive calls.
// This is required to appropriately handle the intrinsics on platforms
// which don't support them. On such a platform, methods like Vector64.Create
// will be seen as `Intrinsic` and `mustExpand` due to having a code path
// which is recursive. When such a path is hit we expect it to be handled by
// the importer and we fire an assert if it wasn't; previous versions of the JIT
// would fail fast here, but this was changed to throw a PNSE instead. We still
// assert since most intrinsics should have been recognized/handled.
// In order to avoid the assert, we specially handle the IsSupported checks
// (to better allow dead-code optimizations) and we explicitly throw a PNSE
// as we know that is the desired behavior for the HWIntrinsics when not
// supported. For cases like Vector64.Create, this is fine because it will
// be behind a relevant IsSupported check and will never be hit and the
// software fallback will be executed instead.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef FEATURE_HW_INTRINSICS
namespaceName += 25;
const char* platformNamespaceName;
#if defined(TARGET_XARCH)
platformNamespaceName = ".X86";
#elif defined(TARGET_ARM64)
platformNamespaceName = ".Arm";
#else
#error Unsupported platform
#endif
if ((namespaceName[0] == '\0') || (strcmp(namespaceName, platformNamespaceName) == 0))
{
CORINFO_SIG_INFO sig;
info.compCompHnd->getMethodSig(method, &sig);
result = HWIntrinsicInfo::lookupId(this, &sig, className, methodName, enclosingClassName);
}
#endif // FEATURE_HW_INTRINSICS
if (result == NI_Illegal)
{
if ((strcmp(methodName, "get_IsSupported") == 0) || (strcmp(methodName, "get_IsHardwareAccelerated") == 0))
{
// This allows the relevant code paths to be dropped as dead code even
// on platforms where FEATURE_HW_INTRINSICS is not supported.
result = NI_IsSupported_False;
}
else if (gtIsRecursiveCall(method))
{
// For the framework itself, any recursive intrinsics will either be
// only supported on a single platform or will be guarded by a relevant
// IsSupported check so the throw PNSE will be valid or dropped.
result = NI_Throw_PlatformNotSupportedException;
}
}
}
else if (strcmp(namespaceName, "System.StubHelpers") == 0)
{
if (strcmp(className, "StubHelpers") == 0)
{
if (strcmp(methodName, "GetStubContext") == 0)
{
result = NI_System_StubHelpers_GetStubContext;
}
else if (strcmp(methodName, "NextCallReturnAddress") == 0)
{
result = NI_System_StubHelpers_NextCallReturnAddress;
}
}
}
if (result == NI_Illegal)
{
JITDUMP("Not recognized\n");
}
else if (result == NI_IsSupported_False)
{
JITDUMP("Unsupported - return false");
}
else if (result == NI_Throw_PlatformNotSupportedException)
{
JITDUMP("Unsupported - throw PlatformNotSupportedException");
}
else
{
JITDUMP("Recognized\n");
}
return result;
}
//------------------------------------------------------------------------
// impUnsupportedNamedIntrinsic: Throws an exception for an unsupported named intrinsic
//
// Arguments:
// helper - JIT helper ID for the exception to be thrown
// method - method handle of the intrinsic function.
// sig - signature of the intrinsic call
// mustExpand - true if the intrinsic must return a GenTree*; otherwise, false
//
// Return Value:
// a gtNewMustThrowException if mustExpand is true; otherwise, nullptr
//
GenTree* Compiler::impUnsupportedNamedIntrinsic(unsigned helper,
CORINFO_METHOD_HANDLE method,
CORINFO_SIG_INFO* sig,
bool mustExpand)
{
// We've hit some error case and may need to return a node for the given error.
//
// When `mustExpand=false`, we are attempting to inline the intrinsic directly into another method. In this
// scenario, we need to return `nullptr` so that a GT_CALL to the intrinsic is emitted instead. This is to
// ensure that everything continues to behave correctly when optimizations are enabled (e.g. things like the
// inliner may expect the node we return to have a certain signature, and the `MustThrowException` node won't
// match that).
//
// When `mustExpand=true`, we are in a GT_CALL to the intrinsic and are attempting to JIT it. This will generally
// be in response to an indirect call (e.g. done via reflection) or in response to an earlier attempt returning
// `nullptr` (under `mustExpand=false`). In that scenario, we are safe to return the `MustThrowException` node.
if (mustExpand)
{
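// Pop the intrinsic's arguments; they are not consumed since the call is being
// replaced with a throw.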
for (unsigned i = 0; i < sig->numArgs; i++)
{
impPopStack();
}
return gtNewMustThrowException(helper, JITtype2varType(sig->retType), sig->retTypeClass);
}
else
{
return nullptr;
}
}
/*****************************************************************************/
GenTree* Compiler::impArrayAccessIntrinsic(
CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, int memberRef, bool readonlyCall, NamedIntrinsic intrinsicName)
{
/* If we are generating SMALL_CODE, we don't want to use intrinsics for
the following, as it generates fatter code.
*/
if (compCodeOpt() == SMALL_CODE)
{
return nullptr;
}
/* These intrinsics generate fatter (but faster) code and are only
done if we don't need SMALL_CODE */
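// For Array.Set the last argument is the value being stored, so it does not count
// towards the rank.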
unsigned rank = (intrinsicName == NI_Array_Set) ? (sig->numArgs - 1) : sig->numArgs;
// The rank 1 case is special because it has to handle two array formats;
// we will simply not do that case.
if (rank > GT_ARR_MAX_RANK || rank <= 1)
{
return nullptr;
}
CORINFO_CLASS_HANDLE arrElemClsHnd = nullptr;
var_types elemType = JITtype2varType(info.compCompHnd->getChildType(clsHnd, &arrElemClsHnd));
// For the ref case, we will only be able to inline if the types match
// (the verifier checks for this; we don't care for the nonverified case) and the
// type is final (so we don't need to do the cast).
if ((intrinsicName != NI_Array_Get) && !readonlyCall && varTypeIsGC(elemType))
{
// Get the call site signature
CORINFO_SIG_INFO LocalSig;
eeGetCallSiteSig(memberRef, info.compScopeHnd, impTokenLookupContextHandle, &LocalSig);
assert(LocalSig.hasThis());
CORINFO_CLASS_HANDLE actualElemClsHnd;
if (intrinsicName == NI_Array_Set)
{
// Fetch the last argument, the one that indicates the type we are setting.
CORINFO_ARG_LIST_HANDLE argType = LocalSig.args;
for (unsigned r = 0; r < rank; r++)
{
argType = info.compCompHnd->getArgNext(argType);
}
typeInfo argInfo = verParseArgSigToTypeInfo(&LocalSig, argType);
actualElemClsHnd = argInfo.GetClassHandle();
}
else
{
assert(intrinsicName == NI_Array_Address);
// Fetch the return type
typeInfo retInfo = verMakeTypeInfo(LocalSig.retType, LocalSig.retTypeClass);
assert(retInfo.IsByRef());
actualElemClsHnd = retInfo.GetClassHandle();
}
// if it's not final, we can't do the optimization
if (!(info.compCompHnd->getClassAttribs(actualElemClsHnd) & CORINFO_FLG_FINAL))
{
return nullptr;
}
}
unsigned arrayElemSize;
if (elemType == TYP_STRUCT)
{
assert(arrElemClsHnd);
arrayElemSize = info.compCompHnd->getClassSize(arrElemClsHnd);
}
else
{
arrayElemSize = genTypeSize(elemType);
}
if ((unsigned char)arrayElemSize != arrayElemSize)
{
// arrayElemSize would be truncated as an unsigned char.
// This means the array element is too large. Don't do the optimization.
return nullptr;
}
GenTree* val = nullptr;
if (intrinsicName == NI_Array_Set)
{
// Assignment of a struct is more work, and there are more gets than sets.
if (elemType == TYP_STRUCT)
{
return nullptr;
}
val = impPopStack().val;
assert(genActualType(elemType) == genActualType(val->gtType) ||
(elemType == TYP_FLOAT && val->gtType == TYP_DOUBLE) ||
(elemType == TYP_INT && val->gtType == TYP_BYREF) ||
(elemType == TYP_DOUBLE && val->gtType == TYP_FLOAT));
}
noway_assert((unsigned char)GT_ARR_MAX_RANK == GT_ARR_MAX_RANK);
GenTree* inds[GT_ARR_MAX_RANK];
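// Pop the index expressions in reverse order so that inds[0] ends up holding the
// first dimension's index.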
for (unsigned k = rank; k > 0; k--)
{
inds[k - 1] = impPopStack().val;
}
GenTree* arr = impPopStack().val;
assert(arr->gtType == TYP_REF);
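// Build a GT_ARR_ELEM node that computes the byref address of the element.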
GenTree* arrElem =
new (this, GT_ARR_ELEM) GenTreeArrElem(TYP_BYREF, arr, static_cast<unsigned char>(rank),
static_cast<unsigned char>(arrayElemSize), elemType, &inds[0]);
if (intrinsicName != NI_Array_Address)
{
if (varTypeIsStruct(elemType))
{
arrElem = gtNewObjNode(sig->retTypeClass, arrElem);
}
else
{
arrElem = gtNewOperNode(GT_IND, elemType, arrElem);
}
}
if (intrinsicName == NI_Array_Set)
{
assert(val != nullptr);
return gtNewAssignNode(arrElem, val);
}
else
{
return arrElem;
}
}
//------------------------------------------------------------------------
// impKeepAliveIntrinsic: Import the GC.KeepAlive intrinsic call
//
// Imports the intrinsic as a GT_KEEPALIVE node, and, as an optimization,
// if the object to keep alive is a GT_BOX, removes its side effects and
// uses the address of a local (copied from the box's source if needed)
// as the operand for GT_KEEPALIVE. For the BOX optimization, if the class
// of the box has no GC fields, a GT_NOP is returned.
//
// Arguments:
// objToKeepAlive - the intrinsic call's argument
//
// Return Value:
// The imported GT_KEEPALIVE or GT_NOP - see description.
//
GenTree* Compiler::impKeepAliveIntrinsic(GenTree* objToKeepAlive)
{
assert(objToKeepAlive->TypeIs(TYP_REF));
if (opts.OptimizationEnabled() && objToKeepAlive->IsBoxedValue())
{
CORINFO_CLASS_HANDLE boxedClass = lvaGetDesc(objToKeepAlive->AsBox()->BoxOp()->AsLclVar())->lvClassHnd;
ClassLayout* layout = typGetObjLayout(boxedClass);
if (!layout->HasGCPtr())
{
gtTryRemoveBoxUpstreamEffects(objToKeepAlive, BR_REMOVE_AND_NARROW);
JITDUMP("\nBOX class has no GC fields, KEEPALIVE is a NOP");
return gtNewNothingNode();
}
GenTree* boxSrc = gtTryRemoveBoxUpstreamEffects(objToKeepAlive, BR_REMOVE_BUT_NOT_NARROW);
if (boxSrc != nullptr)
{
unsigned boxTempNum;
if (boxSrc->OperIs(GT_LCL_VAR))
{
boxTempNum = boxSrc->AsLclVarCommon()->GetLclNum();
}
else
{
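// The box source is not a simple local; spill it to a new temp and redirect the
// box's copy statement to initialize that temp.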
boxTempNum = lvaGrabTemp(true DEBUGARG("Temp for the box source"));
GenTree* boxTempAsg = gtNewTempAssign(boxTempNum, boxSrc);
Statement* boxAsgStmt = objToKeepAlive->AsBox()->gtCopyStmtWhenInlinedBoxValue;
boxAsgStmt->SetRootNode(boxTempAsg);
}
JITDUMP("\nImporting KEEPALIVE(BOX) as KEEPALIVE(ADDR(LCL_VAR V%02u))", boxTempNum);
GenTree* boxTemp = gtNewLclvNode(boxTempNum, boxSrc->TypeGet());
GenTree* boxTempAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, boxTemp);
return gtNewKeepAliveNode(boxTempAddr);
}
}
return gtNewKeepAliveNode(objToKeepAlive);
}
bool Compiler::verMergeEntryStates(BasicBlock* block, bool* changed)
{
unsigned i;
// do some basic checks first
if (block->bbStackDepthOnEntry() != verCurrentState.esStackDepth)
{
return false;
}
if (verCurrentState.esStackDepth > 0)
{
// merge stack types
StackEntry* parentStack = block->bbStackOnEntry();
StackEntry* childStack = verCurrentState.esStack;
for (i = 0; i < verCurrentState.esStackDepth; i++, parentStack++, childStack++)
{
if (tiMergeToCommonParent(&parentStack->seTypeInfo, &childStack->seTypeInfo, changed) == false)
{
return false;
}
}
}
// merge initialization status of this ptr
if (verTrackObjCtorInitState)
{
// If we're tracking the CtorInitState, then it must not be unknown in the current state.
assert(verCurrentState.thisInitialized != TIS_Bottom);
// If the successor block's thisInit state is unknown, copy it from the current state.
if (block->bbThisOnEntry() == TIS_Bottom)
{
*changed = true;
verSetThisInit(block, verCurrentState.thisInitialized);
}
else if (verCurrentState.thisInitialized != block->bbThisOnEntry())
{
if (block->bbThisOnEntry() != TIS_Top)
{
*changed = true;
verSetThisInit(block, TIS_Top);
if (block->bbFlags & BBF_FAILED_VERIFICATION)
{
// The block is bad. Control can flow through the block to any handler that catches the
// verification exception, but the importer ignores bad blocks and therefore won't model
// this flow in the normal way. To complete the merge into the bad block, the new state
// needs to be manually pushed to the handlers that may be reached after the verification
// exception occurs.
//
// Usually, the new state was already propagated to the relevant handlers while processing
// the predecessors of the bad block. The exception is when the bad block is at the start
// of a try region, meaning it is protected by additional handlers that do not protect its
// predecessors.
//
if (block->hasTryIndex() && ((block->bbFlags & BBF_TRY_BEG) != 0))
{
// Push TIS_Top to the handlers that protect the bad block. Note that this can cause
// recursive calls back into this code path (if successors of the current bad block are
// also bad blocks).
//
ThisInitState origTIS = verCurrentState.thisInitialized;
verCurrentState.thisInitialized = TIS_Top;
impVerifyEHBlock(block, true);
verCurrentState.thisInitialized = origTIS;
}
}
}
}
}
else
{
assert(verCurrentState.thisInitialized == TIS_Bottom && block->bbThisOnEntry() == TIS_Bottom);
}
return true;
}
/*****************************************************************************
* 'logMsg' is true if a log message needs to be logged. false if the caller has
* already logged it (presumably in a more detailed fashion than done here)
*/
void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg))
{
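// Convert the block into an unconditional throw: mark it BBJ_THROW, record the
// verification failure, and clear BBF_IMPORTED.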
block->bbJumpKind = BBJ_THROW;
block->bbFlags |= BBF_FAILED_VERIFICATION;
block->bbFlags &= ~BBF_IMPORTED;
impCurStmtOffsSet(block->bbCodeOffs);
// Clear the statement list as it exists so far; we're only going to have a verification exception.
impStmtList = impLastStmt = nullptr;
#ifdef DEBUG
if (logMsg)
{
JITLOG((LL_ERROR, "Verification failure: while compiling %s near IL offset %x..%xh \n", info.compFullName,
block->bbCodeOffs, block->bbCodeOffsEnd));
if (verbose)
{
printf("\n\nVerification failure: %s near IL %xh \n", info.compFullName, block->bbCodeOffs);
}
}
if (JitConfig.DebugBreakOnVerificationFailure())
{
DebugBreak();
}
#endif
impBeginTreeList();
// if the stack is non-empty evaluate all the side-effects
if (verCurrentState.esStackDepth > 0)
{
impEvalSideEffects();
}
assert(verCurrentState.esStackDepth == 0);
GenTree* op1 =
gtNewHelperCallNode(CORINFO_HELP_VERIFICATION, TYP_VOID, gtNewCallArgs(gtNewIconNode(block->bbCodeOffs)));
// verCurrentState.esStackDepth = 0;
impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);
// The inliner is not able to handle methods that require a throw block, so
// make sure this method never gets inlined.
info.compCompHnd->setMethodAttribs(info.compMethodHnd, CORINFO_FLG_BAD_INLINEE);
}
/*****************************************************************************
*
*/
void Compiler::verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg))
{
verResetCurrentState(block, &verCurrentState);
verConvertBBToThrowVerificationException(block DEBUGARG(logMsg));
#ifdef DEBUG
impNoteLastILoffs(); // Remember at which BC offset the tree was finished
#endif // DEBUG
}
/******************************************************************************/
typeInfo Compiler::verMakeTypeInfo(CorInfoType ciType, CORINFO_CLASS_HANDLE clsHnd)
{
assert(ciType < CORINFO_TYPE_COUNT);
typeInfo tiResult;
switch (ciType)
{
case CORINFO_TYPE_STRING:
case CORINFO_TYPE_CLASS:
tiResult = verMakeTypeInfo(clsHnd);
if (!tiResult.IsType(TI_REF))
{ // type must be consistent with element type
return typeInfo();
}
break;
#ifdef TARGET_64BIT
case CORINFO_TYPE_NATIVEINT:
case CORINFO_TYPE_NATIVEUINT:
if (clsHnd)
{
// If we have more precise information, use it
return verMakeTypeInfo(clsHnd);
}
else
{
return typeInfo::nativeInt();
}
break;
#endif // TARGET_64BIT
case CORINFO_TYPE_VALUECLASS:
case CORINFO_TYPE_REFANY:
tiResult = verMakeTypeInfo(clsHnd);
// type must be consistent with element type;
if (!tiResult.IsValueClass())
{
return typeInfo();
}
break;
case CORINFO_TYPE_VAR:
return verMakeTypeInfo(clsHnd);
case CORINFO_TYPE_PTR: // for now, pointers are treated as an error
case CORINFO_TYPE_VOID:
return typeInfo();
break;
case CORINFO_TYPE_BYREF:
{
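// For a byref, build the type info for the pointed-at type and wrap it in a ByRef.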
CORINFO_CLASS_HANDLE childClassHandle;
CorInfoType childType = info.compCompHnd->getChildType(clsHnd, &childClassHandle);
return ByRef(verMakeTypeInfo(childType, childClassHandle));
}
break;
default:
if (clsHnd)
{ // If we have more precise information, use it
return typeInfo(TI_STRUCT, clsHnd);
}
else
{
return typeInfo(JITtype2tiType(ciType));
}
}
return tiResult;
}
/******************************************************************************/
typeInfo Compiler::verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd, bool bashStructToRef /* = false */)
{
if (clsHnd == nullptr)
{
return typeInfo();
}
// Byrefs should only occur in method and local signatures, which are accessed
// using ICorClassInfo and ICorClassInfo.getChildType.
// So findClass() and getClassAttribs() should not be called for byrefs
if (JITtype2varType(info.compCompHnd->asCorInfoType(clsHnd)) == TYP_BYREF)
{
assert(!"Did findClass() return a Byref?");
return typeInfo();
}
unsigned attribs = info.compCompHnd->getClassAttribs(clsHnd);
if (attribs & CORINFO_FLG_VALUECLASS)
{
CorInfoType t = info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd);
// Meta-data validation should ensure that CORINFO_TYPE_BYREF does
// not occur here, so we may want to change this to an assert instead.
if (t == CORINFO_TYPE_VOID || t == CORINFO_TYPE_BYREF || t == CORINFO_TYPE_PTR)
{
return typeInfo();
}
#ifdef TARGET_64BIT
if (t == CORINFO_TYPE_NATIVEINT || t == CORINFO_TYPE_NATIVEUINT)
{
return typeInfo::nativeInt();
}
#endif // TARGET_64BIT
if (t != CORINFO_TYPE_UNDEF)
{
return (typeInfo(JITtype2tiType(t)));
}
else if (bashStructToRef)
{
return (typeInfo(TI_REF, clsHnd));
}
else
{
return (typeInfo(TI_STRUCT, clsHnd));
}
}
else if (attribs & CORINFO_FLG_GENERIC_TYPE_VARIABLE)
{
// See comment in _typeInfo.h for why we do it this way.
return (typeInfo(TI_REF, clsHnd, true));
}
else
{
return (typeInfo(TI_REF, clsHnd));
}
}
/******************************************************************************/
bool Compiler::verIsSDArray(const typeInfo& ti)
{
if (ti.IsNullObjRef())
{ // nulls are SD arrays
return true;
}
if (!ti.IsType(TI_REF))
{
return false;
}
if (!info.compCompHnd->isSDArray(ti.GetClassHandleForObjRef()))
{
return false;
}
return true;
}
/******************************************************************************/
/* Given 'arrayObjectType' which is an array type, fetch the element type. */
/* Returns an error type if anything goes wrong */
typeInfo Compiler::verGetArrayElemType(const typeInfo& arrayObjectType)
{
assert(!arrayObjectType.IsNullObjRef()); // you need to check for null explicitly since that is a success case
if (!verIsSDArray(arrayObjectType))
{
return typeInfo();
}
CORINFO_CLASS_HANDLE childClassHandle = nullptr;
CorInfoType ciType = info.compCompHnd->getChildType(arrayObjectType.GetClassHandleForObjRef(), &childClassHandle);
return verMakeTypeInfo(ciType, childClassHandle);
}
/*****************************************************************************
*/
typeInfo Compiler::verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args)
{
CORINFO_CLASS_HANDLE classHandle;
CorInfoType ciType = strip(info.compCompHnd->getArgType(sig, args, &classHandle));
var_types type = JITtype2varType(ciType);
if (varTypeIsGC(type))
{
// For efficiency, getArgType only returns something in classHandle for
// value types. For other types that have additional type info, you
// have to call back explicitly.
classHandle = info.compCompHnd->getArgClass(sig, args);
if (!classHandle)
{
NO_WAY("Could not figure out Class specified in argument or local signature");
}
}
return verMakeTypeInfo(ciType, classHandle);
}
bool Compiler::verIsByRefLike(const typeInfo& ti)
{
if (ti.IsByRef())
{
return true;
}
if (!ti.IsType(TI_STRUCT))
{
return false;
}
return info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_BYREF_LIKE;
}
bool Compiler::verIsSafeToReturnByRef(const typeInfo& ti)
{
if (ti.IsPermanentHomeByRef())
{
return true;
}
else
{
return false;
}
}
bool Compiler::verIsBoxable(const typeInfo& ti)
{
return (ti.IsPrimitiveType() || ti.IsObjRef() // includes boxed generic type variables
|| ti.IsUnboxedGenericTypeVar() ||
(ti.IsType(TI_STRUCT) &&
// exclude byreflike structs
!(info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_BYREF_LIKE)));
}
// Is it a boxed value type?
bool Compiler::verIsBoxedValueType(const typeInfo& ti)
{
if (ti.GetType() == TI_REF)
{
CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandleForObjRef();
return !!eeIsValueClass(clsHnd);
}
else
{
return false;
}
}
/*****************************************************************************
*
* Check if a TailCall is legal.
*/
bool Compiler::verCheckTailCallConstraint(
OPCODE opcode,
CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a type parameter?
bool speculative // If true, won't throw if verification fails. Instead it will
// return false to the caller.
// If false, it will throw.
)
{
DWORD mflags;
CORINFO_SIG_INFO sig;
unsigned int popCount = 0; // we can't pop the stack since impImportCall needs it, so
// this counter is used to keep track of how many items have been
// virtually popped
CORINFO_METHOD_HANDLE methodHnd = nullptr;
CORINFO_CLASS_HANDLE methodClassHnd = nullptr;
unsigned methodClassFlgs = 0;
assert(impOpcodeIsCallOpcode(opcode));
if (compIsForInlining())
{
return false;
}
// for calli, VerifyOrReturn that this is not a virtual method
if (opcode == CEE_CALLI)
{
/* Get the call sig */
eeGetSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &sig);
// We don't know the target method, so we have to infer the flags, or
// assume the worst-case.
mflags = (sig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
}
else
{
methodHnd = pResolvedToken->hMethod;
mflags = info.compCompHnd->getMethodAttribs(methodHnd);
// When verifying generic code we pair the method handle with its
// owning class to get the exact method signature.
methodClassHnd = pResolvedToken->hClass;
assert(methodClassHnd);
eeGetMethodSig(methodHnd, &sig, methodClassHnd);
// opcode specific check
methodClassFlgs = info.compCompHnd->getClassAttribs(methodClassHnd);
}
// We must have got the methodClassHnd if opcode is not CEE_CALLI
assert((methodHnd != nullptr && methodClassHnd != nullptr) || opcode == CEE_CALLI);
if ((sig.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
{
eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &sig);
}
// check compatibility of the arguments
unsigned int argCount;
argCount = sig.numArgs;
CORINFO_ARG_LIST_HANDLE args;
args = sig.args;
while (argCount--)
{
typeInfo tiDeclared = verParseArgSigToTypeInfo(&sig, args).NormaliseForStack();
// check that the argument is not a byref for tailcalls
VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclared), "tailcall on byrefs", speculative);
// For unsafe code, we might have parameters containing pointer to the stack location.
// Disallow the tailcall for this kind.
CORINFO_CLASS_HANDLE classHandle;
CorInfoType ciType = strip(info.compCompHnd->getArgType(&sig, args, &classHandle));
VerifyOrReturnSpeculative(ciType != CORINFO_TYPE_PTR, "tailcall on CORINFO_TYPE_PTR", speculative);
args = info.compCompHnd->getArgNext(args);
}
// update popCount
popCount += sig.numArgs;
// check for 'this' which is on non-static methods, not called via NEWOBJ
if (!(mflags & CORINFO_FLG_STATIC))
{
// Always update the popCount.
// This is crucial for the stack calculation to be correct.
typeInfo tiThis = impStackTop(popCount).seTypeInfo;
popCount++;
if (opcode == CEE_CALLI)
{
// For CALLI, we don't know the methodClassHnd. Therefore, let's check the "this" object
// on the stack.
if (tiThis.IsValueClass())
{
tiThis.MakeByRef();
}
VerifyOrReturnSpeculative(!verIsByRefLike(tiThis), "byref in tailcall", speculative);
}
else
{
// Check type compatibility of the this argument
typeInfo tiDeclaredThis = verMakeTypeInfo(methodClassHnd);
if (tiDeclaredThis.IsValueClass())
{
tiDeclaredThis.MakeByRef();
}
VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclaredThis), "byref in tailcall", speculative);
}
}
// Tail calls on constrained calls should be illegal too:
// when instantiated at a value type, a constrained call may pass the address of a stack allocated value
VerifyOrReturnSpeculative(!pConstrainedResolvedToken, "byref in constrained tailcall", speculative);
// Get the exact view of the signature for an array method
if (sig.retType != CORINFO_TYPE_VOID)
{
if (methodClassFlgs & CORINFO_FLG_ARRAY)
{
assert(opcode != CEE_CALLI);
eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &sig);
}
}
typeInfo tiCalleeRetType = verMakeTypeInfo(sig.retType, sig.retTypeClass);
typeInfo tiCallerRetType =
verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
// void return type gets morphed into the error type, so we have to treat it specially here
if (sig.retType == CORINFO_TYPE_VOID)
{
VerifyOrReturnSpeculative(info.compMethodInfo->args.retType == CORINFO_TYPE_VOID, "tailcall return mismatch",
speculative);
}
else
{
VerifyOrReturnSpeculative(tiCompatibleWith(NormaliseForStack(tiCalleeRetType),
NormaliseForStack(tiCallerRetType), true),
"tailcall return mismatch", speculative);
}
// for tailcall, stack must be empty
VerifyOrReturnSpeculative(verCurrentState.esStackDepth == popCount, "stack non-empty on tailcall", speculative);
return true; // Yes, tailcall is legal
}
/*****************************************************************************
*
* Checks the IL verification rules for the call
*/
void Compiler::verVerifyCall(OPCODE opcode,
CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
bool tailCall,
bool readonlyCall,
const BYTE* delegateCreateStart,
const BYTE* codeAddr,
CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName))
{
DWORD mflags;
CORINFO_SIG_INFO* sig = nullptr;
unsigned int popCount = 0; // we can't pop the stack since impImportCall needs it, so
// this counter is used to keep track of how many items have been
// virtually popped
// for calli, VerifyOrReturn that this is not a virtual method
if (opcode == CEE_CALLI)
{
Verify(false, "Calli not verifiable");
return;
}
//<NICE> It would be nice to cache the rest of it, but eeFindMethod is the big ticket item.
mflags = callInfo->verMethodFlags;
sig = &callInfo->verSig;
if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
{
eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
}
// opcode specific check
unsigned methodClassFlgs = callInfo->classFlags;
switch (opcode)
{
case CEE_CALLVIRT:
// cannot do callvirt on valuetypes
VerifyOrReturn(!(methodClassFlgs & CORINFO_FLG_VALUECLASS), "callVirt on value class");
VerifyOrReturn(sig->hasThis(), "CallVirt on static method");
break;
case CEE_NEWOBJ:
{
assert(!tailCall); // Importer should not allow this
VerifyOrReturn((mflags & CORINFO_FLG_CONSTRUCTOR) && !(mflags & CORINFO_FLG_STATIC),
"newobj must be on instance");
if (methodClassFlgs & CORINFO_FLG_DELEGATE)
{
VerifyOrReturn(sig->numArgs == 2, "wrong number args to delegate ctor");
typeInfo tiDeclaredObj = verParseArgSigToTypeInfo(sig, sig->args).NormaliseForStack();
typeInfo tiDeclaredFtn =
verParseArgSigToTypeInfo(sig, info.compCompHnd->getArgNext(sig->args)).NormaliseForStack();
VerifyOrReturn(tiDeclaredFtn.IsNativeIntType(), "ftn arg needs to be a native int type");
assert(popCount == 0);
typeInfo tiActualObj = impStackTop(1).seTypeInfo;
typeInfo tiActualFtn = impStackTop(0).seTypeInfo;
VerifyOrReturn(tiActualFtn.IsMethod(), "delegate needs method as first arg");
VerifyOrReturn(tiCompatibleWith(tiActualObj, tiDeclaredObj, true), "delegate object type mismatch");
VerifyOrReturn(tiActualObj.IsNullObjRef() || tiActualObj.IsType(TI_REF),
"delegate object type mismatch");
CORINFO_CLASS_HANDLE objTypeHandle =
tiActualObj.IsNullObjRef() ? nullptr : tiActualObj.GetClassHandleForObjRef();
// the method signature must be compatible with the delegate's invoke method
// check that for virtual functions, the type of the object used to get the
// ftn ptr is the same as the type of the object passed to the delegate ctor.
// since this is a bit of work to determine in general, we pattern match stylized
// code sequences
// the delegate creation code check, which used to be done later, is now done here
// so we can read delegateMethodRef directly from
// the preceding LDFTN or CEE_LDVIRTFTN instruction sequence;
// we then use it in our call to isCompatibleDelegate().
mdMemberRef delegateMethodRef = mdMemberRefNil;
VerifyOrReturn(verCheckDelegateCreation(delegateCreateStart, codeAddr, delegateMethodRef),
"must create delegates with certain IL");
CORINFO_RESOLVED_TOKEN delegateResolvedToken;
delegateResolvedToken.tokenContext = impTokenLookupContextHandle;
delegateResolvedToken.tokenScope = info.compScopeHnd;
delegateResolvedToken.token = delegateMethodRef;
delegateResolvedToken.tokenType = CORINFO_TOKENKIND_Method;
info.compCompHnd->resolveToken(&delegateResolvedToken);
CORINFO_CALL_INFO delegateCallInfo;
eeGetCallInfo(&delegateResolvedToken, nullptr /* constraint typeRef */, CORINFO_CALLINFO_SECURITYCHECKS,
&delegateCallInfo);
bool isOpenDelegate = false;
VerifyOrReturn(info.compCompHnd->isCompatibleDelegate(objTypeHandle, delegateResolvedToken.hClass,
tiActualFtn.GetMethod(), pResolvedToken->hClass,
&isOpenDelegate),
"function incompatible with delegate");
// check the constraints on the target method
VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(delegateResolvedToken.hClass),
"delegate target has unsatisfied class constraints");
VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(delegateResolvedToken.hClass,
tiActualFtn.GetMethod()),
"delegate target has unsatisfied method constraints");
// See ECMA spec section 1.8.1.5.2 (Delegating via instance dispatch)
// for additional verification rules for delegates
CORINFO_METHOD_HANDLE actualMethodHandle = tiActualFtn.GetMethod();
DWORD actualMethodAttribs = info.compCompHnd->getMethodAttribs(actualMethodHandle);
if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
{
if ((actualMethodAttribs & CORINFO_FLG_VIRTUAL) && ((actualMethodAttribs & CORINFO_FLG_FINAL) == 0))
{
VerifyOrReturn((tiActualObj.IsThisPtr() && lvaIsOriginalThisReadOnly()) ||
verIsBoxedValueType(tiActualObj),
"The 'this' parameter to the call must be either the calling method's "
"'this' parameter or "
"a boxed value type.");
}
}
if (actualMethodAttribs & CORINFO_FLG_PROTECTED)
{
bool targetIsStatic = actualMethodAttribs & CORINFO_FLG_STATIC;
Verify(targetIsStatic || !isOpenDelegate,
"Unverifiable creation of an open instance delegate for a protected member.");
CORINFO_CLASS_HANDLE instanceClassHnd = (tiActualObj.IsNullObjRef() || targetIsStatic)
? info.compClassHnd
: tiActualObj.GetClassHandleForObjRef();
// In the case of protected methods, it is a requirement that the 'this'
// pointer be a subclass of the current context. Perform this check.
Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
"Accessing protected method through wrong type.");
}
goto DONE_ARGS;
}
}
// fall thru to default checks
FALLTHROUGH;
default:
VerifyOrReturn(!(mflags & CORINFO_FLG_ABSTRACT), "method abstract");
}
VerifyOrReturn(!((mflags & CORINFO_FLG_CONSTRUCTOR) && (methodClassFlgs & CORINFO_FLG_DELEGATE)),
"can only newobj a delegate constructor");
// check compatibility of the arguments
unsigned int argCount;
argCount = sig->numArgs;
CORINFO_ARG_LIST_HANDLE args;
args = sig->args;
while (argCount--)
{
typeInfo tiActual = impStackTop(popCount + argCount).seTypeInfo;
typeInfo tiDeclared = verParseArgSigToTypeInfo(sig, args).NormaliseForStack();
VerifyOrReturn(tiCompatibleWith(tiActual, tiDeclared, true), "type mismatch");
args = info.compCompHnd->getArgNext(args);
}
DONE_ARGS:
// update popCount
popCount += sig->numArgs;
// check for 'this' which is on non-static methods, not called via NEWOBJ
CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
if (!(mflags & CORINFO_FLG_STATIC) && (opcode != CEE_NEWOBJ))
{
typeInfo tiThis = impStackTop(popCount).seTypeInfo;
popCount++;
// If it is null, we assume we can access it (since it will AV shortly)
// If it is anything but a reference class, there is no hierarchy, so
// again, we don't need the precise instance class to compute 'protected' access
if (tiThis.IsType(TI_REF))
{
instanceClassHnd = tiThis.GetClassHandleForObjRef();
}
// Check type compatibility of the this argument
typeInfo tiDeclaredThis = verMakeTypeInfo(pResolvedToken->hClass);
if (tiDeclaredThis.IsValueClass())
{
tiDeclaredThis.MakeByRef();
}
// If this is a call to the base class .ctor, set thisPtr Init for
// this block.
if (mflags & CORINFO_FLG_CONSTRUCTOR)
{
if (verTrackObjCtorInitState && tiThis.IsThisPtr() &&
verIsCallToInitThisPtr(info.compClassHnd, pResolvedToken->hClass))
{
assert(verCurrentState.thisInitialized !=
TIS_Bottom); // This should never be the case just from the logic of the verifier.
VerifyOrReturn(verCurrentState.thisInitialized == TIS_Uninit,
"Call to base class constructor when 'this' is possibly initialized");
// Otherwise, 'this' is now initialized.
verCurrentState.thisInitialized = TIS_Init;
tiThis.SetInitialisedObjRef();
}
else
{
// We allow direct calls to value type constructors
// NB: we have to check that the contents of tiThis is a value type, otherwise we could use a
// constrained callvirt to illegally re-enter a .ctor on a value of reference type.
VerifyOrReturn(tiThis.IsByRef() && DereferenceByRef(tiThis).IsValueClass(),
"Bad call to a constructor");
}
}
if (pConstrainedResolvedToken != nullptr)
{
VerifyOrReturn(tiThis.IsByRef(), "non-byref this type in constrained call");
typeInfo tiConstraint = verMakeTypeInfo(pConstrainedResolvedToken->hClass);
// We just dereference this and test for equality
tiThis.DereferenceByRef();
VerifyOrReturn(typeInfo::AreEquivalent(tiThis, tiConstraint),
"this type mismatch with constrained type operand");
// Now pretend the this type is the boxed constrained type, for the sake of subsequent checks
tiThis = typeInfo(TI_REF, pConstrainedResolvedToken->hClass);
}
// To support direct calls on readonly byrefs, just pretend tiDeclaredThis is readonly too
if (tiDeclaredThis.IsByRef() && tiThis.IsReadonlyByRef())
{
tiDeclaredThis.SetIsReadonlyByRef();
}
VerifyOrReturn(tiCompatibleWith(tiThis, tiDeclaredThis, true), "this type mismatch");
if (tiThis.IsByRef())
{
// Find the actual type where the method exists (as opposed to what is declared
// in the metadata). This is to prevent passing a byref as the "this" argument
// while calling methods like System.ValueType.GetHashCode() which expect boxed objects.
CORINFO_CLASS_HANDLE actualClassHnd = info.compCompHnd->getMethodClass(pResolvedToken->hMethod);
VerifyOrReturn(eeIsValueClass(actualClassHnd),
"Call to base type of valuetype (which is never a valuetype)");
}
// Rules for non-virtual call to a non-final virtual method:
// Define:
// The "this" pointer is considered to be "possibly written" if
// 1. Its address has been taken (LDARGA 0) anywhere in the method.
// (or)
// 2. It has been stored to (STARG.0) anywhere in the method.
// A non-virtual call to a non-final virtual method is only allowed if
// 1. The this pointer passed to the callee is an instance of a boxed value type.
// (or)
// 2. The this pointer passed to the callee is the current method's this pointer.
// (and) The current method's this pointer is not "possibly written".
// Thus the rule is that if you assign to this ANYWHERE you can't make "base" calls to
// virtual methods. (Luckily this does not affect .ctors, since they are not virtual).
// This is stronger than is strictly needed, but implementing a laxer rule is significantly
// harder and more error prone.
if (opcode == CEE_CALL && (mflags & CORINFO_FLG_VIRTUAL) && ((mflags & CORINFO_FLG_FINAL) == 0))
{
VerifyOrReturn((tiThis.IsThisPtr() && lvaIsOriginalThisReadOnly()) || verIsBoxedValueType(tiThis),
"The 'this' parameter to the call must be either the calling method's 'this' parameter or "
"a boxed value type.");
}
}
// check any constraints on the callee's class and type parameters
VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(pResolvedToken->hClass),
"method has unsatisfied class constraints");
VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(pResolvedToken->hClass, pResolvedToken->hMethod),
"method has unsatisfied method constraints");
if (mflags & CORINFO_FLG_PROTECTED)
{
VerifyOrReturn(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
"Can't access protected method");
}
// Get the exact view of the signature for an array method
if (sig->retType != CORINFO_TYPE_VOID)
{
eeGetMethodSig(pResolvedToken->hMethod, sig, pResolvedToken->hClass);
}
// "readonly." prefixed calls only allowed for the Address operation on arrays.
// The methods supported by array types are under the control of the EE
// so we can trust that only the Address operation returns a byref.
if (readonlyCall)
{
typeInfo tiCalleeRetType = verMakeTypeInfo(sig->retType, sig->retTypeClass);
VerifyOrReturn((methodClassFlgs & CORINFO_FLG_ARRAY) && tiCalleeRetType.IsByRef(),
"unexpected use of readonly prefix");
}
// Verify the tailcall
if (tailCall)
{
verCheckTailCallConstraint(opcode, pResolvedToken, pConstrainedResolvedToken, false);
}
}
/*****************************************************************************
* Checks that a delegate creation is done using the following pattern:
* dup
* ldvirtftn targetMemberRef
* OR
* ldftn targetMemberRef
*
* 'delegateCreateStart' points at the last dup or ldftn in this basic block (null if
* not in this basic block)
*
* targetMemberRef is read from the code sequence.
* targetMemberRef is validated iff verificationNeeded.
*/
bool Compiler::verCheckDelegateCreation(const BYTE* delegateCreateStart,
const BYTE* codeAddr,
mdMemberRef& targetMemberRef)
{
if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
{
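// The ldftn opcode is two bytes (0xFE 0x06), so the target member token follows at offset 2.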
targetMemberRef = getU4LittleEndian(&delegateCreateStart[2]);
return true;
}
else if (impIsDUP_LDVIRTFTN_TOKEN(delegateCreateStart, codeAddr))
{
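// dup is one byte and ldvirtftn is two (0xFE 0x07), so the target member token follows at offset 3.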
targetMemberRef = getU4LittleEndian(&delegateCreateStart[3]);
return true;
}
return false;
}
typeInfo Compiler::verVerifySTIND(const typeInfo& tiTo, const typeInfo& value, const typeInfo& instrType)
{
Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
typeInfo ptrVal = verVerifyLDIND(tiTo, instrType);
typeInfo normPtrVal = typeInfo(ptrVal).NormaliseForStack();
if (!tiCompatibleWith(value, normPtrVal, true))
{
Verify(tiCompatibleWith(value, normPtrVal, true), "type mismatch");
}
return ptrVal;
}
typeInfo Compiler::verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType)
{
assert(!instrType.IsStruct());
typeInfo ptrVal;
if (ptr.IsByRef())
{
ptrVal = DereferenceByRef(ptr);
if (instrType.IsObjRef() && !ptrVal.IsObjRef())
{
Verify(false, "bad pointer");
}
else if (!instrType.IsObjRef() && !typeInfo::AreEquivalent(instrType, ptrVal))
{
Verify(false, "pointer not consistent with instr");
}
}
else
{
Verify(false, "pointer not byref");
}
return ptrVal;
}
// Verify that the field is used properly. 'tiThis' is NULL for statics,
// 'fieldFlags' is the field's attributes, and mutator is true if it is a
// ld*flda or a st*fld.
// 'enclosingClass' is given if we are accessing a field in some specific type.
void Compiler::verVerifyField(CORINFO_RESOLVED_TOKEN* pResolvedToken,
const CORINFO_FIELD_INFO& fieldInfo,
const typeInfo* tiThis,
bool mutator,
bool allowPlainStructAsThis)
{
CORINFO_CLASS_HANDLE enclosingClass = pResolvedToken->hClass;
unsigned fieldFlags = fieldInfo.fieldFlags;
CORINFO_CLASS_HANDLE instanceClass =
info.compClassHnd; // for statics, we imagine the instance is the current class.
bool isStaticField = ((fieldFlags & CORINFO_FLG_FIELD_STATIC) != 0);
if (mutator)
{
Verify(!(fieldFlags & CORINFO_FLG_FIELD_UNMANAGED), "mutating an RVA based static");
if ((fieldFlags & CORINFO_FLG_FIELD_FINAL))
{
Verify((info.compFlags & CORINFO_FLG_CONSTRUCTOR) && enclosingClass == info.compClassHnd &&
info.compIsStatic == isStaticField,
"bad use of initonly field (set or address taken)");
}
}
if (tiThis == nullptr)
{
Verify(isStaticField, "used static opcode with non-static field");
}
else
{
typeInfo tThis = *tiThis;
if (allowPlainStructAsThis && tThis.IsValueClass())
{
tThis.MakeByRef();
}
// If it is null, we assume we can access it (since it will AV shortly)
// If it is anything but a reference class, there is no hierarchy, so
// again, we don't need the precise instance class to compute 'protected' access
if (tiThis->IsType(TI_REF))
{
instanceClass = tiThis->GetClassHandleForObjRef();
}
// Note that even if the field is static, we require that the this pointer
// satisfy the same constraints as a non-static field. This happens to
// be simpler and seems reasonable
typeInfo tiDeclaredThis = verMakeTypeInfo(enclosingClass);
if (tiDeclaredThis.IsValueClass())
{
tiDeclaredThis.MakeByRef();
// we allow read-only tThis, on any field access (even stores!), because if the
// class implementor wants to prohibit stores he should make the field private.
// we do this by setting the read-only bit on the type we compare tThis to.
tiDeclaredThis.SetIsReadonlyByRef();
}
else if (verTrackObjCtorInitState && tThis.IsThisPtr())
{
// Any field access is legal on "uninitialized" this pointers.
// The easiest way to implement this is to simply set the
// initialized bit for the duration of the type check on the
// field access only. It does not change the state of the "this"
// for the function as a whole. Note that the "tThis" is a copy
// of the original "this" type (*tiThis) passed in.
tThis.SetInitialisedObjRef();
}
Verify(tiCompatibleWith(tThis, tiDeclaredThis, true), "this type mismatch");
}
// Presently the JIT does not check that we don't store or take the address of init-only fields
// since we cannot guarantee their immutability and it is not a security issue.
// check any constraints on the field's class --- accessing the field might cause a class constructor to run.
VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(enclosingClass),
"field has unsatisfied class constraints");
if (fieldFlags & CORINFO_FLG_FIELD_PROTECTED)
{
Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClass),
"Accessing protected method through wrong type.");
}
}
void Compiler::verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode)
{
if (tiOp1.IsNumberType())
{
#ifdef TARGET_64BIT
Verify(tiCompatibleWith(tiOp1, tiOp2, true), "Cond type mismatch");
#else // TARGET_64BIT
// [10/17/2013] Consider changing this: to put on my verification lawyer hat,
// this is non-conforming to the ECMA Spec: types don't have to be equivalent,
// but compatible, since we can coalesce native int with int32 (see section III.1.5).
Verify(typeInfo::AreEquivalent(tiOp1, tiOp2), "Cond type mismatch");
#endif // !TARGET_64BIT
}
else if (tiOp1.IsObjRef())
{
switch (opcode)
{
case CEE_BEQ_S:
case CEE_BEQ:
case CEE_BNE_UN_S:
case CEE_BNE_UN:
case CEE_CEQ:
case CEE_CGT_UN:
break;
default:
Verify(false, "Cond not allowed on object types");
}
Verify(tiOp2.IsObjRef(), "Cond type mismatch");
}
else if (tiOp1.IsByRef())
{
Verify(tiOp2.IsByRef(), "Cond type mismatch");
}
else
{
Verify(tiOp1.IsMethod() && tiOp2.IsMethod(), "Cond type mismatch");
}
}
void Compiler::verVerifyThisPtrInitialised()
{
if (verTrackObjCtorInitState)
{
Verify(verCurrentState.thisInitialized == TIS_Init, "this ptr is not initialized");
}
}
bool Compiler::verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target)
{
// Either target == context, in this case calling an alternate .ctor
// Or target is the immediate parent of context
return ((target == context) || (target == info.compCompHnd->getParentType(context)));
}
GenTree* Compiler::impImportLdvirtftn(GenTree* thisPtr,
CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_CALL_INFO* pCallInfo)
{
if ((pCallInfo->methodFlags & CORINFO_FLG_EnC) && !(pCallInfo->classFlags & CORINFO_FLG_INTERFACE))
{
NO_WAY("Virtual call to a function added via EnC is not supported");
}
// CoreRT generic virtual method
if ((pCallInfo->sig.sigInst.methInstCount != 0) && IsTargetAbi(CORINFO_CORERT_ABI))
{
GenTree* runtimeMethodHandle =
impLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, GTF_ICON_METHOD_HDL, pCallInfo->hMethod);
return gtNewHelperCallNode(CORINFO_HELP_GVMLOOKUP_FOR_SLOT, TYP_I_IMPL,
gtNewCallArgs(thisPtr, runtimeMethodHandle));
}
#ifdef FEATURE_READYTORUN
if (opts.IsReadyToRun())
{
if (!pCallInfo->exactContextNeedsRuntimeLookup)
{
GenTreeCall* call =
gtNewHelperCallNode(CORINFO_HELP_READYTORUN_VIRTUAL_FUNC_PTR, TYP_I_IMPL, gtNewCallArgs(thisPtr));
call->setEntryPoint(pCallInfo->codePointerLookup.constLookup);
return call;
}
// We need a runtime lookup. CoreRT has a ReadyToRun helper for that too.
if (IsTargetAbi(CORINFO_CORERT_ABI))
{
GenTree* ctxTree = getRuntimeContextTree(pCallInfo->codePointerLookup.lookupKind.runtimeLookupKind);
return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
gtNewCallArgs(ctxTree), &pCallInfo->codePointerLookup.lookupKind);
}
}
#endif
// Get the exact descriptor for the static callsite
GenTree* exactTypeDesc = impParentClassTokenToHandle(pResolvedToken);
if (exactTypeDesc == nullptr)
{ // compDonotInline()
return nullptr;
}
GenTree* exactMethodDesc = impTokenToHandle(pResolvedToken);
if (exactMethodDesc == nullptr)
{ // compDonotInline()
return nullptr;
}
GenTreeCall::Use* helpArgs = gtNewCallArgs(exactMethodDesc);
helpArgs = gtPrependNewCallArg(exactTypeDesc, helpArgs);
helpArgs = gtPrependNewCallArg(thisPtr, helpArgs);
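// The helper's final argument order is (this pointer, exact type handle, exact method handle),
// built up above by prepending each argument in reverse.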
// Call helper function. This gets the target address of the final destination callsite.
return gtNewHelperCallNode(CORINFO_HELP_VIRTUAL_FUNC_PTR, TYP_I_IMPL, helpArgs);
}
//------------------------------------------------------------------------
// impBoxPatternMatch: match and import common box idioms
//
// Arguments:
// pResolvedToken - resolved token from the box operation
// codeAddr - position in IL stream after the box instruction
// codeEndp - end of IL stream
//
// Return Value:
// Number of IL bytes matched and imported, -1 otherwise
//
// Notes:
// pResolvedToken is known to be a value type; ref type boxing
// is handled in the CEE_BOX clause.
int Compiler::impBoxPatternMatch(CORINFO_RESOLVED_TOKEN* pResolvedToken,
const BYTE* codeAddr,
const BYTE* codeEndp,
bool makeInlineObservation)
{
if (codeAddr >= codeEndp)
{
return -1;
}
switch (codeAddr[0])
{
case CEE_UNBOX_ANY:
// box + unbox.any
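// A box immediately followed by an unbox.any of the same value type is a round trip that can be elided.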
if (codeAddr + 1 + sizeof(mdToken) <= codeEndp)
{
if (makeInlineObservation)
{
compInlineResult->Note(InlineObservation::CALLEE_FOLDABLE_BOX);
return 1 + sizeof(mdToken);
}
CORINFO_RESOLVED_TOKEN unboxResolvedToken;
impResolveToken(codeAddr + 1, &unboxResolvedToken, CORINFO_TOKENKIND_Class);
// See if the resolved tokens describe types that are equal.
const TypeCompareState compare =
info.compCompHnd->compareTypesForEquality(unboxResolvedToken.hClass, pResolvedToken->hClass);
// If so, box/unbox.any is a nop.
if (compare == TypeCompareState::Must)
{
JITDUMP("\n Importing BOX; UNBOX.ANY as NOP\n");
// Skip the next unbox.any instruction
return 1 + sizeof(mdToken);
}
}
break;
case CEE_BRTRUE:
case CEE_BRTRUE_S:
case CEE_BRFALSE:
case CEE_BRFALSE_S:
// box + br_true/false
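// The long-form brtrue/brfalse opcodes take a 4-byte operand (5 bytes total); the short forms take 1 byte (2 bytes total).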
if ((codeAddr + ((codeAddr[0] >= CEE_BRFALSE) ? 5 : 2)) <= codeEndp)
{
if (makeInlineObservation)
{
compInlineResult->Note(InlineObservation::CALLEE_FOLDABLE_BOX);
return 0;
}
GenTree* const treeToBox = impStackTop().val;
bool canOptimize = true;
GenTree* treeToNullcheck = nullptr;
// Can the thing being boxed cause a side effect?
if ((treeToBox->gtFlags & GTF_SIDE_EFFECT) != 0)
{
// Is this a side effect we can replicate cheaply?
if (((treeToBox->gtFlags & GTF_SIDE_EFFECT) == GTF_EXCEPT) &&
treeToBox->OperIs(GT_OBJ, GT_BLK, GT_IND))
{
// Yes, we just need to perform a null check if needed.
GenTree* const addr = treeToBox->AsOp()->gtGetOp1();
if (fgAddrCouldBeNull(addr))
{
treeToNullcheck = addr;
}
}
else
{
canOptimize = false;
}
}
if (canOptimize)
{
CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass);
if (boxHelper == CORINFO_HELP_BOX)
{
JITDUMP("\n Importing BOX; BR_TRUE/FALSE as %sconstant\n",
treeToNullcheck == nullptr ? "" : "nullcheck+");
impPopStack();
GenTree* result = gtNewIconNode(1);
if (treeToNullcheck != nullptr)
{
GenTree* nullcheck = gtNewNullCheck(treeToNullcheck, compCurBB);
result = gtNewOperNode(GT_COMMA, TYP_INT, nullcheck, result);
}
impPushOnStack(result, typeInfo(TI_INT));
return 0;
}
}
}
break;
case CEE_ISINST:
if (codeAddr + 1 + sizeof(mdToken) + 1 <= codeEndp)
{
const BYTE* nextCodeAddr = codeAddr + 1 + sizeof(mdToken);
switch (nextCodeAddr[0])
{
// box + isinst + br_true/false
case CEE_BRTRUE:
case CEE_BRTRUE_S:
case CEE_BRFALSE:
case CEE_BRFALSE_S:
if ((nextCodeAddr + ((nextCodeAddr[0] >= CEE_BRFALSE) ? 5 : 2)) <= codeEndp)
{
if (makeInlineObservation)
{
compInlineResult->Note(InlineObservation::CALLEE_FOLDABLE_BOX);
return 1 + sizeof(mdToken);
}
if (!(impStackTop().val->gtFlags & GTF_SIDE_EFFECT))
{
CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass);
if (boxHelper == CORINFO_HELP_BOX)
{
CORINFO_RESOLVED_TOKEN isInstResolvedToken;
impResolveToken(codeAddr + 1, &isInstResolvedToken, CORINFO_TOKENKIND_Casting);
TypeCompareState castResult =
info.compCompHnd->compareTypesForCast(pResolvedToken->hClass,
isInstResolvedToken.hClass);
if (castResult != TypeCompareState::May)
{
JITDUMP("\n Importing BOX; ISINST; BR_TRUE/FALSE as constant\n");
impPopStack();
impPushOnStack(gtNewIconNode((castResult == TypeCompareState::Must) ? 1 : 0),
typeInfo(TI_INT));
// Skip the next isinst instruction
return 1 + sizeof(mdToken);
}
}
else if (boxHelper == CORINFO_HELP_BOX_NULLABLE)
{
// For nullable we're going to fold it to "ldfld hasValue + brtrue/brfalse" or
// "ldc.i4.0 + brtrue/brfalse" in case if the underlying type is not castable to
// the target type.
CORINFO_RESOLVED_TOKEN isInstResolvedToken;
impResolveToken(codeAddr + 1, &isInstResolvedToken, CORINFO_TOKENKIND_Casting);
CORINFO_CLASS_HANDLE nullableCls = pResolvedToken->hClass;
CORINFO_CLASS_HANDLE underlyingCls = info.compCompHnd->getTypeForBox(nullableCls);
TypeCompareState castResult =
info.compCompHnd->compareTypesForCast(underlyingCls,
isInstResolvedToken.hClass);
if (castResult == TypeCompareState::Must)
{
const CORINFO_FIELD_HANDLE hasValueFldHnd =
info.compCompHnd->getFieldInClass(nullableCls, 0);
assert(info.compCompHnd->getFieldOffset(hasValueFldHnd) == 0);
assert(!strcmp(info.compCompHnd->getFieldName(hasValueFldHnd, nullptr),
"hasValue"));
GenTree* objToBox = impPopStack().val;
// Spill struct to get its address (to access hasValue field)
objToBox =
impGetStructAddr(objToBox, nullableCls, (unsigned)CHECK_SPILL_ALL, true);
impPushOnStack(gtNewFieldRef(TYP_BOOL, hasValueFldHnd, objToBox, 0),
typeInfo(TI_INT));
JITDUMP("\n Importing BOX; ISINST; BR_TRUE/FALSE as nullableVT.hasValue\n");
return 1 + sizeof(mdToken);
}
else if (castResult == TypeCompareState::MustNot)
{
impPopStack();
impPushOnStack(gtNewIconNode(0), typeInfo(TI_INT));
JITDUMP("\n Importing BOX; ISINST; BR_TRUE/FALSE as constant (false)\n");
return 1 + sizeof(mdToken);
}
}
}
}
break;
// box + isinst + unbox.any
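// When the box, isinst and unbox.any tokens all name the same type, the whole sequence leaves the original value unchanged.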
case CEE_UNBOX_ANY:
if ((nextCodeAddr + 1 + sizeof(mdToken)) <= codeEndp)
{
if (makeInlineObservation)
{
compInlineResult->Note(InlineObservation::CALLEE_FOLDABLE_BOX);
return 2 + sizeof(mdToken) * 2;
}
// See if the resolved tokens in box, isinst and unbox.any describe types that are equal.
CORINFO_RESOLVED_TOKEN isinstResolvedToken = {};
impResolveToken(codeAddr + 1, &isinstResolvedToken, CORINFO_TOKENKIND_Class);
if (info.compCompHnd->compareTypesForEquality(isinstResolvedToken.hClass,
pResolvedToken->hClass) ==
TypeCompareState::Must)
{
CORINFO_RESOLVED_TOKEN unboxResolvedToken = {};
impResolveToken(nextCodeAddr + 1, &unboxResolvedToken, CORINFO_TOKENKIND_Class);
// If so, box + isinst + unbox.any is a nop.
if (info.compCompHnd->compareTypesForEquality(unboxResolvedToken.hClass,
pResolvedToken->hClass) ==
TypeCompareState::Must)
{
JITDUMP("\n Importing BOX; ISINST, UNBOX.ANY as NOP\n");
return 2 + sizeof(mdToken) * 2;
}
}
}
break;
}
}
break;
default:
break;
}
return -1;
}
//------------------------------------------------------------------------
// impImportAndPushBox: build and import a value-type box
//
// Arguments:
// pResolvedToken - resolved token from the box operation
//
// Return Value:
// None.
//
// Side Effects:
// The value to be boxed is popped from the stack, and a tree for
// the boxed value is pushed. This method may create upstream
// statements, spill side effecting trees, and create new temps.
//
// If importing an inlinee, we may also discover the inline must
// fail. If so there is no new value pushed on the stack. Callers
// should use CompDoNotInline after calling this method to see if
// ongoing importation should be aborted.
//
// Notes:
// Boxing of ref classes results in the same value as the value on
// the top of the stack, so is handled inline in impImportBlockCode
// for the CEE_BOX case. Only value or primitive type boxes make it
// here.
//
// Boxing for nullable types is done via a helper call; boxing
// of other value types is expanded inline or handled via helper
// call, depending on the jit's codegen mode.
//
// When the jit is operating in size and time constrained modes,
// using a helper call here can save jit time and code size. But it
// also may inhibit cleanup optimizations that could have had an
// even greater effect on code size and jit time. An optimal
// strategy may need to peek ahead and see if it is easy to tell how
// the box is being used. For now, we defer.
void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken)
{
// Spill any special side effects
impSpillSpecialSideEff();
// Get the expression to box from the stack.
GenTree* op1 = nullptr;
GenTree* op2 = nullptr;
StackEntry se = impPopStack();
CORINFO_CLASS_HANDLE operCls = se.seTypeInfo.GetClassHandle();
GenTree* exprToBox = se.val;
// Look at what helper we should use.
CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass);
// Determine what expansion to prefer.
//
// In size/time/debuggable constrained modes, the helper call
// expansion for box is generally smaller and is preferred, unless
// the value to box is a struct that comes from a call. In that
// case the call can construct its return value directly into the
// box payload, saving possibly some up-front zeroing.
//
// Currently primitive type boxes always get inline expanded. We may
// want to do the same for small structs if they don't come from
// calls and don't have GC pointers, since explicitly copying such
// structs is cheap.
JITDUMP("\nCompiler::impImportAndPushBox -- handling BOX(value class) via");
bool canExpandInline = (boxHelper == CORINFO_HELP_BOX);
bool optForSize = !exprToBox->IsCall() && (operCls != nullptr) && opts.OptimizationDisabled();
bool expandInline = canExpandInline && !optForSize;
if (expandInline)
{
JITDUMP(" inline allocate/copy sequence\n");
// we are doing 'normal' boxing. This means that we can inline the box operation
// Box(expr) gets morphed into
// temp = new(clsHnd)
// cpobj(temp+4, expr, clsHnd)
// push temp
// The code paths differ slightly below for structs and primitives because
// "cpobj" differs in these cases. In one case you get
// impAssignStructPtr(temp+4, expr, clsHnd)
// and the other you get
// *(temp+4) = expr
if (opts.OptimizationDisabled())
{
// For minopts/debug code, try and minimize the total number
// of box temps by reusing an existing temp when possible.
if (impBoxTempInUse || impBoxTemp == BAD_VAR_NUM)
{
impBoxTemp = lvaGrabTemp(true DEBUGARG("Reusable Box Helper"));
}
}
else
{
// When optimizing, use a new temp for each box operation
// since we then know the exact class of the box temp.
impBoxTemp = lvaGrabTemp(true DEBUGARG("Single-def Box Helper"));
lvaTable[impBoxTemp].lvType = TYP_REF;
lvaTable[impBoxTemp].lvSingleDef = 1;
JITDUMP("Marking V%02u as a single def local\n", impBoxTemp);
const bool isExact = true;
lvaSetClass(impBoxTemp, pResolvedToken->hClass, isExact);
}
// The box temp needs to stay in use until this box expression is appended to
// some other node. We approximate this by keeping it alive until
// the opcode stack becomes empty
impBoxTempInUse = true;
// Remember the current last statement in case we need to move
// a range of statements to ensure the box temp is initialized
// before it's used.
//
Statement* const cursor = impLastStmt;
const bool useParent = false;
op1 = gtNewAllocObjNode(pResolvedToken, useParent);
if (op1 == nullptr)
{
// If we fail to create the newobj node, we must be inlining
// and have run across a type we can't describe.
//
assert(compDonotInline());
return;
}
// Remember that this basic block contains 'new' of an object,
// and so does this method
//
compCurBB->bbFlags |= BBF_HAS_NEWOBJ;
optMethodFlags |= OMF_HAS_NEWOBJ;
// Assign the boxed object to the box temp.
//
GenTree* asg = gtNewTempAssign(impBoxTemp, op1);
Statement* asgStmt = impAppendTree(asg, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);
// If the exprToBox is a call that returns its value via a ret buf arg,
// move the assignment statement(s) before the call (which must be a top level tree).
//
// We do this because impAssignStructPtr (invoked below) will
// back-substitute into a call when it sees a GT_RET_EXPR and the call
// has a hidden buffer pointer, so we need to reorder things to avoid
// creating out-of-sequence IR.
//
if (varTypeIsStruct(exprToBox) && exprToBox->OperIs(GT_RET_EXPR))
{
GenTreeCall* const call = exprToBox->AsRetExpr()->gtInlineCandidate->AsCall();
if (call->HasRetBufArg())
{
JITDUMP("Must insert newobj stmts for box before call [%06u]\n", dspTreeID(call));
// Walk back through the statements in this block, looking for the one
// that has this call as the root node.
//
// Because gtNewTempAssign (above) may have added statements that
// feed into the actual assignment we need to move this set of added
// statements as a group.
//
// Note boxed allocations are side-effect free (no com or finalizer) so
// our only worries here are (correctness) not overlapping the box temp
// lifetime and (perf) stretching the temp lifetime across the inlinee
// body.
//
// Since this is an inline candidate, we must be optimizing, and so we have
// a unique box temp per call. So no worries about overlap.
//
assert(!opts.OptimizationDisabled());
// Lifetime stretching could be addressed with some extra cleverness--sinking
// the allocation back down to just before the copy, once we figure out
// where the copy is. We defer for now.
//
Statement* insertBeforeStmt = cursor;
noway_assert(insertBeforeStmt != nullptr);
while (true)
{
if (insertBeforeStmt->GetRootNode() == call)
{
break;
}
// If we've searched all the statements in the block and failed to
// find the call, then something's wrong.
//
noway_assert(insertBeforeStmt != impStmtList);
insertBeforeStmt = insertBeforeStmt->GetPrevStmt();
}
// Found the call. Move the statements comprising the assignment.
//
JITDUMP("Moving " FMT_STMT "..." FMT_STMT " before " FMT_STMT "\n", cursor->GetNextStmt()->GetID(),
asgStmt->GetID(), insertBeforeStmt->GetID());
assert(asgStmt == impLastStmt);
do
{
Statement* movingStmt = impExtractLastStmt();
impInsertStmtBefore(movingStmt, insertBeforeStmt);
insertBeforeStmt = movingStmt;
} while (impLastStmt != cursor);
}
}
// Create a pointer to the box payload in op1.
//
op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
op2 = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL);
op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, op2);
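// A boxed object starts with its method table pointer, so the payload lives at offset TARGET_POINTER_SIZE.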
// Copy from the exprToBox to the box payload.
//
if (varTypeIsStruct(exprToBox))
{
assert(info.compCompHnd->getClassSize(pResolvedToken->hClass) == info.compCompHnd->getClassSize(operCls));
op1 = impAssignStructPtr(op1, exprToBox, operCls, (unsigned)CHECK_SPILL_ALL);
}
else
{
var_types lclTyp = exprToBox->TypeGet();
if (lclTyp == TYP_BYREF)
{
lclTyp = TYP_I_IMPL;
}
CorInfoType jitType = info.compCompHnd->asCorInfoType(pResolvedToken->hClass);
if (impIsPrimitive(jitType))
{
lclTyp = JITtype2varType(jitType);
}
var_types srcTyp = exprToBox->TypeGet();
var_types dstTyp = lclTyp;
// We allow float <-> double mismatches and implicit truncation for small types.
assert((genActualType(srcTyp) == genActualType(dstTyp)) ||
(varTypeIsFloating(srcTyp) == varTypeIsFloating(dstTyp)));
// Note regarding small types.
// We are going to store to the box here via an indirection, so the cast added below is
// redundant, since the store has an implicit truncation semantic. The reason we still
// add this cast is so that the code which deals with GT_BOX optimizations does not have
// to account for this implicit truncation (e. g. understand that BOX<byte>(0xFF + 1) is
// actually BOX<byte>(0) or deal with signedness mismatch and other GT_CAST complexities).
if (srcTyp != dstTyp)
{
exprToBox = gtNewCastNode(genActualType(dstTyp), exprToBox, false, dstTyp);
}
op1 = gtNewAssignNode(gtNewOperNode(GT_IND, dstTyp, op1), exprToBox);
}
// Spill eval stack to flush out any pending side effects.
impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportAndPushBox"));
// Set up this copy as a second assignment.
Statement* copyStmt = impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);
op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
// Record that this is a "box" node and keep track of the matching parts.
op1 = new (this, GT_BOX) GenTreeBox(TYP_REF, op1, asgStmt, copyStmt);
// If it is a value class, mark the "box" node. We can use this information
// to optimise several cases:
// "box(x) == null" --> false
// "(box(x)).CallAnInterfaceMethod(...)" --> "(&x).CallAValueTypeMethod"
// "(box(x)).CallAnObjectMethod(...)" --> "(&x).CallAValueTypeMethod"
op1->gtFlags |= GTF_BOX_VALUE;
assert(op1->IsBoxedValue());
assert(asg->gtOper == GT_ASG);
}
else
{
// Don't optimize, just call the helper and be done with it.
JITDUMP(" helper call because: %s\n", canExpandInline ? "optimizing for size" : "nullable");
assert(operCls != nullptr);
// Ensure that the value class is restored
op2 = impTokenToHandle(pResolvedToken, nullptr, true /* mustRestoreHandle */);
if (op2 == nullptr)
{
// We must be backing out of an inline.
assert(compDonotInline());
return;
}
GenTreeCall::Use* args =
gtNewCallArgs(op2, impGetStructAddr(exprToBox, operCls, (unsigned)CHECK_SPILL_ALL, true));
op1 = gtNewHelperCallNode(boxHelper, TYP_REF, args);
}
/* Push the result back on the stack, */
/* even if clsHnd is a value class we want the TI_REF */
typeInfo tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(pResolvedToken->hClass));
impPushOnStack(op1, tiRetVal);
}
//------------------------------------------------------------------------
// impImportNewObjArray: Build and import `new` of multi-dimensional array
//
// Arguments:
// pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
// by a call to CEEInfo::resolveToken().
// pCallInfo - The CORINFO_CALL_INFO that has been initialized
// by a call to CEEInfo::getCallInfo().
//
// Assumptions:
// The multi-dimensional array constructor arguments (array dimensions) are
// pushed on the IL stack on entry to this method.
//
// Notes:
// Multi-dimensional array constructors are imported as calls to a JIT
// helper, not as regular calls.
void Compiler::impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
{
GenTree* classHandle = impParentClassTokenToHandle(pResolvedToken);
if (classHandle == nullptr)
{ // compDonotInline()
return;
}
assert(pCallInfo->sig.numArgs);
GenTree* node;
// Reuse the temp used to pass the array dimensions to avoid bloating
// the stack frame in case there are multiple calls to multi-dim array
// constructors within a single method.
if (lvaNewObjArrayArgs == BAD_VAR_NUM)
{
lvaNewObjArrayArgs = lvaGrabTemp(false DEBUGARG("NewObjArrayArgs"));
lvaTable[lvaNewObjArrayArgs].lvType = TYP_BLK;
lvaTable[lvaNewObjArrayArgs].lvExactSize = 0;
}
// Increase size of lvaNewObjArrayArgs to be the largest size needed to hold 'numArgs' integers
// for our call to CORINFO_HELP_NEW_MDARR.
lvaTable[lvaNewObjArrayArgs].lvExactSize =
max(lvaTable[lvaNewObjArrayArgs].lvExactSize, pCallInfo->sig.numArgs * sizeof(INT32));
// The side-effects may include allocation of more multi-dimensional arrays. Spill all side-effects
// to ensure that the shared lvaNewObjArrayArgs local variable is only ever used to pass arguments
// to one allocation at a time.
impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportNewObjArray"));
//
// The arguments of the CORINFO_HELP_NEW_MDARR helper are:
// - Array class handle
// - Number of dimension arguments
// - Pointer to block of int32 dimensions - address of lvaNewObjArrayArgs temp.
//
node = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
node = gtNewOperNode(GT_ADDR, TYP_I_IMPL, node);
// Pop the dimension arguments from the stack one at a time and store them
// into the lvaNewObjArrayArgs temp.
for (int i = pCallInfo->sig.numArgs - 1; i >= 0; i--)
{
GenTree* arg = impImplicitIorI4Cast(impPopStack().val, TYP_INT);
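// Compute the address of the i'th int32 slot within the block temp; the dimension value is stored there by the assignment below.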
GenTree* dest = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
dest = gtNewOperNode(GT_ADDR, TYP_I_IMPL, dest);
dest = gtNewOperNode(GT_ADD, TYP_I_IMPL, dest,
new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(INT32) * i));
dest = gtNewOperNode(GT_IND, TYP_INT, dest);
node = gtNewOperNode(GT_COMMA, node->TypeGet(), gtNewAssignNode(dest, arg), node);
}
GenTreeCall::Use* args = gtNewCallArgs(node);
// pass number of arguments to the helper
args = gtPrependNewCallArg(gtNewIconNode(pCallInfo->sig.numArgs), args);
args = gtPrependNewCallArg(classHandle, args);
node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR, TYP_REF, args);
for (GenTreeCall::Use& use : node->AsCall()->Args())
{
node->gtFlags |= use.GetNode()->gtFlags & GTF_GLOB_EFFECT;
}
node->AsCall()->compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)pResolvedToken->hClass;
// Remember that this basic block contains 'new' of a md array
compCurBB->bbFlags |= BBF_HAS_NEWARRAY;
impPushOnStack(node, typeInfo(TI_REF, pResolvedToken->hClass));
}
GenTree* Compiler::impTransformThis(GenTree* thisPtr,
CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
CORINFO_THIS_TRANSFORM transform)
{
switch (transform)
{
case CORINFO_DEREF_THIS:
{
GenTree* obj = thisPtr;
// This does a LDIND on the obj, which should be a byref pointing to a ref
impBashVarAddrsToI(obj);
assert(genActualType(obj->gtType) == TYP_I_IMPL || obj->gtType == TYP_BYREF);
CorInfoType constraintTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
obj = gtNewOperNode(GT_IND, JITtype2varType(constraintTyp), obj);
// The ldind could point anywhere, e.g. a boxed class static int
obj->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
return obj;
}
case CORINFO_BOX_THIS:
{
// Constraint calls where there might be no
// unboxed entry point require us to implement the call via helper.
// These only occur when a possible target of the call
// may have inherited an implementation of an interface
// method from System.Object or System.ValueType. The EE does not provide us with
// "unboxed" versions of these methods.
GenTree* obj = thisPtr;
assert(obj->TypeGet() == TYP_BYREF || obj->TypeGet() == TYP_I_IMPL);
obj = gtNewObjNode(pConstrainedResolvedToken->hClass, obj);
obj->gtFlags |= GTF_EXCEPT;
CorInfoType jitTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
if (impIsPrimitive(jitTyp))
{
if (obj->OperIsBlk())
{
obj->ChangeOperUnchecked(GT_IND);
// Obj could point anywhere, e.g. a boxed class static int
obj->gtFlags |= GTF_IND_TGTANYWHERE;
obj->AsOp()->gtOp2 = nullptr; // must be zero for tree walkers
}
obj->gtType = JITtype2varType(jitTyp);
assert(varTypeIsArithmetic(obj->gtType));
}
// This pushes on the dereferenced byref
// This is then used immediately to box.
impPushOnStack(obj, verMakeTypeInfo(pConstrainedResolvedToken->hClass).NormaliseForStack());
// This pops off the byref-to-a-value-type remaining on the stack and
// replaces it with a boxed object.
// This is then used as the object to the virtual call immediately below.
impImportAndPushBox(pConstrainedResolvedToken);
if (compDonotInline())
{
return nullptr;
}
obj = impPopStack().val;
return obj;
}
case CORINFO_NO_THIS_TRANSFORM:
default:
return thisPtr;
}
}
//------------------------------------------------------------------------
// impCanPInvokeInline: check whether PInvoke inlining should be enabled in the current method.
//
// Return Value:
// true if PInvoke inlining should be enabled in current method, false otherwise
//
// Notes:
// Checks a number of ambient conditions where we could pinvoke but choose not to
bool Compiler::impCanPInvokeInline()
{
return getInlinePInvokeEnabled() && (!opts.compDbgCode) && (compCodeOpt() != SMALL_CODE) &&
(!opts.compNoPInvokeInlineCB) // profiler is preventing inline pinvoke
;
}
//------------------------------------------------------------------------
// impCanPInvokeInlineCallSite: basic legality checks using information
// from a call to see if the call qualifies as an inline pinvoke.
//
// Arguments:
// block - block containing the call, or for inlinees, block
// containing the call being inlined
//
// Return Value:
// true if this call can legally qualify as an inline pinvoke, false otherwise
//
// Notes:
// For runtimes that support exception handling interop there are
// restrictions on using inline pinvoke in handler regions.
//
// * We have to disable pinvoke inlining inside of filters because
// in case the main execution (i.e. in the try block) is inside
// unmanaged code, we cannot reuse the inlined stub (we still need
// the original state until we are in the catch handler)
//
// * We disable pinvoke inlining inside handlers since the GSCookie
// is in the inlined Frame (see
// CORINFO_EE_INFO::InlinedCallFrameInfo::offsetOfGSCookie), but
// this would not protect framelets/return-address of handlers.
//
// These restrictions are currently also in place for CoreCLR but
// can be relaxed when coreclr/#8459 is addressed.
bool Compiler::impCanPInvokeInlineCallSite(BasicBlock* block)
{
if (block->hasHndIndex())
{
return false;
}
// The remaining limitations do not apply to CoreRT
if (IsTargetAbi(CORINFO_CORERT_ABI))
{
return true;
}
#ifdef TARGET_64BIT
// On 64-bit platforms, we disable pinvoke inlining inside of try regions.
// Note that this could be needed on other architectures too, but we
// haven't done enough investigation to know for sure at this point.
//
// Here is the comment from JIT64 explaining why:
// [VSWhidbey: 611015] - because the jitted code links in the
// Frame (instead of the stub) we rely on the Frame not being
// 'active' until inside the stub. This normally happens by the
// stub setting the return address pointer in the Frame object
// inside the stub. On a normal return, the return address
// pointer is zeroed out so the Frame can be safely re-used, but
// if an exception occurs, nobody zeros out the return address
// pointer. Thus if we re-used the Frame object, it would go
// 'active' as soon as we link it into the Frame chain.
//
// Technically we only need to disable PInvoke inlining if we're
// in a handler or if we're in a try body with a catch or
// filter/except where other non-handler code in this method
// might run and try to re-use the dirty Frame object.
//
// A desktop test case where this seems to matter is
// jit\jit64\ebvts\mcpp\sources2\ijw\__clrcall\vector_ctor_dtor.02\deldtor_clr.exe
if (block->hasTryIndex())
{
// This does not apply to the raw pinvoke call that is inside the pinvoke
// ILStub. In this case, we have to inline the raw pinvoke call into the stub,
// otherwise we would end up with a stub that recursively calls itself, and end
// up with a stack overflow.
if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && opts.ShouldUsePInvokeHelpers())
{
return true;
}
return false;
}
#endif // TARGET_64BIT
return true;
}
//------------------------------------------------------------------------
// impCheckForPInvokeCall: examine a call to see if it is a pinvoke and, if so,
// whether it can be expressed as an inline pinvoke.
//
// Arguments:
// call - tree for the call
// methHnd - handle for the method being called (may be null)
// sig - signature of the method being called
// mflags - method flags for the method being called
// block - block containing the call, or for inlinees, block
// containing the call being inlined
//
// Notes:
// Sets GTF_CALL_M_PINVOKE on the call for pinvokes.
//
// Also sets GTF_CALL_UNMANAGED on call for inline pinvokes if the
// call passes a combination of legality and profitability checks.
//
// If GTF_CALL_UNMANAGED is set, increments info.compUnmanagedCallCountWithGCTransition
void Compiler::impCheckForPInvokeCall(
GenTreeCall* call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block)
{
CorInfoCallConvExtension unmanagedCallConv;
// If VM flagged it as Pinvoke, flag the call node accordingly
if ((mflags & CORINFO_FLG_PINVOKE) != 0)
{
call->gtCallMoreFlags |= GTF_CALL_M_PINVOKE;
}
bool suppressGCTransition = false;
if (methHnd)
{
if ((mflags & CORINFO_FLG_PINVOKE) == 0)
{
return;
}
unmanagedCallConv = info.compCompHnd->getUnmanagedCallConv(methHnd, nullptr, &suppressGCTransition);
}
else
{
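// Indirect call (calli): a default or vararg managed calling convention means this is not an unmanaged call.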
if (sig->getCallConv() == CORINFO_CALLCONV_DEFAULT || sig->getCallConv() == CORINFO_CALLCONV_VARARG)
{
return;
}
unmanagedCallConv = info.compCompHnd->getUnmanagedCallConv(nullptr, sig, &suppressGCTransition);
assert(!call->gtCallCookie);
}
if (suppressGCTransition)
{
call->gtCallMoreFlags |= GTF_CALL_M_SUPPRESS_GC_TRANSITION;
}
// If we can't get the unmanaged calling convention or the calling convention is unsupported in the JIT,
// return here without inlining the native call.
if (unmanagedCallConv == CorInfoCallConvExtension::Managed ||
unmanagedCallConv == CorInfoCallConvExtension::Fastcall ||
unmanagedCallConv == CorInfoCallConvExtension::FastcallMemberFunction)
{
return;
}
optNativeCallCount++;
if (methHnd == nullptr && (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) || IsTargetAbi(CORINFO_CORERT_ABI)))
{
// PInvoke in CoreRT ABI must be always inlined. Non-inlineable CALLI cases have been
// converted to regular method calls earlier using convertPInvokeCalliToCall.
// PInvoke CALLI in IL stubs must be inlined
}
else
{
// Check legality
if (!impCanPInvokeInlineCallSite(block))
{
return;
}
// Legal PInvoke CALL in PInvoke IL stubs must be inlined to avoid infinite recursive
// inlining in CoreRT. Skip the ambient conditions checks and profitability checks.
if (!IsTargetAbi(CORINFO_CORERT_ABI) || (info.compFlags & CORINFO_FLG_PINVOKE) == 0)
{
if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && opts.ShouldUsePInvokeHelpers())
{
// Raw PInvoke call in PInvoke IL stub generated must be inlined to avoid infinite
// recursive calls to the stub.
}
else
{
if (!impCanPInvokeInline())
{
return;
}
// Size-speed tradeoff: don't use inline pinvoke at rarely
// executed call sites. The non-inline version is more
// compact.
if (block->isRunRarely())
{
return;
}
}
}
// The expensive check should be last
if (info.compCompHnd->pInvokeMarshalingRequired(methHnd, sig))
{
return;
}
}
JITLOG((LL_INFO1000000, "\nInline a CALLI PINVOKE call from method %s\n", info.compFullName));
call->gtFlags |= GTF_CALL_UNMANAGED;
call->unmgdCallConv = unmanagedCallConv;
if (!call->IsSuppressGCTransition())
{
info.compUnmanagedCallCountWithGCTransition++;
}
// AMD64 convention is the same for native and managed
if (unmanagedCallConv == CorInfoCallConvExtension::C ||
unmanagedCallConv == CorInfoCallConvExtension::CMemberFunction)
{
call->gtFlags |= GTF_CALL_POP_ARGS;
}
if (unmanagedCallConv == CorInfoCallConvExtension::Thiscall)
{
call->gtCallMoreFlags |= GTF_CALL_M_UNMGD_THISCALL;
}
}
GenTreeCall* Compiler::impImportIndirectCall(CORINFO_SIG_INFO* sig, const DebugInfo& di)
{
var_types callRetTyp = JITtype2varType(sig->retType);
/* The function pointer is on top of the stack - It may be a
* complex expression. As it is evaluated after the args,
* it may cause registered args to be spilled. Simply spill it.
*/
// Ignore this trivial case.
if (impStackTop().val->gtOper != GT_LCL_VAR)
{
impSpillStackEntry(verCurrentState.esStackDepth - 1,
BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impImportIndirectCall"));
}
/* Get the function pointer */
GenTree* fptr = impPopStack().val;
// The function pointer is typically sized to match the target pointer size
// However, stubgen IL optimization can change LDC.I8 to LDC.I4
// See ILCodeStream::LowerOpcode
assert(genActualType(fptr->gtType) == TYP_I_IMPL || genActualType(fptr->gtType) == TYP_INT);
#ifdef DEBUG
// This temporary must never be converted to a double in stress mode,
// because that can introduce a call to the cast helper after the
// arguments have already been evaluated.
if (fptr->OperGet() == GT_LCL_VAR)
{
lvaTable[fptr->AsLclVarCommon()->GetLclNum()].lvKeepType = 1;
}
#endif
/* Create the call node */
GenTreeCall* call = gtNewIndCallNode(fptr, callRetTyp, nullptr, di);
call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
#ifdef UNIX_X86_ABI
call->gtFlags &= ~GTF_CALL_POP_ARGS;
#endif
return call;
}
/*****************************************************************************/
void Compiler::impPopArgsForUnmanagedCall(GenTree* call, CORINFO_SIG_INFO* sig)
{
assert(call->gtFlags & GTF_CALL_UNMANAGED);
/* Since we push the arguments in reverse order (i.e. right -> left)
* spill any side effects from the stack
*
* OBS: If there is only one side effect we do not need to spill it,
* thus we have to spill all side-effects except the last one
*/
unsigned lastLevelWithSideEffects = UINT_MAX;
unsigned argsToReverse = sig->numArgs;
// For "thiscall", the first argument goes in a register. Since its
// order does not need to be changed, we do not need to spill it
if (call->AsCall()->gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
{
assert(argsToReverse);
argsToReverse--;
}
#ifndef TARGET_X86
// Don't reverse args on ARM or x64 - first four args always placed in regs in order
argsToReverse = 0;
#endif
for (unsigned level = verCurrentState.esStackDepth - argsToReverse; level < verCurrentState.esStackDepth; level++)
{
if (verCurrentState.esStack[level].val->gtFlags & GTF_ORDER_SIDEEFF)
{
assert(lastLevelWithSideEffects == UINT_MAX);
impSpillStackEntry(level,
BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - other side effect"));
}
else if (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT)
{
if (lastLevelWithSideEffects != UINT_MAX)
{
/* We had a previous side effect - must spill it */
impSpillStackEntry(lastLevelWithSideEffects,
BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - side effect"));
/* Record the level for the current side effect in case we will spill it */
lastLevelWithSideEffects = level;
}
else
{
/* This is the first side effect encountered - record its level */
lastLevelWithSideEffects = level;
}
}
}
/* The argument list is now "clean" - no out-of-order side effects
* Pop the argument list in reverse order */
GenTreeCall::Use* args = impPopReverseCallArgs(sig->numArgs, sig, sig->numArgs - argsToReverse);
call->AsCall()->gtCallArgs = args;
if (call->AsCall()->gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
{
GenTree* thisPtr = args->GetNode();
impBashVarAddrsToI(thisPtr);
assert(thisPtr->TypeGet() == TYP_I_IMPL || thisPtr->TypeGet() == TYP_BYREF);
}
for (GenTreeCall::Use& argUse : GenTreeCall::UseList(args))
{
GenTree* arg = argUse.GetNode();
call->gtFlags |= arg->gtFlags & GTF_GLOB_EFFECT;
// We should not be passing gc typed args to an unmanaged call.
if (varTypeIsGC(arg->TypeGet()))
{
// Tolerate byrefs by retyping to native int.
//
// This is needed or we'll generate inconsistent GC info
// for this arg at the call site (gc info says byref,
// pinvoke sig says native int).
//
if (arg->TypeGet() == TYP_BYREF)
{
arg->ChangeType(TYP_I_IMPL);
}
else
{
assert(!"*** invalid IL: gc ref passed to unmanaged call");
}
}
}
}
//------------------------------------------------------------------------
// impInitClass: Build a node to initialize the class before accessing the
// field if necessary
//
// Arguments:
// pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
// by a call to CEEInfo::resolveToken().
//
// Return Value: If needed, a pointer to the node that will perform the class
// initialization. Otherwise, nullptr.
//
GenTree* Compiler::impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken)
{
CorInfoInitClassResult initClassResult =
info.compCompHnd->initClass(pResolvedToken->hField, info.compMethodHnd, impTokenLookupContextHandle);
if ((initClassResult & CORINFO_INITCLASS_USE_HELPER) == 0)
{
return nullptr;
}
bool runtimeLookup;
GenTree* node = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup);
if (node == nullptr)
{
assert(compDonotInline());
return nullptr;
}
if (runtimeLookup)
{
node = gtNewHelperCallNode(CORINFO_HELP_INITCLASS, TYP_VOID, gtNewCallArgs(node));
}
else
{
// Call the shared non gc static helper, as it's the fastest
node = fgGetSharedCCtor(pResolvedToken->hClass);
}
return node;
}
GenTree* Compiler::impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp)
{
GenTree* op1 = nullptr;
#if defined(DEBUG)
// If we're replaying under SuperPMI, we're going to read the data stored by SuperPMI and use it
// for optimization. Unfortunately, SuperPMI doesn't implement a guarantee on the alignment of
// this data, so for some platforms which don't allow unaligned access (e.g., Linux arm32),
// this can fault. We should fix SuperPMI to guarantee alignment, but that is a big change.
// Instead, simply fix up the data here for future use.
// This variable should be the largest size element, with the largest alignment requirement,
// and the native C++ compiler should guarantee sufficient alignment.
double aligned_data = 0.0;
void* p_aligned_data = &aligned_data;
if (info.compMethodSuperPMIIndex != -1)
{
switch (lclTyp)
{
case TYP_BOOL:
case TYP_BYTE:
case TYP_UBYTE:
static_assert_no_msg(sizeof(unsigned __int8) == sizeof(bool));
static_assert_no_msg(sizeof(unsigned __int8) == sizeof(signed char));
static_assert_no_msg(sizeof(unsigned __int8) == sizeof(unsigned char));
// No alignment necessary for byte.
break;
case TYP_SHORT:
case TYP_USHORT:
static_assert_no_msg(sizeof(unsigned __int16) == sizeof(short));
static_assert_no_msg(sizeof(unsigned __int16) == sizeof(unsigned short));
if ((size_t)fldAddr % sizeof(unsigned __int16) != 0)
{
*(unsigned __int16*)p_aligned_data = GET_UNALIGNED_16(fldAddr);
fldAddr = p_aligned_data;
}
break;
case TYP_INT:
case TYP_UINT:
case TYP_FLOAT:
static_assert_no_msg(sizeof(unsigned __int32) == sizeof(int));
static_assert_no_msg(sizeof(unsigned __int32) == sizeof(unsigned int));
static_assert_no_msg(sizeof(unsigned __int32) == sizeof(float));
if ((size_t)fldAddr % sizeof(unsigned __int32) != 0)
{
*(unsigned __int32*)p_aligned_data = GET_UNALIGNED_32(fldAddr);
fldAddr = p_aligned_data;
}
break;
case TYP_LONG:
case TYP_ULONG:
case TYP_DOUBLE:
static_assert_no_msg(sizeof(unsigned __int64) == sizeof(__int64));
static_assert_no_msg(sizeof(unsigned __int64) == sizeof(double));
if ((size_t)fldAddr % sizeof(unsigned __int64) != 0)
{
*(unsigned __int64*)p_aligned_data = GET_UNALIGNED_64(fldAddr);
fldAddr = p_aligned_data;
}
break;
default:
assert(!"Unexpected lclTyp");
break;
}
}
#endif // DEBUG
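// Read the field's current value from fldAddr and materialize it as a constant node of the appropriate type.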
switch (lclTyp)
{
int ival;
__int64 lval;
double dval;
case TYP_BOOL:
ival = *((bool*)fldAddr);
goto IVAL_COMMON;
case TYP_BYTE:
ival = *((signed char*)fldAddr);
goto IVAL_COMMON;
case TYP_UBYTE:
ival = *((unsigned char*)fldAddr);
goto IVAL_COMMON;
case TYP_SHORT:
ival = *((short*)fldAddr);
goto IVAL_COMMON;
case TYP_USHORT:
ival = *((unsigned short*)fldAddr);
goto IVAL_COMMON;
case TYP_UINT:
case TYP_INT:
ival = *((int*)fldAddr);
IVAL_COMMON:
op1 = gtNewIconNode(ival);
break;
case TYP_LONG:
case TYP_ULONG:
lval = *((__int64*)fldAddr);
op1 = gtNewLconNode(lval);
break;
case TYP_FLOAT:
dval = *((float*)fldAddr);
op1 = gtNewDconNode(dval);
op1->gtType = TYP_FLOAT;
break;
case TYP_DOUBLE:
dval = *((double*)fldAddr);
op1 = gtNewDconNode(dval);
break;
default:
assert(!"Unexpected lclTyp");
break;
}
return op1;
}
GenTree* Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_ACCESS_FLAGS access,
CORINFO_FIELD_INFO* pFieldInfo,
var_types lclTyp)
{
// Ordinary static fields never overlap. RVA statics, however, can overlap (if they're
// mapped to the same ".data" declaration). That said, such mappings only appear to be
// possible with ILASM, and in ILASM-produced (ILONLY) images, RVA statics are always
// read-only (using "stsfld" on them is UB). In mixed-mode assemblies, RVA statics can
// be mutable, but the only current producer of such images, the C++/CLI compiler, does
// not appear to support mapping different fields to the same address. So we will say
// that "mutable overlapping RVA statics" are UB as well. If this ever changes, code in
// value numbering will need to be updated to respect "NotAField FldSeq".
// For statics that are not "boxed", the initial address tree will contain the field sequence.
// For those that are, we will attach it later, when adding the indirection for the box, since
// that tree will represent the true address.
bool isBoxedStatic = (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP) != 0;
bool isSharedStatic = (pFieldInfo->fieldAccessor == CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER) ||
(pFieldInfo->fieldAccessor == CORINFO_FIELD_STATIC_READYTORUN_HELPER);
FieldSeqNode::FieldKind fieldKind =
isSharedStatic ? FieldSeqNode::FieldKind::SharedStatic : FieldSeqNode::FieldKind::SimpleStatic;
FieldSeqNode* innerFldSeq = !isBoxedStatic ? GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField, fieldKind)
: FieldSeqStore::NotAField();
GenTree* op1;
switch (pFieldInfo->fieldAccessor)
{
case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
{
assert(!compIsForInlining());
// We first call a special helper to get the statics base pointer
op1 = impParentClassTokenToHandle(pResolvedToken);
// compIsForInlining() is false so we should not get NULL here
assert(op1 != nullptr);
var_types type = TYP_BYREF;
switch (pFieldInfo->helper)
{
case CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE:
type = TYP_I_IMPL;
break;
case CORINFO_HELP_GETGENERICS_GCSTATIC_BASE:
case CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE:
case CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE:
break;
default:
assert(!"unknown generic statics helper");
break;
}
op1 = gtNewHelperCallNode(pFieldInfo->helper, type, gtNewCallArgs(op1));
op1 = gtNewOperNode(GT_ADD, type, op1, gtNewIconNode(pFieldInfo->offset, innerFldSeq));
}
break;
case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
{
#ifdef FEATURE_READYTORUN
if (opts.IsReadyToRun())
{
GenTreeFlags callFlags = GTF_EMPTY;
if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
{
callFlags |= GTF_CALL_HOISTABLE;
}
op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_STATIC_BASE, TYP_BYREF);
op1->gtFlags |= callFlags;
op1->AsCall()->setEntryPoint(pFieldInfo->fieldLookup);
}
else
#endif
{
op1 = fgGetStaticsCCtorHelper(pResolvedToken->hClass, pFieldInfo->helper);
}
op1 = gtNewOperNode(GT_ADD, op1->TypeGet(), op1, gtNewIconNode(pFieldInfo->offset, innerFldSeq));
break;
}
case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
{
#ifdef FEATURE_READYTORUN
assert(opts.IsReadyToRun());
assert(!compIsForInlining());
CORINFO_LOOKUP_KIND kind;
info.compCompHnd->getLocationOfThisType(info.compMethodHnd, &kind);
assert(kind.needsRuntimeLookup);
GenTree* ctxTree = getRuntimeContextTree(kind.runtimeLookupKind);
GenTreeCall::Use* args = gtNewCallArgs(ctxTree);
GenTreeFlags callFlags = GTF_EMPTY;
if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
{
callFlags |= GTF_CALL_HOISTABLE;
}
var_types type = TYP_BYREF;
op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE, type, args);
op1->gtFlags |= callFlags;
op1->AsCall()->setEntryPoint(pFieldInfo->fieldLookup);
op1 = gtNewOperNode(GT_ADD, type, op1, gtNewIconNode(pFieldInfo->offset, innerFldSeq));
#else
unreached();
#endif // FEATURE_READYTORUN
}
break;
default:
{
// Do we need the address of a static field?
//
if (access & CORINFO_ACCESS_ADDRESS)
{
void** pFldAddr = nullptr;
void* fldAddr = info.compCompHnd->getFieldAddress(pResolvedToken->hField, (void**)&pFldAddr);
// We should always be able to access this static's address directly.
assert(pFldAddr == nullptr);
// Create the address node.
GenTreeFlags handleKind = isBoxedStatic ? GTF_ICON_STATIC_BOX_PTR : GTF_ICON_STATIC_HDL;
op1 = gtNewIconHandleNode((size_t)fldAddr, handleKind, innerFldSeq);
#ifdef DEBUG
op1->AsIntCon()->gtTargetHandle = op1->AsIntCon()->gtIconVal;
#endif
if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
{
op1->gtFlags |= GTF_ICON_INITCLASS;
}
}
else // We need the value of a static field
{
// In future, it may be better to just create the right tree here instead of folding it later.
op1 = gtNewFieldRef(lclTyp, pResolvedToken->hField);
if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
{
op1->gtFlags |= GTF_FLD_INITCLASS;
}
if (isBoxedStatic)
{
FieldSeqNode* outerFldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField, fieldKind);
op1->ChangeType(TYP_REF); // points at boxed object
op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, gtNewIconNode(TARGET_POINTER_SIZE, outerFldSeq));
if (varTypeIsStruct(lclTyp))
{
// Constructor adds GTF_GLOB_REF. Note that this is *not* GTF_EXCEPT.
op1 = gtNewObjNode(pFieldInfo->structType, op1);
}
else
{
op1 = gtNewOperNode(GT_IND, lclTyp, op1);
op1->gtFlags |= (GTF_GLOB_REF | GTF_IND_NONFAULTING);
}
}
return op1;
}
break;
}
}
if (isBoxedStatic)
{
FieldSeqNode* outerFldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField, fieldKind);
op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
op1->gtFlags |= (GTF_IND_INVARIANT | GTF_IND_NONFAULTING | GTF_IND_NONNULL);
op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, gtNewIconNode(TARGET_POINTER_SIZE, outerFldSeq));
}
if (!(access & CORINFO_ACCESS_ADDRESS))
{
if (varTypeIsStruct(lclTyp))
{
// Constructor adds GTF_GLOB_REF. Note that this is *not* GTF_EXCEPT.
op1 = gtNewObjNode(pFieldInfo->structType, op1);
}
else
{
op1 = gtNewOperNode(GT_IND, lclTyp, op1);
op1->gtFlags |= GTF_GLOB_REF;
}
}
return op1;
}
// In general, try to call this before most of the verification work. Most people expect the access
// exceptions before the verification exceptions. If you do this after, that usually doesn't happen. It turns
// out that if you can't access something, we also think that you're unverifiable for other reasons.
void Compiler::impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
{
if (result != CORINFO_ACCESS_ALLOWED)
{
impHandleAccessAllowedInternal(result, helperCall);
}
}
void Compiler::impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
{
switch (result)
{
case CORINFO_ACCESS_ALLOWED:
break;
case CORINFO_ACCESS_ILLEGAL:
// if we're verifying, then we need to reject the illegal access to ensure that we don't think the
// method is verifiable. Otherwise, delay the exception to runtime.
if (compIsForImportOnly())
{
info.compCompHnd->ThrowExceptionForHelper(helperCall);
}
else
{
impInsertHelperCall(helperCall);
}
break;
}
}
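//------------------------------------------------------------------------
// impInsertHelperCall: build a call to the helper described by helperInfo,
// materializing each helper argument as the appropriate handle or constant
// node, and append it to the current statement list.
//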
void Compiler::impInsertHelperCall(CORINFO_HELPER_DESC* helperInfo)
{
// Construct the argument list
GenTreeCall::Use* args = nullptr;
assert(helperInfo->helperNum != CORINFO_HELP_UNDEF);
for (unsigned i = helperInfo->numArgs; i > 0; --i)
{
const CORINFO_HELPER_ARG& helperArg = helperInfo->args[i - 1];
GenTree* currentArg = nullptr;
switch (helperArg.argType)
{
case CORINFO_HELPER_ARG_TYPE_Field:
info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
info.compCompHnd->getFieldClass(helperArg.fieldHandle));
currentArg = gtNewIconEmbFldHndNode(helperArg.fieldHandle);
break;
case CORINFO_HELPER_ARG_TYPE_Method:
info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(helperArg.methodHandle);
currentArg = gtNewIconEmbMethHndNode(helperArg.methodHandle);
break;
case CORINFO_HELPER_ARG_TYPE_Class:
info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(helperArg.classHandle);
currentArg = gtNewIconEmbClsHndNode(helperArg.classHandle);
break;
case CORINFO_HELPER_ARG_TYPE_Module:
currentArg = gtNewIconEmbScpHndNode(helperArg.moduleHandle);
break;
case CORINFO_HELPER_ARG_TYPE_Const:
currentArg = gtNewIconNode(helperArg.constant);
break;
default:
NO_WAY("Illegal helper arg type");
}
args = gtPrependNewCallArg(currentArg, args);
}
/* TODO-Review:
* Mark as CSE'able, and hoistable. Consider marking hoistable unless you're in the inlinee.
* Also, consider sticking this in the first basic block.
*/
GenTree* callout = gtNewHelperCallNode(helperInfo->helperNum, TYP_VOID, args);
impAppendTree(callout, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);
}
//------------------------------------------------------------------------
// impTailCallRetTypeCompatible: Checks whether the return types of caller
// and callee are compatible so that the callee can be tail called.
//
// Arguments:
// allowWidening -- whether to allow implicit widening by the callee.
// For instance, allowing int32 -> int16 tailcalls.
// The managed calling convention allows this, but
// we don't want explicit tailcalls to depend on this
// detail of the managed calling convention.
// callerRetType -- the caller's return type
// callerRetTypeClass - the caller's return struct type
// callerCallConv -- calling convention of the caller
// calleeRetType -- the callee's return type
// calleeRetTypeClass - the callee return struct type
// calleeCallConv -- calling convention of the callee
//
// Returns:
// True if the tailcall types are compatible.
//
// Remarks:
// Note that here we don't check compatibility in IL Verifier sense, but on the
// lines of return types getting returned in the same return register.
bool Compiler::impTailCallRetTypeCompatible(bool allowWidening,
var_types callerRetType,
CORINFO_CLASS_HANDLE callerRetTypeClass,
CorInfoCallConvExtension callerCallConv,
var_types calleeRetType,
CORINFO_CLASS_HANDLE calleeRetTypeClass,
CorInfoCallConvExtension calleeCallConv)
{
// Early out if the types are the same.
if (callerRetType == calleeRetType)
{
return true;
}
// For integral types the managed calling convention dictates that callee
// will widen the return value to 4 bytes, so we can allow implicit widening
// in managed to managed tailcalls when dealing with <= 4 bytes.
bool isManaged =
(callerCallConv == CorInfoCallConvExtension::Managed) && (calleeCallConv == CorInfoCallConvExtension::Managed);
if (allowWidening && isManaged && varTypeIsIntegral(callerRetType) && varTypeIsIntegral(calleeRetType) &&
(genTypeSize(callerRetType) <= 4) && (genTypeSize(calleeRetType) <= genTypeSize(callerRetType)))
{
return true;
}
// If the class handles are the same and not null, the return types are compatible.
if ((callerRetTypeClass != nullptr) && (callerRetTypeClass == calleeRetTypeClass))
{
return true;
}
#if defined(TARGET_AMD64) || defined(TARGET_ARMARCH)
// Jit64 compat:
if (callerRetType == TYP_VOID)
{
// This needs to be allowed to support the following IL pattern that Jit64 allows:
// tail.call
// pop
// ret
//
// Note that the above IL pattern is not valid as per IL verification rules.
// Therefore, only full trust code can take advantage of this pattern.
return true;
}
// These checks return true if the return value type sizes are the same and
// get returned in the same return register i.e. caller doesn't need to normalize
// return value. Some of the tail calls permitted by below checks would have
// been rejected by IL Verifier before we reached here. Therefore, only full
// trust code can make those tail calls.
unsigned callerRetTypeSize = 0;
unsigned calleeRetTypeSize = 0;
bool isCallerRetTypMBEnreg = VarTypeIsMultiByteAndCanEnreg(callerRetType, callerRetTypeClass, &callerRetTypeSize,
true, info.compIsVarArgs, callerCallConv);
bool isCalleeRetTypMBEnreg = VarTypeIsMultiByteAndCanEnreg(calleeRetType, calleeRetTypeClass, &calleeRetTypeSize,
true, info.compIsVarArgs, calleeCallConv);
if (varTypeIsIntegral(callerRetType) || isCallerRetTypMBEnreg)
{
return (varTypeIsIntegral(calleeRetType) || isCalleeRetTypMBEnreg) && (callerRetTypeSize == calleeRetTypeSize);
}
#endif // TARGET_AMD64 || TARGET_ARMARCH
return false;
}
/********************************************************************************
*
* Returns true if the current opcode and the opcodes following it correspond
* to a supported tail call IL pattern.
*
*/
bool Compiler::impIsTailCallILPattern(
bool tailPrefixed, OPCODE curOpcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, bool isRecursive)
{
// Bail out if the current opcode is not a call.
if (!impOpcodeIsCallOpcode(curOpcode))
{
return false;
}
#if !FEATURE_TAILCALL_OPT_SHARED_RETURN
// If shared ret tail opt is not enabled, we will enable
// it for recursive methods.
if (isRecursive)
#endif
{
// We can actually handle the case where the ret is in a fallthrough block, as long as that is the only part of the
// sequence. Make sure we don't go past the end of the IL however.
codeEnd = min(codeEnd + 1, info.compCode + info.compILCodeSize);
}
// Bail out if there is no next opcode after call
if (codeAddrOfNextOpcode >= codeEnd)
{
return false;
}
OPCODE nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
return (nextOpcode == CEE_RET);
}
/*****************************************************************************
*
* Determine whether the call could be converted to an implicit tail call
*
*/
bool Compiler::impIsImplicitTailCallCandidate(
OPCODE opcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive)
{
#if FEATURE_TAILCALL_OPT
if (!opts.compTailCallOpt)
{
return false;
}
if (opts.OptimizationDisabled())
{
return false;
}
// must not be tail prefixed
if (prefixFlags & PREFIX_TAILCALL_EXPLICIT)
{
return false;
}
#if !FEATURE_TAILCALL_OPT_SHARED_RETURN
// the block containing call is marked as BBJ_RETURN
// We allow shared ret tail call optimization on recursive calls even under
// !FEATURE_TAILCALL_OPT_SHARED_RETURN.
if (!isRecursive && (compCurBB->bbJumpKind != BBJ_RETURN))
return false;
#endif // !FEATURE_TAILCALL_OPT_SHARED_RETURN
// must be call+ret or call+pop+ret
if (!impIsTailCallILPattern(false, opcode, codeAddrOfNextOpcode, codeEnd, isRecursive))
{
return false;
}
return true;
#else
return false;
#endif // FEATURE_TAILCALL_OPT
}
//------------------------------------------------------------------------
// impImportCall: import a call-inspiring opcode
//
// Arguments:
// opcode - opcode that inspires the call
// pResolvedToken - resolved token for the call target
// pConstrainedResolvedToken - resolved constraint token (or nullptr)
// newobjThis - tree for this pointer or uninitialized newobj temp (or nullptr)
// prefixFlags - IL prefix flags for the call
// callInfo - EE supplied info for the call
// rawILOffset - IL offset of the opcode, used for guarded devirtualization.
//
// Returns:
// Type of the call's return value.
// If we're importing an inlinee and have realized the inline must fail, the call return type should be TYP_UNDEF.
// However we can't assert for this here yet because there are cases we miss. See issue #13272.
//
//
// Notes:
// opcode can be CEE_CALL, CEE_CALLI, CEE_CALLVIRT, or CEE_NEWOBJ.
//
// For CEE_NEWOBJ, newobjThis should be the temp grabbed for the allocated
// uninitialized object.
#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
var_types Compiler::impImportCall(OPCODE opcode,
CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
GenTree* newobjThis,
int prefixFlags,
CORINFO_CALL_INFO* callInfo,
IL_OFFSET rawILOffset)
{
assert(opcode == CEE_CALL || opcode == CEE_CALLVIRT || opcode == CEE_NEWOBJ || opcode == CEE_CALLI);
// The current statement DI may not refer to the exact call, but for calls
// we wish to be able to attach the exact IL instruction to get "return
// value" support in the debugger, so create one with the exact IL offset.
DebugInfo di = impCreateDIWithCurrentStackInfo(rawILOffset, true);
var_types callRetTyp = TYP_COUNT;
CORINFO_SIG_INFO* sig = nullptr;
CORINFO_METHOD_HANDLE methHnd = nullptr;
CORINFO_CLASS_HANDLE clsHnd = nullptr;
unsigned clsFlags = 0;
unsigned mflags = 0;
GenTree* call = nullptr;
GenTreeCall::Use* args = nullptr;
CORINFO_THIS_TRANSFORM constraintCallThisTransform = CORINFO_NO_THIS_TRANSFORM;
CORINFO_CONTEXT_HANDLE exactContextHnd = nullptr;
bool exactContextNeedsRuntimeLookup = false;
bool canTailCall = true;
const char* szCanTailCallFailReason = nullptr;
const int tailCallFlags = (prefixFlags & PREFIX_TAILCALL);
const bool isReadonlyCall = (prefixFlags & PREFIX_READONLY) != 0;
methodPointerInfo* ldftnInfo = nullptr;
// Synchronized methods need to call CORINFO_HELP_MON_EXIT at the end. We could
// do that before tailcalls, but that is probably not the intended
// semantic. So just disallow tailcalls from synchronized methods.
// Also, popping arguments in a varargs function is more work and NYI
// If we have a security object, we have to keep our frame around for callers
// to see any imperative security.
// Reverse P/Invokes need a call to CORINFO_HELP_JIT_REVERSE_PINVOKE_EXIT
// at the end, so tailcalls should be disabled.
if (info.compFlags & CORINFO_FLG_SYNCH)
{
canTailCall = false;
szCanTailCallFailReason = "Caller is synchronized";
}
else if (opts.IsReversePInvoke())
{
canTailCall = false;
szCanTailCallFailReason = "Caller is Reverse P/Invoke";
}
#if !FEATURE_FIXED_OUT_ARGS
else if (info.compIsVarArgs)
{
canTailCall = false;
szCanTailCallFailReason = "Caller is varargs";
}
#endif // FEATURE_FIXED_OUT_ARGS
// We only need to cast the return value of pinvoke inlined calls that return small types
// TODO-AMD64-Cleanup: Remove this when we stop interoperating with JIT64, or if we decide to stop
// widening everything! CoreCLR does not support JIT64 interoperation so no need to widen there.
// The existing x64 JIT doesn't bother widening all types to int, so we have to assume for
// the time being that the callee might be compiled by the other JIT and thus the return
// value will need to be widened by us (or not widened at all...)
// ReadyToRun code sticks with default calling convention that does not widen small return types.
bool checkForSmallType = opts.IsReadyToRun();
bool bIntrinsicImported = false;
CORINFO_SIG_INFO calliSig;
GenTreeCall::Use* extraArg = nullptr;
/*-------------------------------------------------------------------------
* First create the call node
*/
if (opcode == CEE_CALLI)
{
if (IsTargetAbi(CORINFO_CORERT_ABI))
{
// See comment in impCheckForPInvokeCall
BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB;
if (info.compCompHnd->convertPInvokeCalliToCall(pResolvedToken, !impCanPInvokeInlineCallSite(block)))
{
eeGetCallInfo(pResolvedToken, nullptr, CORINFO_CALLINFO_ALLOWINSTPARAM, callInfo);
return impImportCall(CEE_CALL, pResolvedToken, nullptr, nullptr, prefixFlags, callInfo, rawILOffset);
}
}
/* Get the call site sig */
eeGetSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &calliSig);
callRetTyp = JITtype2varType(calliSig.retType);
call = impImportIndirectCall(&calliSig, di);
// We don't know the target method, so we have to infer the flags, or
// assume the worst-case.
mflags = (calliSig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
#ifdef DEBUG
if (verbose)
{
unsigned structSize = (callRetTyp == TYP_STRUCT) ? eeTryGetClassSize(calliSig.retTypeSigClass) : 0;
printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %u\n",
opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
}
#endif
sig = &calliSig;
}
else // (opcode != CEE_CALLI)
{
NamedIntrinsic ni = NI_Illegal;
// Passing CORINFO_CALLINFO_ALLOWINSTPARAM indicates that this JIT is prepared to
// supply the instantiation parameters necessary to make direct calls to underlying
// shared generic code, rather than calling through instantiating stubs. If the
// returned signature has CORINFO_CALLCONV_PARAMTYPE then this indicates that the JIT
// must indeed pass an instantiation parameter.
methHnd = callInfo->hMethod;
sig = &(callInfo->sig);
callRetTyp = JITtype2varType(sig->retType);
mflags = callInfo->methodFlags;
#ifdef DEBUG
if (verbose)
{
unsigned structSize = (callRetTyp == TYP_STRUCT) ? eeTryGetClassSize(sig->retTypeSigClass) : 0;
printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %u\n",
opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
}
#endif
if (compIsForInlining())
{
/* Does the inlinee use StackCrawlMark */
if (mflags & CORINFO_FLG_DONT_INLINE_CALLER)
{
compInlineResult->NoteFatal(InlineObservation::CALLEE_STACK_CRAWL_MARK);
return TYP_UNDEF;
}
/* For now ignore varargs */
if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
{
compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NATIVE_VARARGS);
return TYP_UNDEF;
}
if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
{
compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
return TYP_UNDEF;
}
if ((mflags & CORINFO_FLG_VIRTUAL) && (sig->sigInst.methInstCount != 0) && (opcode == CEE_CALLVIRT))
{
compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_GENERIC_VIRTUAL);
return TYP_UNDEF;
}
}
clsHnd = pResolvedToken->hClass;
clsFlags = callInfo->classFlags;
#ifdef DEBUG
// If this is a call to JitTestLabel.Mark, do "early inlining", and record the test attribute.
// This recognition should really be done by knowing the methHnd of the relevant Mark method(s).
// These should be in corelib.h, and available through a JIT/EE interface call.
const char* modName;
const char* className;
const char* methodName;
if ((className = eeGetClassName(clsHnd)) != nullptr &&
strcmp(className, "System.Runtime.CompilerServices.JitTestLabel") == 0 &&
(methodName = eeGetMethodName(methHnd, &modName)) != nullptr && strcmp(methodName, "Mark") == 0)
{
return impImportJitTestLabelMark(sig->numArgs);
}
#endif // DEBUG
// <NICE> Factor this into getCallInfo </NICE>
bool isSpecialIntrinsic = false;
if ((mflags & CORINFO_FLG_INTRINSIC) != 0)
{
const bool isTailCall = canTailCall && (tailCallFlags != 0);
call =
impIntrinsic(newobjThis, clsHnd, methHnd, sig, mflags, pResolvedToken->token, isReadonlyCall,
isTailCall, pConstrainedResolvedToken, callInfo->thisTransform, &ni, &isSpecialIntrinsic);
if (compDonotInline())
{
return TYP_UNDEF;
}
if (call != nullptr)
{
#ifdef FEATURE_READYTORUN
if (call->OperGet() == GT_INTRINSIC)
{
if (opts.IsReadyToRun())
{
noway_assert(callInfo->kind == CORINFO_CALL);
call->AsIntrinsic()->gtEntryPoint = callInfo->codePointerLookup.constLookup;
}
else
{
call->AsIntrinsic()->gtEntryPoint.addr = nullptr;
call->AsIntrinsic()->gtEntryPoint.accessType = IAT_VALUE;
}
}
#endif
bIntrinsicImported = true;
goto DONE_CALL;
}
}
#ifdef FEATURE_SIMD
call = impSIMDIntrinsic(opcode, newobjThis, clsHnd, methHnd, sig, mflags, pResolvedToken->token);
if (call != nullptr)
{
bIntrinsicImported = true;
goto DONE_CALL;
}
#endif // FEATURE_SIMD
if ((mflags & CORINFO_FLG_VIRTUAL) && (mflags & CORINFO_FLG_EnC) && (opcode == CEE_CALLVIRT))
{
NO_WAY("Virtual call to a function added via EnC is not supported");
}
if ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_DEFAULT &&
(sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
(sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG)
{
BADCODE("Bad calling convention");
}
//-------------------------------------------------------------------------
// Construct the call node
//
// Work out what sort of call we're making.
// Dispense with virtual calls implemented via LDVIRTFTN immediately.
constraintCallThisTransform = callInfo->thisTransform;
exactContextHnd = callInfo->contextHandle;
exactContextNeedsRuntimeLookup = callInfo->exactContextNeedsRuntimeLookup;
switch (callInfo->kind)
{
case CORINFO_VIRTUALCALL_STUB:
{
assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
if (callInfo->stubLookup.lookupKind.needsRuntimeLookup)
{
if (callInfo->stubLookup.lookupKind.runtimeLookupKind == CORINFO_LOOKUP_NOT_SUPPORTED)
{
// Runtime does not support inlining of all shapes of runtime lookups
// Inlining has to be aborted in such a case
compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_COMPLEX_HANDLE);
return TYP_UNDEF;
}
GenTree* stubAddr = impRuntimeLookupToTree(pResolvedToken, &callInfo->stubLookup, methHnd);
assert(!compDonotInline());
// This is the rough code to set up an indirect stub call
assert(stubAddr != nullptr);
// The stubAddr may be a
// complex expression. As it is evaluated after the args,
// it may cause registered args to be spilled. Simply spill it.
unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall with runtime lookup"));
impAssignTempGen(lclNum, stubAddr, (unsigned)CHECK_SPILL_NONE);
stubAddr = gtNewLclvNode(lclNum, TYP_I_IMPL);
// Create the actual call node
assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
(sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
call = gtNewIndCallNode(stubAddr, callRetTyp, nullptr);
call->gtFlags |= GTF_EXCEPT | (stubAddr->gtFlags & GTF_GLOB_EFFECT);
call->gtFlags |= GTF_CALL_VIRT_STUB;
#ifdef TARGET_X86
// No tailcalls allowed for these yet...
canTailCall = false;
szCanTailCallFailReason = "VirtualCall with runtime lookup";
#endif
}
else
{
// The stub address is known at compile time
call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, di);
call->AsCall()->gtStubCallStubAddr = callInfo->stubLookup.constLookup.addr;
call->gtFlags |= GTF_CALL_VIRT_STUB;
assert(callInfo->stubLookup.constLookup.accessType != IAT_PPVALUE &&
callInfo->stubLookup.constLookup.accessType != IAT_RELPVALUE);
if (callInfo->stubLookup.constLookup.accessType == IAT_PVALUE)
{
call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_VIRTSTUB_REL_INDIRECT;
}
}
#ifdef FEATURE_READYTORUN
if (opts.IsReadyToRun())
{
// Null check is sometimes needed for ready to run to handle
// non-virtual <-> virtual changes between versions
if (callInfo->nullInstanceCheck)
{
call->gtFlags |= GTF_CALL_NULLCHECK;
}
}
#endif
break;
}
case CORINFO_VIRTUALCALL_VTABLE:
{
assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, di);
call->gtFlags |= GTF_CALL_VIRT_VTABLE;
// Should we expand virtual call targets early for this method?
//
if (opts.compExpandCallsEarly)
{
// Mark this method to expand the virtual call target early in fgMorphCall
call->AsCall()->SetExpandedEarly();
}
break;
}
case CORINFO_VIRTUALCALL_LDVIRTFTN:
{
if (compIsForInlining())
{
compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_CALL_VIA_LDVIRTFTN);
return TYP_UNDEF;
}
assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
// OK, We've been told to call via LDVIRTFTN, so just
// take the call now....
GenTreeCall::Use* args = impPopCallArgs(sig->numArgs, sig);
GenTree* thisPtr = impPopStack().val;
thisPtr = impTransformThis(thisPtr, pConstrainedResolvedToken, callInfo->thisTransform);
assert(thisPtr != nullptr);
// Clone the (possibly transformed) "this" pointer
GenTree* thisPtrCopy;
thisPtr = impCloneExpr(thisPtr, &thisPtrCopy, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("LDVIRTFTN this pointer"));
GenTree* fptr = impImportLdvirtftn(thisPtr, pResolvedToken, callInfo);
assert(fptr != nullptr);
thisPtr = nullptr; // can't reuse it
// Now make an indirect call through the function pointer
unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall through function pointer"));
impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
// Create the actual call node
call = gtNewIndCallNode(fptr, callRetTyp, args, di);
call->AsCall()->gtCallThisArg = gtNewCallArgs(thisPtrCopy);
call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
if ((sig->sigInst.methInstCount != 0) && IsTargetAbi(CORINFO_CORERT_ABI))
{
// CoreRT generic virtual method: need to handle potential fat function pointers
addFatPointerCandidate(call->AsCall());
}
#ifdef FEATURE_READYTORUN
if (opts.IsReadyToRun())
{
// Null check is needed for ready to run to handle
// non-virtual <-> virtual changes between versions
call->gtFlags |= GTF_CALL_NULLCHECK;
}
#endif
// Since we are jumping over some code, check that it's OK to skip that code
assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
(sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
goto DONE;
}
case CORINFO_CALL:
{
// This is for a non-virtual, non-interface etc. call
call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, di);
// We remove the nullcheck for the GetType call intrinsic.
// TODO-CQ: JIT64 does not introduce the null check for many more helper calls
// and intrinsics.
if (callInfo->nullInstanceCheck &&
!((mflags & CORINFO_FLG_INTRINSIC) != 0 && (ni == NI_System_Object_GetType)))
{
call->gtFlags |= GTF_CALL_NULLCHECK;
}
#ifdef FEATURE_READYTORUN
if (opts.IsReadyToRun())
{
call->AsCall()->setEntryPoint(callInfo->codePointerLookup.constLookup);
}
#endif
break;
}
case CORINFO_CALL_CODE_POINTER:
{
// The EE has asked us to call by computing a code pointer and then doing an
// indirect call. This is because a runtime lookup is required to get the code entry point.
// These calls always follow a uniform calling convention, i.e. no extra hidden params
assert((sig->callConv & CORINFO_CALLCONV_PARAMTYPE) == 0);
assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG);
assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
GenTree* fptr =
impLookupToTree(pResolvedToken, &callInfo->codePointerLookup, GTF_ICON_FTN_ADDR, callInfo->hMethod);
if (compDonotInline())
{
return TYP_UNDEF;
}
// Now make an indirect call through the function pointer
unsigned lclNum = lvaGrabTemp(true DEBUGARG("Indirect call through function pointer"));
impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
call = gtNewIndCallNode(fptr, callRetTyp, nullptr, di);
call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
if (callInfo->nullInstanceCheck)
{
call->gtFlags |= GTF_CALL_NULLCHECK;
}
break;
}
default:
assert(!"unknown call kind");
break;
}
//-------------------------------------------------------------------------
// Set more flags
PREFIX_ASSUME(call != nullptr);
if (mflags & CORINFO_FLG_NOGCCHECK)
{
call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_NOGCCHECK;
}
// Mark call if it's one of the ones we will maybe treat as an intrinsic
if (isSpecialIntrinsic)
{
call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_SPECIAL_INTRINSIC;
}
}
assert(sig);
assert(clsHnd || (opcode == CEE_CALLI)); // We're never verifying for CALLI, so this is not set.
/* Some sanity checks */
// CALL_VIRT and NEWOBJ must have a THIS pointer
assert((opcode != CEE_CALLVIRT && opcode != CEE_NEWOBJ) || (sig->callConv & CORINFO_CALLCONV_HASTHIS));
// static bit and hasThis are negations of one another
assert(((mflags & CORINFO_FLG_STATIC) != 0) == ((sig->callConv & CORINFO_CALLCONV_HASTHIS) == 0));
assert(call != nullptr);
/*-------------------------------------------------------------------------
* Check special-cases etc
*/
/* Special case - Check if it is a call to Delegate.Invoke(). */
if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
{
assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
assert(mflags & CORINFO_FLG_FINAL);
/* Set the delegate flag */
call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_DELEGATE_INV;
if (callInfo->wrapperDelegateInvoke)
{
call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_WRAPPER_DELEGATE_INV;
}
if (opcode == CEE_CALLVIRT)
{
assert(mflags & CORINFO_FLG_FINAL);
/* It should have the GTF_CALL_NULLCHECK flag set. Reset it */
assert(call->gtFlags & GTF_CALL_NULLCHECK);
call->gtFlags &= ~GTF_CALL_NULLCHECK;
}
}
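// Remember the declared return type class; for varargs calls the call-site
// signature lookup below may overwrite 'sig', and we still need the original
// class handle to decide whether it must be loaded ahead of time.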
CORINFO_CLASS_HANDLE actualMethodRetTypeSigClass;
actualMethodRetTypeSigClass = sig->retTypeSigClass;
/* Check for varargs */
if (!compFeatureVarArg() && ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
(sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG))
{
BADCODE("Varargs not supported.");
}
if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
(sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
{
assert(!compIsForInlining());
/* Set the right flags */
call->gtFlags |= GTF_CALL_POP_ARGS;
call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_VARARGS;
/* Can't allow tailcall for varargs as it is caller-pop. The caller
will be expecting to pop a certain number of arguments, but if we
tailcall to a function with a different number of arguments, we
are hosed. There are ways around this (caller remembers esp value,
varargs is not caller-pop, etc), but not worth it. */
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_X86
if (canTailCall)
{
canTailCall = false;
szCanTailCallFailReason = "Callee is varargs";
}
#endif
/* Get the total number of arguments - this is already correct
* for CALLI - for methods we have to get it from the call site */
if (opcode != CEE_CALLI)
{
#ifdef DEBUG
unsigned numArgsDef = sig->numArgs;
#endif
eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
// For vararg calls we must be sure to load the return type of the
// method actually being called, as well as the return type
// specified in the vararg signature. With type equivalency, these types
// may not be the same.
if (sig->retTypeSigClass != actualMethodRetTypeSigClass)
{
if (actualMethodRetTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR &&
sig->retType != CORINFO_TYPE_VAR)
{
// Make sure that all valuetypes (including enums) that we push are loaded.
// This is to guarantee that if a GC is triggered from the prestub of this method,
// all valuetypes in the method signature are already loaded.
// We need to be able to find the size of the valuetypes, but we cannot
// do a class-load from within GC.
info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(actualMethodRetTypeSigClass);
}
}
assert(numArgsDef <= sig->numArgs);
}
/* We will have "cookie" as the last argument but we cannot push
* it on the operand stack because we may overflow, so we append it
* to the arg list next after we pop them */
}
//--------------------------- Inline NDirect ------------------------------
// For inline cases we technically should look at both the current
// block and the call site block (or just the latter if we've
// fused the EH trees). However the block-related checks pertain to
// EH and we currently won't inline a method with EH. So for
// inlinees, just checking the call site block is sufficient.
{
// New lexical block here to avoid compilation errors because of GOTOs.
BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB;
impCheckForPInvokeCall(call->AsCall(), methHnd, sig, mflags, block);
}
#ifdef UNIX_X86_ABI
// On Unix x86 we use caller-cleaned convention.
if ((call->gtFlags & GTF_CALL_UNMANAGED) == 0)
call->gtFlags |= GTF_CALL_POP_ARGS;
#endif // UNIX_X86_ABI
if (call->gtFlags & GTF_CALL_UNMANAGED)
{
// We set up the unmanaged call by linking the frame, disabling GC, etc
// This needs to be cleaned up on return.
// In addition, native calls have different normalization rules than managed code
// (managed calling convention always widens return values in the callee)
if (canTailCall)
{
canTailCall = false;
szCanTailCallFailReason = "Callee is native";
}
checkForSmallType = true;
impPopArgsForUnmanagedCall(call, sig);
goto DONE;
}
else if ((opcode == CEE_CALLI) && ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_DEFAULT) &&
((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG))
{
if (!info.compCompHnd->canGetCookieForPInvokeCalliSig(sig))
{
// Normally this only happens with inlining.
// However, a generic method (or type) being NGENd into another module
// can run into this issue as well. There's not an easy fall-back for NGEN
// so instead we fall back to JIT.
if (compIsForInlining())
{
compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_PINVOKE_COOKIE);
}
else
{
IMPL_LIMITATION("Can't get PInvoke cookie (cross module generics)");
}
return TYP_UNDEF;
}
GenTree* cookie = eeGetPInvokeCookie(sig);
// This cookie is required to be either a simple GT_CNS_INT or
// an indirection of a GT_CNS_INT
//
GenTree* cookieConst = cookie;
if (cookie->gtOper == GT_IND)
{
cookieConst = cookie->AsOp()->gtOp1;
}
assert(cookieConst->gtOper == GT_CNS_INT);
// Setting GTF_DONT_CSE on the GT_CNS_INT as well as on the GT_IND (if it exists) will ensure that
// we won't allow this tree to participate in any CSE logic
//
cookie->gtFlags |= GTF_DONT_CSE;
cookieConst->gtFlags |= GTF_DONT_CSE;
call->AsCall()->gtCallCookie = cookie;
if (canTailCall)
{
canTailCall = false;
szCanTailCallFailReason = "PInvoke calli";
}
}
/*-------------------------------------------------------------------------
* Create the argument list
*/
//-------------------------------------------------------------------------
// Special case - for varargs we have an implicit last argument
if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
{
assert(!compIsForInlining());
void *varCookie, *pVarCookie;
if (!info.compCompHnd->canGetVarArgsHandle(sig))
{
compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_VARARGS_COOKIE);
return TYP_UNDEF;
}
varCookie = info.compCompHnd->getVarArgsHandle(sig, &pVarCookie);
assert((!varCookie) != (!pVarCookie));
GenTree* cookie = gtNewIconEmbHndNode(varCookie, pVarCookie, GTF_ICON_VARG_HDL, sig);
assert(extraArg == nullptr);
extraArg = gtNewCallArgs(cookie);
}
//-------------------------------------------------------------------------
// Extra arg for shared generic code and array methods
//
// Extra argument containing instantiation information is passed in the
// following circumstances:
// (a) To the "Address" method on array classes; the extra parameter is
// the array's type handle (a TypeDesc)
// (b) To shared-code instance methods in generic structs; the extra parameter
// is the struct's type handle (a vtable ptr)
// (c) To shared-code per-instantiation non-generic static methods in generic
// classes and structs; the extra parameter is the type handle
// (d) To shared-code generic methods; the extra parameter is an
// exact-instantiation MethodDesc
//
// We also set the exact type context associated with the call so we can
// inline the call correctly later on.
if (sig->callConv & CORINFO_CALLCONV_PARAMTYPE)
{
assert(call->AsCall()->gtCallType == CT_USER_FUNC);
if (clsHnd == nullptr)
{
NO_WAY("CALLI on parameterized type");
}
assert(opcode != CEE_CALLI);
GenTree* instParam;
bool runtimeLookup;
// Instantiated generic method
if (((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_METHOD)
{
assert(exactContextHnd != METHOD_BEING_COMPILED_CONTEXT());
CORINFO_METHOD_HANDLE exactMethodHandle =
(CORINFO_METHOD_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
if (!exactContextNeedsRuntimeLookup)
{
#ifdef FEATURE_READYTORUN
if (opts.IsReadyToRun())
{
instParam =
impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_METHOD_HDL, exactMethodHandle);
if (instParam == nullptr)
{
assert(compDonotInline());
return TYP_UNDEF;
}
}
else
#endif
{
instParam = gtNewIconEmbMethHndNode(exactMethodHandle);
info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(exactMethodHandle);
}
}
else
{
instParam = impTokenToHandle(pResolvedToken, &runtimeLookup, true /*mustRestoreHandle*/);
if (instParam == nullptr)
{
assert(compDonotInline());
return TYP_UNDEF;
}
}
}
// otherwise must be an instance method in a generic struct,
// a static method in a generic type, or a runtime-generated array method
else
{
assert(((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS);
CORINFO_CLASS_HANDLE exactClassHandle = eeGetClassFromContext(exactContextHnd);
if (compIsForInlining() && (clsFlags & CORINFO_FLG_ARRAY) != 0)
{
compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_ARRAY_METHOD);
return TYP_UNDEF;
}
if ((clsFlags & CORINFO_FLG_ARRAY) && isReadonlyCall)
{
// We indicate "readonly" to the Address operation by using a null
// instParam.
instParam = gtNewIconNode(0, TYP_REF);
}
else if (!exactContextNeedsRuntimeLookup)
{
#ifdef FEATURE_READYTORUN
if (opts.IsReadyToRun())
{
instParam =
impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_CLASS_HDL, exactClassHandle);
if (instParam == nullptr)
{
assert(compDonotInline());
return TYP_UNDEF;
}
}
else
#endif
{
instParam = gtNewIconEmbClsHndNode(exactClassHandle);
info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(exactClassHandle);
}
}
else
{
instParam = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup, true /*mustRestoreHandle*/);
if (instParam == nullptr)
{
assert(compDonotInline());
return TYP_UNDEF;
}
}
}
assert(extraArg == nullptr);
extraArg = gtNewCallArgs(instParam);
}
if ((opcode == CEE_NEWOBJ) && ((clsFlags & CORINFO_FLG_DELEGATE) != 0))
{
// Only verifiable cases are supported.
// dup; ldvirtftn; newobj; or ldftn; newobj.
// The IL could contain an unverifiable sequence; in that case the optimization should not be done.
if (impStackHeight() > 0)
{
typeInfo delegateTypeInfo = impStackTop().seTypeInfo;
if (delegateTypeInfo.IsMethod())
{
ldftnInfo = delegateTypeInfo.GetMethodPointerInfo();
}
}
}
//-------------------------------------------------------------------------
// The main group of arguments
args = impPopCallArgs(sig->numArgs, sig, extraArg);
call->AsCall()->gtCallArgs = args;
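// Propagate the side effect flags from the arguments up to the call node.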
for (GenTreeCall::Use& use : call->AsCall()->Args())
{
call->gtFlags |= use.GetNode()->gtFlags & GTF_GLOB_EFFECT;
}
//-------------------------------------------------------------------------
// The "this" pointer
if (((mflags & CORINFO_FLG_STATIC) == 0) && ((sig->callConv & CORINFO_CALLCONV_EXPLICITTHIS) == 0) &&
!((opcode == CEE_NEWOBJ) && (newobjThis == nullptr)))
{
GenTree* obj;
if (opcode == CEE_NEWOBJ)
{
obj = newobjThis;
}
else
{
obj = impPopStack().val;
obj = impTransformThis(obj, pConstrainedResolvedToken, constraintCallThisTransform);
if (compDonotInline())
{
return TYP_UNDEF;
}
}
// Store the "this" value in the call
call->gtFlags |= obj->gtFlags & GTF_GLOB_EFFECT;
call->AsCall()->gtCallThisArg = gtNewCallArgs(obj);
// Is this a virtual or interface call?
if (call->AsCall()->IsVirtual())
{
// only true object pointers can be virtual
assert(obj->gtType == TYP_REF);
// See if we can devirtualize.
const bool isExplicitTailCall = (tailCallFlags & PREFIX_TAILCALL_EXPLICIT) != 0;
const bool isLateDevirtualization = false;
impDevirtualizeCall(call->AsCall(), pResolvedToken, &callInfo->hMethod, &callInfo->methodFlags,
&callInfo->contextHandle, &exactContextHnd, isLateDevirtualization, isExplicitTailCall,
// Take care to pass raw IL offset here as the 'debug info' might be different for
// inlinees.
rawILOffset);
// Devirtualization may change which method gets invoked. Update our local cache.
//
methHnd = callInfo->hMethod;
}
if (impIsThis(obj))
{
call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_NONVIRT_SAME_THIS;
}
}
//-------------------------------------------------------------------------
// The "this" pointer for "newobj"
if (opcode == CEE_NEWOBJ)
{
if (clsFlags & CORINFO_FLG_VAROBJSIZE)
{
assert(!(clsFlags & CORINFO_FLG_ARRAY)); // arrays handled separately
// This is a 'new' of a variable sized object, where
// the constructor is to return the object. In this case
// the constructor claims to return VOID but we know it
// actually returns the new object
assert(callRetTyp == TYP_VOID);
callRetTyp = TYP_REF;
call->gtType = TYP_REF;
impSpillSpecialSideEff();
impPushOnStack(call, typeInfo(TI_REF, clsHnd));
}
else
{
if (clsFlags & CORINFO_FLG_DELEGATE)
{
// The new inliner morphs it in impImportCall.
// This will allow us to inline the call to the delegate constructor.
call = fgOptimizeDelegateConstructor(call->AsCall(), &exactContextHnd, ldftnInfo);
}
if (!bIntrinsicImported)
{
#if defined(DEBUG) || defined(INLINE_DATA)
// Keep track of the raw IL offset of the call
call->AsCall()->gtRawILOffset = rawILOffset;
#endif // defined(DEBUG) || defined(INLINE_DATA)
// Is it an inline candidate?
impMarkInlineCandidate(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo);
}
// append the call node.
impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtDI);
// Now push the value of the 'new' onto the stack
// This is a 'new' of a non-variable sized object.
// Append the new node (op1) to the statement list,
// and then push the local holding the value of this
// new instruction on the stack.
if (clsFlags & CORINFO_FLG_VALUECLASS)
{
assert(newobjThis->gtOper == GT_ADDR && newobjThis->AsOp()->gtOp1->gtOper == GT_LCL_VAR);
unsigned tmp = newobjThis->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum();
impPushOnStack(gtNewLclvNode(tmp, lvaGetRealType(tmp)), verMakeTypeInfo(clsHnd).NormaliseForStack());
}
else
{
if (newobjThis->gtOper == GT_COMMA)
{
// We must have inserted the callout. Get the real newobj.
newobjThis = newobjThis->AsOp()->gtOp2;
}
assert(newobjThis->gtOper == GT_LCL_VAR);
impPushOnStack(gtNewLclvNode(newobjThis->AsLclVarCommon()->GetLclNum(), TYP_REF),
typeInfo(TI_REF, clsHnd));
}
}
return callRetTyp;
}
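// At this point the call node is fully constructed. What remains is to record
// the call signature (in debug builds), run the final tail call checks, and
// mark inline candidates.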
DONE:
#ifdef DEBUG
// In debug we want to be able to register callsites with the EE.
assert(call->AsCall()->callSig == nullptr);
call->AsCall()->callSig = new (this, CMK_Generic) CORINFO_SIG_INFO;
*call->AsCall()->callSig = *sig;
#endif
// Final importer checks for calls flagged as tail calls.
//
if (tailCallFlags != 0)
{
const bool isExplicitTailCall = (tailCallFlags & PREFIX_TAILCALL_EXPLICIT) != 0;
const bool isImplicitTailCall = (tailCallFlags & PREFIX_TAILCALL_IMPLICIT) != 0;
const bool isStressTailCall = (tailCallFlags & PREFIX_TAILCALL_STRESS) != 0;
// Exactly one of these should be true.
assert(isExplicitTailCall != isImplicitTailCall);
// This check cannot be performed for implicit tail calls for the reason
// that impIsImplicitTailCallCandidate() is not checking whether return
// types are compatible before marking a call node with PREFIX_TAILCALL_IMPLICIT.
// As a result it is possible that in the following case, we find that
// the type stack is non-empty if Callee() is considered for implicit
// tail calling.
// int Caller(..) { .... void Callee(); ret val; ... }
//
// Note that we cannot check return type compatibility before ImpImportCall()
// as we don't have required info or need to duplicate some of the logic of
// ImpImportCall().
//
// For implicit tail calls, we perform this check after return types are
// known to be compatible.
if (isExplicitTailCall && (verCurrentState.esStackDepth != 0))
{
BADCODE("Stack should be empty after tailcall");
}
// For opportunistic tailcalls we allow implicit widening, i.e. tailcalls from int32 -> int16, since the
// managed calling convention dictates that the callee widens the value. For explicit tailcalls we don't
// want to require this detail of the calling convention to bubble up to the tailcall helpers
bool allowWidening = isImplicitTailCall;
if (canTailCall &&
!impTailCallRetTypeCompatible(allowWidening, info.compRetType, info.compMethodInfo->args.retTypeClass,
info.compCallConv, callRetTyp, sig->retTypeClass,
call->AsCall()->GetUnmanagedCallConv()))
{
canTailCall = false;
szCanTailCallFailReason = "Return types are not tail call compatible";
}
// Stack empty check for implicit tail calls.
if (canTailCall && isImplicitTailCall && (verCurrentState.esStackDepth != 0))
{
#ifdef TARGET_AMD64
// JIT64 Compatibility: Opportunistic tail call stack mismatch throws a VerificationException
// in JIT64, not an InvalidProgramException.
Verify(false, "Stack should be empty after tailcall");
#else // !TARGET_AMD64
BADCODE("Stack should be empty after tailcall");
#endif // TARGET_AMD64
}
// assert(compCurBB is not a catch, finally or filter block);
// assert(compCurBB is not a try block protected by a finally block);
assert(!isExplicitTailCall || compCurBB->bbJumpKind == BBJ_RETURN);
// Ask VM for permission to tailcall
if (canTailCall)
{
// True virtual or indirect calls, shouldn't pass in a callee handle.
CORINFO_METHOD_HANDLE exactCalleeHnd =
((call->AsCall()->gtCallType != CT_USER_FUNC) || call->AsCall()->IsVirtual()) ? nullptr : methHnd;
if (info.compCompHnd->canTailCall(info.compMethodHnd, methHnd, exactCalleeHnd, isExplicitTailCall))
{
if (isExplicitTailCall)
{
// In case of explicit tail calls, mark it so that it is not considered
// for in-lining.
call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_EXPLICIT_TAILCALL;
JITDUMP("\nGTF_CALL_M_EXPLICIT_TAILCALL set for call [%06u]\n", dspTreeID(call));
if (isStressTailCall)
{
call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_STRESS_TAILCALL;
JITDUMP("\nGTF_CALL_M_STRESS_TAILCALL set for call [%06u]\n", dspTreeID(call));
}
}
else
{
#if FEATURE_TAILCALL_OPT
// Must be an implicit tail call.
assert(isImplicitTailCall);
// It is possible that a call node is both an inline candidate and marked
// for opportunistic tail calling. Inlining happens before morphing of
// trees. If in-lining of an in-line candidate gets aborted for whatever
// reason, it will survive to the morphing stage at which point it will be
// transformed into a tail call after performing additional checks.
call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_IMPLICIT_TAILCALL;
JITDUMP("\nGTF_CALL_M_IMPLICIT_TAILCALL set for call [%06u]\n", dspTreeID(call));
#else //! FEATURE_TAILCALL_OPT
NYI("Implicit tail call prefix on a target which doesn't support opportunistic tail calls");
#endif // FEATURE_TAILCALL_OPT
}
// This might or might not turn into a tailcall. We do more
// checks in morph. For explicit tailcalls we need more
// information in morph in case it turns out to be a
// helper-based tailcall.
if (isExplicitTailCall)
{
assert(call->AsCall()->tailCallInfo == nullptr);
call->AsCall()->tailCallInfo = new (this, CMK_CorTailCallInfo) TailCallSiteInfo;
switch (opcode)
{
case CEE_CALLI:
call->AsCall()->tailCallInfo->SetCalli(sig);
break;
case CEE_CALLVIRT:
call->AsCall()->tailCallInfo->SetCallvirt(sig, pResolvedToken);
break;
default:
call->AsCall()->tailCallInfo->SetCall(sig, pResolvedToken);
break;
}
}
}
else
{
// canTailCall reported its reasons already
canTailCall = false;
JITDUMP("\ninfo.compCompHnd->canTailCall returned false for call [%06u]\n", dspTreeID(call));
}
}
else
{
// If this assert fires it means that canTailCall was set to false without setting a reason!
assert(szCanTailCallFailReason != nullptr);
JITDUMP("\nRejecting %splicit tail call for [%06u], reason: '%s'\n", isExplicitTailCall ? "ex" : "im",
dspTreeID(call), szCanTailCallFailReason);
info.compCompHnd->reportTailCallDecision(info.compMethodHnd, methHnd, isExplicitTailCall, TAILCALL_FAIL,
szCanTailCallFailReason);
}
}
// Note: we assume that small return types are already normalized by the managed callee
// or by the pinvoke stub for calls to unmanaged code.
if (!bIntrinsicImported)
{
//
// Things needed to be checked when bIntrinsicImported is false.
//
assert(call->gtOper == GT_CALL);
assert(callInfo != nullptr);
if (compIsForInlining() && opcode == CEE_CALLVIRT)
{
GenTree* callObj = call->AsCall()->gtCallThisArg->GetNode();
if ((call->AsCall()->IsVirtual() || (call->gtFlags & GTF_CALL_NULLCHECK)) &&
impInlineIsGuaranteedThisDerefBeforeAnySideEffects(nullptr, call->AsCall()->gtCallArgs, callObj,
impInlineInfo->inlArgInfo))
{
impInlineInfo->thisDereferencedFirst = true;
}
}
#if defined(DEBUG) || defined(INLINE_DATA)
// Keep track of the raw IL offset of the call
call->AsCall()->gtRawILOffset = rawILOffset;
#endif // defined(DEBUG) || defined(INLINE_DATA)
// Is it an inline candidate?
impMarkInlineCandidate(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo);
}
// Extra checks for tail calls and tail recursion.
//
// A tail recursive call is a potential loop from the current block to the start of the root method.
// If we see a tail recursive call, mark the blocks from the call site back to the entry as potentially
// being in a loop.
//
// Note: if we're importing an inlinee we don't mark the right set of blocks, but by then it's too
// late. Currently this doesn't lead to problems. See GitHub issue 33529.
//
// OSR also needs to handle tail calls specially:
// * block profiling in OSR methods needs to ensure probes happen before tail calls, not after.
// * the root method entry must be imported if there's a recursive tail call or a potentially
// inlineable tail call.
//
if ((tailCallFlags != 0) && canTailCall)
{
if (gtIsRecursiveCall(methHnd))
{
assert(verCurrentState.esStackDepth == 0);
BasicBlock* loopHead = nullptr;
if (!compIsForInlining() && opts.IsOSR())
{
// For root method OSR we may branch back to the actual method entry,
// which is not fgFirstBB, and which we will need to import.
assert(fgEntryBB != nullptr);
loopHead = fgEntryBB;
}
else
{
// For normal jitting we may branch back to the firstBB; this
// should already be imported.
loopHead = fgFirstBB;
}
JITDUMP("\nTail recursive call [%06u] in the method. Mark " FMT_BB " to " FMT_BB
" as having a backward branch.\n",
dspTreeID(call), loopHead->bbNum, compCurBB->bbNum);
fgMarkBackwardJump(loopHead, compCurBB);
}
// We only do these OSR checks in the root method because:
// * If we fail to import the root method entry when importing the root method, we can't go back
// and import it during inlining. So instead of checking just for recursive tail calls we also
// have to check for anything that might introduce a recursive tail call.
// * We only instrument root method blocks in OSR methods.
//
if (opts.IsOSR() && !compIsForInlining())
{
// If a root method tail call candidate block is not a BBJ_RETURN, it should have a unique
// BBJ_RETURN successor. Mark that successor so we can handle it specially during profile
// instrumentation.
//
if (compCurBB->bbJumpKind != BBJ_RETURN)
{
BasicBlock* const successor = compCurBB->GetUniqueSucc();
assert(successor->bbJumpKind == BBJ_RETURN);
successor->bbFlags |= BBF_TAILCALL_SUCCESSOR;
optMethodFlags |= OMF_HAS_TAILCALL_SUCCESSOR;
}
// If this call might eventually turn into a loop back to method entry, make sure we
// import the method entry.
//
assert(call->IsCall());
GenTreeCall* const actualCall = call->AsCall();
const bool mustImportEntryBlock = gtIsRecursiveCall(methHnd) || actualCall->IsInlineCandidate() ||
actualCall->IsGuardedDevirtualizationCandidate();
// Only schedule importation if we're not currently importing.
//
if (mustImportEntryBlock && (compCurBB != fgEntryBB))
{
JITDUMP("\nOSR: inlineable or recursive tail call [%06u] in the method, so scheduling " FMT_BB
" for importation\n",
dspTreeID(call), fgEntryBB->bbNum);
impImportBlockPending(fgEntryBB);
}
}
}
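// Calli and code-pointer calls whose signature is flagged as a fat call may
// dispatch through a fat function pointer; record the candidate so it can be
// expanded later.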
if ((sig->flags & CORINFO_SIGFLAG_FAT_CALL) != 0)
{
assert(opcode == CEE_CALLI || callInfo->kind == CORINFO_CALL_CODE_POINTER);
addFatPointerCandidate(call->AsCall());
}
DONE_CALL:
// Push or append the result of the call
if (callRetTyp == TYP_VOID)
{
if (opcode == CEE_NEWOBJ)
{
// we actually did push something, so don't spill the thing we just pushed.
assert(verCurrentState.esStackDepth > 0);
impAppendTree(call, verCurrentState.esStackDepth - 1, impCurStmtDI);
}
else
{
impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtDI);
}
}
else
{
impSpillSpecialSideEff();
if (clsFlags & CORINFO_FLG_ARRAY)
{
eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
}
typeInfo tiRetVal = verMakeTypeInfo(sig->retType, sig->retTypeClass);
tiRetVal.NormaliseForStack();
// The CEE_READONLY prefix modifies the verification semantics of an Address
// operation on an array type.
if ((clsFlags & CORINFO_FLG_ARRAY) && isReadonlyCall && tiRetVal.IsByRef())
{
tiRetVal.SetIsReadonlyByRef();
}
if (call->IsCall())
{
// Sometimes "call" is not a GT_CALL (if we imported an intrinsic that didn't turn into a call)
GenTreeCall* origCall = call->AsCall();
const bool isFatPointerCandidate = origCall->IsFatPointerCandidate();
const bool isInlineCandidate = origCall->IsInlineCandidate();
const bool isGuardedDevirtualizationCandidate = origCall->IsGuardedDevirtualizationCandidate();
if (varTypeIsStruct(callRetTyp))
{
// Need to treat all "split tree" cases here, not just inline candidates
call = impFixupCallStructReturn(call->AsCall(), sig->retTypeClass);
}
// TODO: consider handling fatcalli cases this way too...?
if (isInlineCandidate || isGuardedDevirtualizationCandidate)
{
// We should not have made any adjustments in impFixupCallStructReturn
// as we defer those until we know the fate of the call.
assert(call == origCall);
assert(opts.OptEnabled(CLFLG_INLINING));
assert(!isFatPointerCandidate); // We should not try to inline calli.
// Make the call its own tree (spill the stack if needed).
// Do not consume the debug info here. This is particularly
// important if we give up on the inline, in which case the
// call will typically end up in the statement that contains
// the GT_RET_EXPR that we leave on the stack.
impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtDI, false);
// TODO: Still using the widened type.
GenTree* retExpr = gtNewInlineCandidateReturnExpr(call, genActualType(callRetTyp), compCurBB->bbFlags);
// Link the retExpr to the call so if necessary we can manipulate it later.
origCall->gtInlineCandidateInfo->retExpr = retExpr;
// Propagate retExpr as the placeholder for the call.
call = retExpr;
}
else
{
// If the call is virtual, and has a generics context, and is not going to have a class probe,
// record the context for possible use during late devirt.
//
// If we ever want to devirt at Tier0, and/or see issues where OSR methods under PGO lose
// important devirtualizations, we'll want to allow both a class probe and a captured context.
//
if (origCall->IsVirtual() && (origCall->gtCallType != CT_INDIRECT) && (exactContextHnd != nullptr) &&
(origCall->gtClassProfileCandidateInfo == nullptr))
{
JITDUMP("\nSaving context %p for call [%06u]\n", exactContextHnd, dspTreeID(origCall));
origCall->gtCallMoreFlags |= GTF_CALL_M_LATE_DEVIRT;
LateDevirtualizationInfo* const info = new (this, CMK_Inlining) LateDevirtualizationInfo;
info->exactContextHnd = exactContextHnd;
origCall->gtLateDevirtualizationInfo = info;
}
if (isFatPointerCandidate)
{
// fatPointer candidates should be in statements of the form call() or var = call().
// Such a form makes it possible to find statements with fat calls without walking through whole trees
// and removes problems with cutting trees.
assert(!bIntrinsicImported);
assert(IsTargetAbi(CORINFO_CORERT_ABI));
if (call->OperGet() != GT_LCL_VAR) // can be already converted by impFixupCallStructReturn.
{
unsigned calliSlot = lvaGrabTemp(true DEBUGARG("calli"));
LclVarDsc* varDsc = lvaGetDesc(calliSlot);
varDsc->lvVerTypeInfo = tiRetVal;
impAssignTempGen(calliSlot, call, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_NONE);
// impAssignTempGen can change src arg list and return type for call that returns struct.
var_types type = genActualType(lvaTable[calliSlot].TypeGet());
call = gtNewLclvNode(calliSlot, type);
}
}
// For non-candidates we must also spill, since we
// might have locals live on the eval stack that this
// call can modify.
//
// Suppress this for certain well-known call targets
// that we know won't modify locals, eg calls that are
// recognized in gtCanOptimizeTypeEquality. Otherwise
// we may break key fragile pattern matches later on.
bool spillStack = true;
if (call->IsCall())
{
GenTreeCall* callNode = call->AsCall();
if ((callNode->gtCallType == CT_HELPER) && (gtIsTypeHandleToRuntimeTypeHelper(callNode) ||
gtIsTypeHandleToRuntimeTypeHandleHelper(callNode)))
{
spillStack = false;
}
else if ((callNode->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) != 0)
{
spillStack = false;
}
}
if (spillStack)
{
impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("non-inline candidate call"));
}
}
}
if (!bIntrinsicImported)
{
//-------------------------------------------------------------------------
//
/* If the call is of a small type and the callee is managed, the callee will normalize the result
before returning.
However, we need to normalize small type values returned by unmanaged
functions (pinvoke). The pinvoke stub does the normalization, but we need to do it here
if we use the shorter inlined pinvoke stub. */
if (checkForSmallType && varTypeIsIntegral(callRetTyp) && genTypeSize(callRetTyp) < genTypeSize(TYP_INT))
{
call = gtNewCastNode(genActualType(callRetTyp), call, false, callRetTyp);
}
}
impPushOnStack(call, tiRetVal);
}
// VSD functions get a new call target each time we getCallInfo, so clear the cache.
// Also, the call info cache for CALLI instructions is largely incomplete, so clear it out.
// if ( (opcode == CEE_CALLI) || (callInfoCache.fetchCallInfo().kind == CORINFO_VIRTUALCALL_STUB))
// callInfoCache.uncacheCallInfo();
return callRetTyp;
}
#ifdef _PREFAST_
#pragma warning(pop)
#endif
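//------------------------------------------------------------------------
// impMethodInfo_hasRetBuffArg: check whether a method with the given signature
// returns its struct value via a hidden return buffer argument under the
// specified calling convention.
//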
bool Compiler::impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo, CorInfoCallConvExtension callConv)
{
CorInfoType corType = methInfo->args.retType;
if ((corType == CORINFO_TYPE_VALUECLASS) || (corType == CORINFO_TYPE_REFANY))
{
// We have some kind of STRUCT being returned
structPassingKind howToReturnStruct = SPK_Unknown;
var_types returnType = getReturnTypeForStruct(methInfo->args.retTypeClass, callConv, &howToReturnStruct);
if (howToReturnStruct == SPK_ByReference)
{
return true;
}
}
return false;
}
#ifdef DEBUG
//
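// impImportJitTestLabelMark: import a call to JitTestLabel.Mark by popping the
// label arguments off the stack and attaching the resulting test annotation to
// the tree left on top of the stack.
//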
var_types Compiler::impImportJitTestLabelMark(int numArgs)
{
TestLabelAndNum tlAndN;
if (numArgs == 2)
{
tlAndN.m_num = 0;
StackEntry se = impPopStack();
assert(se.seTypeInfo.GetType() == TI_INT);
GenTree* val = se.val;
assert(val->IsCnsIntOrI());
tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
}
else if (numArgs == 3)
{
StackEntry se = impPopStack();
assert(se.seTypeInfo.GetType() == TI_INT);
GenTree* val = se.val;
assert(val->IsCnsIntOrI());
tlAndN.m_num = val->AsIntConCommon()->IconValue();
se = impPopStack();
assert(se.seTypeInfo.GetType() == TI_INT);
val = se.val;
assert(val->IsCnsIntOrI());
tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
}
else
{
assert(false);
}
StackEntry expSe = impPopStack();
GenTree* node = expSe.val;
// There are a small number of special cases, where we actually put the annotation on a subnode.
if (tlAndN.m_tl == TL_LoopHoist && tlAndN.m_num >= 100)
{
// A loop hoist annotation with value >= 100 means that the expression should be a static field access,
// a GT_IND of a static field address, which should be the sum of a (hoistable) helper call and possibly some
// offset within the static field block whose address is returned by the helper call.
// The annotation is saying that this address calculation, but not the entire access, should be hoisted.
assert(node->OperGet() == GT_IND);
tlAndN.m_num -= 100;
GetNodeTestData()->Set(node->AsOp()->gtOp1, tlAndN);
GetNodeTestData()->Remove(node);
}
else
{
GetNodeTestData()->Set(node, tlAndN);
}
impPushOnStack(node, expSe.seTypeInfo);
return node->TypeGet();
}
#endif // DEBUG
//-----------------------------------------------------------------------------------
// impFixupCallStructReturn: For a call node that returns a struct do one of the following:
// - set the flag to indicate struct return via retbuf arg;
// - adjust the return type to a SIMD type if it is returned in 1 reg;
// - spill call result into a temp if it is returned into 2 registers or more and not tail call or inline candidate.
//
// Arguments:
// call - GT_CALL GenTree node
// retClsHnd - Class handle of return type of the call
//
// Return Value:
// Returns new GenTree node after fixing struct return of call node
//
GenTree* Compiler::impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HANDLE retClsHnd)
{
if (!varTypeIsStruct(call))
{
return call;
}
call->gtRetClsHnd = retClsHnd;
#if FEATURE_MULTIREG_RET
call->InitializeStructReturnType(this, retClsHnd, call->GetUnmanagedCallConv());
const ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
const unsigned retRegCount = retTypeDesc->GetReturnRegCount();
#else // !FEATURE_MULTIREG_RET
const unsigned retRegCount = 1;
#endif // !FEATURE_MULTIREG_RET
structPassingKind howToReturnStruct;
var_types returnType = getReturnTypeForStruct(retClsHnd, call->GetUnmanagedCallConv(), &howToReturnStruct);
if (howToReturnStruct == SPK_ByReference)
{
assert(returnType == TYP_UNKNOWN);
call->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
return call;
}
// Recognize SIMD types as we do for LCL_VARs.
// Note that this might not be the ABI-specific type; for example, on x64 we can set `TYP_SIMD8`
// for `System.Numerics.Vector2` here, but lowering will change it to long as the ABI dictates.
var_types simdReturnType = impNormStructType(call->gtRetClsHnd);
if (simdReturnType != call->TypeGet())
{
assert(varTypeIsSIMD(simdReturnType));
JITDUMP("changing the type of a call [%06u] from %s to %s\n", dspTreeID(call), varTypeName(call->TypeGet()),
varTypeName(simdReturnType));
call->ChangeType(simdReturnType);
}
if (retRegCount == 1)
{
return call;
}
#if FEATURE_MULTIREG_RET
assert(varTypeIsStruct(call)); // It could be a SIMD returned in several regs.
assert(returnType == TYP_STRUCT);
assert((howToReturnStruct == SPK_ByValueAsHfa) || (howToReturnStruct == SPK_ByValue));
#ifdef UNIX_AMD64_ABI
// must be a struct returned in two registers
assert(retRegCount == 2);
#else // not UNIX_AMD64_ABI
assert(retRegCount >= 2);
#endif // not UNIX_AMD64_ABI
if (!call->CanTailCall() && !call->IsInlineCandidate())
{
// Force a call returning a multi-reg struct to always be of the IR form
// tmp = call
//
// No need to assign a multi-reg struct to a local var if:
// - It is a tail call or
// - The call is marked for inlining later
return impAssignMultiRegTypeToVar(call, retClsHnd DEBUGARG(call->GetUnmanagedCallConv()));
}
return call;
#endif // FEATURE_MULTIREG_RET
}
//------------------------------------------------------------------------
// impFixupStructReturnType: For struct return values, set the appropriate flags in the multireg return case;
//    in the non-multireg case, handle two special helpers (`CORINFO_HELP_GETFIELDSTRUCT`, `CORINFO_HELP_UNBOX_NULLABLE`),
//    re-typing the operand when the ABI does not use a struct return buffer.
//
// Arguments:
// op - the return value;
// retClsHnd - the struct handle;
// unmgdCallConv - the calling convention of the function that returns this struct.
//
// Return Value:
// the result tree that does the return.
//
GenTree* Compiler::impFixupStructReturnType(GenTree* op,
CORINFO_CLASS_HANDLE retClsHnd,
CorInfoCallConvExtension unmgdCallConv)
{
assert(varTypeIsStruct(info.compRetType));
assert(info.compRetBuffArg == BAD_VAR_NUM);
JITDUMP("\nimpFixupStructReturnType: retyping\n");
DISPTREE(op);
#if defined(TARGET_XARCH)
#if FEATURE_MULTIREG_RET
// No VarArgs for CoreCLR on x64 Unix
UNIX_AMD64_ABI_ONLY(assert(!info.compIsVarArgs));
// Is method returning a multi-reg struct?
if (varTypeIsStruct(info.compRetNativeType) && IsMultiRegReturnedType(retClsHnd, unmgdCallConv))
{
// In case of multi-reg struct return, we force IR to be one of the following:
// GT_RETURN(lclvar) or GT_RETURN(call). If op is anything other than a
// lclvar or call, it is assigned to a temp to create: temp = op and GT_RETURN(tmp).
if (op->gtOper == GT_LCL_VAR)
{
// Note that this is a multi-reg return.
unsigned lclNum = op->AsLclVarCommon()->GetLclNum();
lvaTable[lclNum].lvIsMultiRegRet = true;
// TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
op->gtFlags |= GTF_DONT_CSE;
return op;
}
if (op->gtOper == GT_CALL)
{
return op;
}
return impAssignMultiRegTypeToVar(op, retClsHnd DEBUGARG(unmgdCallConv));
}
#else
assert(info.compRetNativeType != TYP_STRUCT);
#endif // FEATURE_MULTIREG_RET
#elif FEATURE_MULTIREG_RET && defined(TARGET_ARM)
if (varTypeIsStruct(info.compRetNativeType) && !info.compIsVarArgs && IsHfa(retClsHnd))
{
if (op->gtOper == GT_LCL_VAR)
{
// This LCL_VAR is an HFA return value, it stays as a TYP_STRUCT
unsigned lclNum = op->AsLclVarCommon()->GetLclNum();
// Make sure this struct type stays as struct so that we can return it as an HFA
lvaTable[lclNum].lvIsMultiRegRet = true;
// TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
op->gtFlags |= GTF_DONT_CSE;
return op;
}
if (op->gtOper == GT_CALL)
{
if (op->AsCall()->IsVarargs())
{
// We cannot tail call because control needs to return to fixup the calling
// convention for result return.
op->AsCall()->gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
op->AsCall()->gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
}
else
{
return op;
}
}
return impAssignMultiRegTypeToVar(op, retClsHnd DEBUGARG(unmgdCallConv));
}
#elif FEATURE_MULTIREG_RET && defined(TARGET_ARM64)
// Is method returning a multi-reg struct?
if (IsMultiRegReturnedType(retClsHnd, unmgdCallConv))
{
if (op->gtOper == GT_LCL_VAR)
{
// This LCL_VAR stays as a TYP_STRUCT
unsigned lclNum = op->AsLclVarCommon()->GetLclNum();
if (!lvaIsImplicitByRefLocal(lclNum))
{
// Make sure this struct type is not struct promoted
lvaTable[lclNum].lvIsMultiRegRet = true;
// TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
op->gtFlags |= GTF_DONT_CSE;
return op;
}
}
if (op->gtOper == GT_CALL)
{
if (op->AsCall()->IsVarargs())
{
// We cannot tail call because control needs to return to fixup the calling
// convention for result return.
op->AsCall()->gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
op->AsCall()->gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
}
else
{
return op;
}
}
return impAssignMultiRegTypeToVar(op, retClsHnd DEBUGARG(unmgdCallConv));
}
#endif // FEATURE_MULTIREG_RET && TARGET_ARM64
if (!op->IsCall() || !op->AsCall()->TreatAsHasRetBufArg(this))
{
// Don't retype `struct` as a primitive type in `ret` instruction.
return op;
}
// This must be one of those 'special' helpers that don't
// really have a return buffer, but instead use it as a way
// to keep the trees cleaner with fewer address-taken temps.
//
// Well now we have to materialize the return buffer as
// an address-taken temp. Then we can return the temp.
//
// NOTE: this code assumes that since the call directly
// feeds the return, then the call must be returning the
// same structure/class/type.
//
unsigned tmpNum = lvaGrabTemp(true DEBUGARG("pseudo return buffer"));
// No need to spill anything as we're about to return.
impAssignTempGen(tmpNum, op, info.compMethodInfo->args.retTypeClass, (unsigned)CHECK_SPILL_NONE);
op = gtNewLclvNode(tmpNum, info.compRetType);
JITDUMP("\nimpFixupStructReturnType: created a pseudo-return buffer for a special helper\n");
DISPTREE(op);
return op;
}
/*****************************************************************************
CEE_LEAVE may be jumping out of a protected block, viz, a catch or a
finally-protected try. We find the finally blocks protecting the current
offset (in order) by walking over the complete exception table and
finding enclosing clauses. This assumes that the table is sorted.
This will create a series of BBJ_CALLFINALLY -> BBJ_CALLFINALLY ... -> BBJ_ALWAYS.
If we are leaving a catch handler, we need to attach the
CPX_ENDCATCHes to the correct BBJ_CALLFINALLY blocks.
After this function, the BBJ_LEAVE block has been converted to a different type.
*/
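// For example (a sketch, not drawn from any particular method), for IL such as:
//
//      .try {
//          .try {
//              ...
//              leave AFTER     // leaves both finally-protected trys
//          } finally { ... }   // F1
//      } finally { ... }       // F2
//      AFTER:
//
// the importer rewrites the BBJ_LEAVE block into a chain
// BBJ_CALLFINALLY(F1) -> BBJ_CALLFINALLY(F2) -> BBJ_ALWAYS(AFTER),
// with any required CORINFO_HELP_ENDCATCH calls attached when leaving catch handlers.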
#if !defined(FEATURE_EH_FUNCLETS)
void Compiler::impImportLeave(BasicBlock* block)
{
#ifdef DEBUG
if (verbose)
{
printf("\nBefore import CEE_LEAVE:\n");
fgDispBasicBlocks();
fgDispHandlerTab();
}
#endif // DEBUG
bool invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
unsigned blkAddr = block->bbCodeOffs;
BasicBlock* leaveTarget = block->bbJumpDest;
unsigned jmpAddr = leaveTarget->bbCodeOffs;
// LEAVE clears the stack: spill any side effects and set the stack depth to 0
impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
verCurrentState.esStackDepth = 0;
assert(block->bbJumpKind == BBJ_LEAVE);
assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != NULL); // should be a BB boundary
BasicBlock* step = DUMMY_INIT(NULL);
unsigned encFinallies = 0; // Number of enclosing finallies.
GenTree* endCatches = NULL;
Statement* endLFinStmt = NULL; // The statement tree to indicate the end of locally-invoked finally.
unsigned XTnum;
EHblkDsc* HBtab;
for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
{
// Grab the handler offsets
IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
/* Is this a catch-handler we are CEE_LEAVEing out of?
* If so, we need to call CORINFO_HELP_ENDCATCH.
*/
if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
{
// Can't CEE_LEAVE out of a finally/fault handler
if (HBtab->HasFinallyOrFaultHandler())
BADCODE("leave out of fault/finally block");
// Create the call to CORINFO_HELP_ENDCATCH
GenTree* endCatch = gtNewHelperCallNode(CORINFO_HELP_ENDCATCH, TYP_VOID);
// Make a list of all the currently pending endCatches
if (endCatches)
endCatches = gtNewOperNode(GT_COMMA, TYP_VOID, endCatches, endCatch);
else
endCatches = endCatch;
#ifdef DEBUG
if (verbose)
{
printf("impImportLeave - " FMT_BB " jumping out of catch handler EH#%u, adding call to "
"CORINFO_HELP_ENDCATCH\n",
block->bbNum, XTnum);
}
#endif
}
else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
!jitIsBetween(jmpAddr, tryBeg, tryEnd))
{
/* This is a finally-protected try we are jumping out of */
/* If there are any pending endCatches, and we have already
jumped out of a finally-protected try, then the endCatches
have to be put in a block in an outer try for async
exceptions to work correctly.
Else, just append to the original block */
BasicBlock* callBlock;
assert(!encFinallies ==
!endLFinStmt); // if we have finallies, we better have an endLFin tree, and vice-versa
if (encFinallies == 0)
{
assert(step == DUMMY_INIT(NULL));
callBlock = block;
callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
if (endCatches)
impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);
#ifdef DEBUG
if (verbose)
{
printf("impImportLeave - jumping out of a finally-protected try, convert block to BBJ_CALLFINALLY "
"block %s\n",
callBlock->dspToString());
}
#endif
}
else
{
assert(step != DUMMY_INIT(NULL));
/* Calling the finally block */
callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, XTnum + 1, 0, step);
assert(step->bbJumpKind == BBJ_ALWAYS);
step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
// finally in the chain)
step->bbJumpDest->bbRefs++;
/* The new block will inherit this block's weight */
callBlock->inheritWeight(block);
#ifdef DEBUG
if (verbose)
{
printf("impImportLeave - jumping out of a finally-protected try, new BBJ_CALLFINALLY block %s\n",
callBlock->dspToString());
}
#endif
Statement* lastStmt;
if (endCatches)
{
lastStmt = gtNewStmt(endCatches);
endLFinStmt->SetNextStmt(lastStmt);
lastStmt->SetPrevStmt(endLFinStmt);
}
else
{
lastStmt = endLFinStmt;
}
// note that this sets BBF_IMPORTED on the block
impEndTreeList(callBlock, endLFinStmt, lastStmt);
}
step = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
/* The new block will inherit this block's weight */
step->inheritWeight(block);
step->bbFlags |= BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
#ifdef DEBUG
if (verbose)
{
printf("impImportLeave - jumping out of a finally-protected try, created step (BBJ_ALWAYS) block %s\n",
step->dspToString());
}
#endif
unsigned finallyNesting = compHndBBtab[XTnum].ebdHandlerNestingLevel;
assert(finallyNesting <= compHndBBtabCount);
callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
GenTree* endLFin = new (this, GT_END_LFIN) GenTreeVal(GT_END_LFIN, TYP_VOID, finallyNesting);
endLFinStmt = gtNewStmt(endLFin);
endCatches = NULL;
encFinallies++;
invalidatePreds = true;
}
}
/* Append any remaining endCatches, if any */
assert(!encFinallies == !endLFinStmt);
if (encFinallies == 0)
{
assert(step == DUMMY_INIT(NULL));
block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
if (endCatches)
impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);
#ifdef DEBUG
if (verbose)
{
printf("impImportLeave - no enclosing finally-protected try blocks; convert CEE_LEAVE block to BBJ_ALWAYS "
"block %s\n",
block->dspToString());
}
#endif
}
else
{
// If leaveTarget is the start of another try block, we want to make sure that
// we do not insert finalStep into that try block. Hence, we find the enclosing
// try block.
unsigned tryIndex = bbFindInnermostCommonTryRegion(step, leaveTarget);
// Insert a new BB either in the try region indicated by tryIndex or
// the handler region indicated by leaveTarget->bbHndIndex,
// depending on which is the inner region.
BasicBlock* finalStep = fgNewBBinRegion(BBJ_ALWAYS, tryIndex, leaveTarget->bbHndIndex, step);
finalStep->bbFlags |= BBF_KEEP_BBJ_ALWAYS;
step->bbJumpDest = finalStep;
/* The new block will inherit this block's weight */
finalStep->inheritWeight(block);
#ifdef DEBUG
if (verbose)
{
printf("impImportLeave - finalStep block required (encFinallies(%d) > 0), new block %s\n", encFinallies,
finalStep->dspToString());
}
#endif
Statement* lastStmt;
if (endCatches)
{
lastStmt = gtNewStmt(endCatches);
endLFinStmt->SetNextStmt(lastStmt);
lastStmt->SetPrevStmt(endLFinStmt);
}
else
{
lastStmt = endLFinStmt;
}
impEndTreeList(finalStep, endLFinStmt, lastStmt);
finalStep->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
// Queue up the jump target for importing
impImportBlockPending(leaveTarget);
invalidatePreds = true;
}
if (invalidatePreds && fgComputePredsDone)
{
JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
fgRemovePreds();
}
#ifdef DEBUG
fgVerifyHandlerTab();
if (verbose)
{
printf("\nAfter import CEE_LEAVE:\n");
fgDispBasicBlocks();
fgDispHandlerTab();
}
#endif // DEBUG
}
#else // FEATURE_EH_FUNCLETS
void Compiler::impImportLeave(BasicBlock* block)
{
#ifdef DEBUG
if (verbose)
{
printf("\nBefore import CEE_LEAVE in " FMT_BB " (targetting " FMT_BB "):\n", block->bbNum,
block->bbJumpDest->bbNum);
fgDispBasicBlocks();
fgDispHandlerTab();
}
#endif // DEBUG
bool invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
unsigned blkAddr = block->bbCodeOffs;
BasicBlock* leaveTarget = block->bbJumpDest;
unsigned jmpAddr = leaveTarget->bbCodeOffs;
// LEAVE clears the stack: spill any side effects and set the stack depth to 0
impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
verCurrentState.esStackDepth = 0;
assert(block->bbJumpKind == BBJ_LEAVE);
assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != nullptr); // should be a BB boundary
BasicBlock* step = nullptr;
enum StepType
{
// No step type; step == NULL.
ST_None,
// Is the step block the BBJ_ALWAYS block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair?
// That is, is step->bbJumpDest where a finally will return to?
ST_FinallyReturn,
// The step block is a catch return.
ST_Catch,
// The step block is in a "try", created as the target for a finally return or the target for a catch return.
ST_Try
};
StepType stepType = ST_None;
unsigned XTnum;
EHblkDsc* HBtab;
for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
{
// Grab the handler offsets
IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
/* Is this a catch-handler we are CEE_LEAVEing out of?
*/
if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
{
// Can't CEE_LEAVE out of a finally/fault handler
if (HBtab->HasFinallyOrFaultHandler())
{
BADCODE("leave out of fault/finally block");
}
/* We are jumping out of a catch */
if (step == nullptr)
{
step = block;
step->bbJumpKind = BBJ_EHCATCHRET; // convert the BBJ_LEAVE to BBJ_EHCATCHRET
stepType = ST_Catch;
#ifdef DEBUG
if (verbose)
{
printf("impImportLeave - jumping out of a catch (EH#%u), convert block " FMT_BB
" to BBJ_EHCATCHRET "
"block\n",
XTnum, step->bbNum);
}
#endif
}
else
{
BasicBlock* exitBlock;
/* Create a new catch exit block in the catch region for the existing step block to jump to in this
* scope */
exitBlock = fgNewBBinRegion(BBJ_EHCATCHRET, 0, XTnum + 1, step);
assert(step->KindIs(BBJ_ALWAYS, BBJ_EHCATCHRET));
step->bbJumpDest = exitBlock; // the previous step (maybe a call to a nested finally, or a nested catch
// exit) returns to this block
step->bbJumpDest->bbRefs++;
#if defined(TARGET_ARM)
if (stepType == ST_FinallyReturn)
{
assert(step->bbJumpKind == BBJ_ALWAYS);
// Mark the target of a finally return
step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
}
#endif // defined(TARGET_ARM)
/* The new block will inherit this block's weight */
exitBlock->inheritWeight(block);
exitBlock->bbFlags |= BBF_IMPORTED;
/* This exit block is the new step */
step = exitBlock;
stepType = ST_Catch;
invalidatePreds = true;
#ifdef DEBUG
if (verbose)
{
printf("impImportLeave - jumping out of a catch (EH#%u), new BBJ_EHCATCHRET block " FMT_BB "\n",
XTnum, exitBlock->bbNum);
}
#endif
}
}
else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
!jitIsBetween(jmpAddr, tryBeg, tryEnd))
{
/* We are jumping out of a finally-protected try */
BasicBlock* callBlock;
if (step == nullptr)
{
#if FEATURE_EH_CALLFINALLY_THUNKS
// Put the call to the finally in the enclosing region.
unsigned callFinallyTryIndex =
(HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
unsigned callFinallyHndIndex =
(HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, block);
// Convert the BBJ_LEAVE to BBJ_ALWAYS, jumping to the new BBJ_CALLFINALLY. This is because
// the new BBJ_CALLFINALLY is in a different EH region, thus it can't just replace the BBJ_LEAVE,
// which might be in the middle of the "try". In most cases, the BBJ_ALWAYS will jump to the
// next block, and flow optimizations will remove it.
block->bbJumpKind = BBJ_ALWAYS;
block->bbJumpDest = callBlock;
block->bbJumpDest->bbRefs++;
/* The new block will inherit this block's weight */
callBlock->inheritWeight(block);
callBlock->bbFlags |= BBF_IMPORTED;
#ifdef DEBUG
if (verbose)
{
printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block " FMT_BB
" to "
"BBJ_ALWAYS, add BBJ_CALLFINALLY block " FMT_BB "\n",
XTnum, block->bbNum, callBlock->bbNum);
}
#endif
#else // !FEATURE_EH_CALLFINALLY_THUNKS
callBlock = block;
callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
#ifdef DEBUG
if (verbose)
{
printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block " FMT_BB
" to "
"BBJ_CALLFINALLY block\n",
XTnum, callBlock->bbNum);
}
#endif
#endif // !FEATURE_EH_CALLFINALLY_THUNKS
}
else
{
// Calling the finally block. We already have a step block that is either the call-to-finally from a
// more nested try/finally (thus we are jumping out of multiple nested 'try' blocks, each protected by
// a 'finally'), or the step block is the return from a catch.
//
// Due to ThreadAbortException, we can't have the catch return target the call-to-finally block
// directly. Note that if a 'catch' ends without resetting the ThreadAbortException, the VM will
// automatically re-raise the exception, using the return address of the catch (that is, the target
// block of the BBJ_EHCATCHRET) as the re-raise address. If this address is in a finally, the VM will
// refuse to do the re-raise, and the ThreadAbortException will get eaten (and lost). On AMD64/ARM64,
// we put the call-to-finally thunk in a special "cloned finally" EH region that does look like a
// finally clause to the VM. Thus, on these platforms, we can't have BBJ_EHCATCHRET target a
// BBJ_CALLFINALLY directly. (Note that on ARM32, we don't mark the thunk specially -- it lives directly
// within the 'try' region protected by the finally, since we generate code in such a way that execution
// never returns to the call-to-finally call, and the finally-protected 'try' region doesn't appear on
// stack walks.)
assert(step->KindIs(BBJ_ALWAYS, BBJ_EHCATCHRET));
#if FEATURE_EH_CALLFINALLY_THUNKS
if (step->bbJumpKind == BBJ_EHCATCHRET)
{
// Need to create another step block in the 'try' region that will actually branch to the
// call-to-finally thunk.
BasicBlock* step2 = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
step->bbJumpDest = step2;
step->bbJumpDest->bbRefs++;
step2->inheritWeight(block);
step2->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
#ifdef DEBUG
if (verbose)
{
printf("impImportLeave - jumping out of a finally-protected try (EH#%u), step block is "
"BBJ_EHCATCHRET (" FMT_BB "), new BBJ_ALWAYS step-step block " FMT_BB "\n",
XTnum, step->bbNum, step2->bbNum);
}
#endif
step = step2;
assert(stepType == ST_Catch); // Leave it as catch type for now.
}
#endif // FEATURE_EH_CALLFINALLY_THUNKS
#if FEATURE_EH_CALLFINALLY_THUNKS
unsigned callFinallyTryIndex =
(HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
unsigned callFinallyHndIndex =
(HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
#else // !FEATURE_EH_CALLFINALLY_THUNKS
unsigned callFinallyTryIndex = XTnum + 1;
unsigned callFinallyHndIndex = 0; // don't care
#endif // !FEATURE_EH_CALLFINALLY_THUNKS
callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, step);
step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
// finally in the chain)
step->bbJumpDest->bbRefs++;
#if defined(TARGET_ARM)
if (stepType == ST_FinallyReturn)
{
assert(step->bbJumpKind == BBJ_ALWAYS);
// Mark the target of a finally return
step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
}
#endif // defined(TARGET_ARM)
/* The new block will inherit this block's weight */
callBlock->inheritWeight(block);
callBlock->bbFlags |= BBF_IMPORTED;
#ifdef DEBUG
if (verbose)
{
printf("impImportLeave - jumping out of a finally-protected try (EH#%u), new BBJ_CALLFINALLY "
"block " FMT_BB "\n",
XTnum, callBlock->bbNum);
}
#endif
}
step = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
stepType = ST_FinallyReturn;
/* The new block will inherit this block's weight */
step->inheritWeight(block);
step->bbFlags |= BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
#ifdef DEBUG
if (verbose)
{
printf("impImportLeave - jumping out of a finally-protected try (EH#%u), created step (BBJ_ALWAYS) "
"block " FMT_BB "\n",
XTnum, step->bbNum);
}
#endif
callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
invalidatePreds = true;
}
else if (HBtab->HasCatchHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
!jitIsBetween(jmpAddr, tryBeg, tryEnd))
{
// We are jumping out of a catch-protected try.
//
// If we are returning from a call to a finally, then we must have a step block within a try
// that is protected by a catch. This is so when unwinding from that finally (e.g., if code within the
// finally raises an exception), the VM will find this step block, notice that it is in a protected region,
// and invoke the appropriate catch.
//
// We also need to handle a special case with the handling of ThreadAbortException. If a try/catch
// catches a ThreadAbortException (which might be because it catches a parent, e.g. System.Exception),
// and the catch doesn't call System.Threading.Thread::ResetAbort(), then when the catch returns to the VM,
// the VM will automatically re-raise the ThreadAbortException. When it does this, it uses the target
// address of the catch return as the new exception address. That is, the re-raised exception appears to
// occur at the catch return address. If this exception return address skips an enclosing try/catch that
// catches ThreadAbortException, then the enclosing try/catch will not catch the exception, as it should.
// For example:
//
// try {
// try {
// // something here raises ThreadAbortException
// LEAVE LABEL_1; // no need to stop at LABEL_2
// } catch (Exception) {
// // This catches ThreadAbortException, but doesn't call System.Threading.Thread::ResetAbort(), so
// // ThreadAbortException is re-raised by the VM at the address specified by the LEAVE opcode.
// // This is bad, since it means the outer try/catch won't get a chance to catch the re-raised
// // ThreadAbortException. So, instead, create step block LABEL_2 and LEAVE to that. We only
// // need to do this transformation if the current EH block is a try/catch that catches
// // ThreadAbortException (or one of its parents), however we might not be able to find that
// // information, so currently we do it for all catch types.
// LEAVE LABEL_1; // Convert this to LEAVE LABEL_2;
// }
// LABEL_2: LEAVE LABEL_1; // inserted by this step creation code
// } catch (ThreadAbortException) {
// }
// LABEL_1:
//
// Note that this pattern isn't theoretical: it occurs in ASP.NET, in IL code generated by the Roslyn C#
// compiler.
if ((stepType == ST_FinallyReturn) || (stepType == ST_Catch))
{
BasicBlock* catchStep;
assert(step);
if (stepType == ST_FinallyReturn)
{
assert(step->bbJumpKind == BBJ_ALWAYS);
}
else
{
assert(stepType == ST_Catch);
assert(step->bbJumpKind == BBJ_EHCATCHRET);
}
/* Create a new exit block in the try region for the existing step block to jump to in this scope */
catchStep = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
step->bbJumpDest = catchStep;
step->bbJumpDest->bbRefs++;
#if defined(TARGET_ARM)
if (stepType == ST_FinallyReturn)
{
// Mark the target of a finally return
step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
}
#endif // defined(TARGET_ARM)
/* The new block will inherit this block's weight */
catchStep->inheritWeight(block);
catchStep->bbFlags |= BBF_IMPORTED;
#ifdef DEBUG
if (verbose)
{
if (stepType == ST_FinallyReturn)
{
printf("impImportLeave - return from finally jumping out of a catch-protected try (EH#%u), new "
"BBJ_ALWAYS block " FMT_BB "\n",
XTnum, catchStep->bbNum);
}
else
{
assert(stepType == ST_Catch);
printf("impImportLeave - return from catch jumping out of a catch-protected try (EH#%u), new "
"BBJ_ALWAYS block " FMT_BB "\n",
XTnum, catchStep->bbNum);
}
}
#endif // DEBUG
/* This block is the new step */
step = catchStep;
stepType = ST_Try;
invalidatePreds = true;
}
}
}
if (step == nullptr)
{
block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
#ifdef DEBUG
if (verbose)
{
printf("impImportLeave - no enclosing finally-protected try blocks or catch handlers; convert CEE_LEAVE "
"block " FMT_BB " to BBJ_ALWAYS\n",
block->bbNum);
}
#endif
}
else
{
step->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
#if defined(TARGET_ARM)
if (stepType == ST_FinallyReturn)
{
assert(step->bbJumpKind == BBJ_ALWAYS);
// Mark the target of a finally return
step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
}
#endif // defined(TARGET_ARM)
#ifdef DEBUG
if (verbose)
{
printf("impImportLeave - final destination of step blocks set to " FMT_BB "\n", leaveTarget->bbNum);
}
#endif
// Queue up the jump target for importing
impImportBlockPending(leaveTarget);
}
if (invalidatePreds && fgComputePredsDone)
{
JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
fgRemovePreds();
}
#ifdef DEBUG
fgVerifyHandlerTab();
if (verbose)
{
printf("\nAfter import CEE_LEAVE:\n");
fgDispBasicBlocks();
fgDispHandlerTab();
}
#endif // DEBUG
}
#endif // FEATURE_EH_FUNCLETS
/*****************************************************************************/
// This is called when reimporting a leave block. It resets the JumpKind,
// JumpDest, and bbNext to the original values
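//
// Arguments:
//    block   - the block that originally ended with a BBJ_LEAVE, being reset for re-import
//    jmpAddr - IL offset of the original leave target, used to recompute bbJumpDest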
void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr)
{
#if defined(FEATURE_EH_FUNCLETS)
// With EH Funclets, while importing the leave opcode we create another block ending with BBJ_ALWAYS (call it B1)
// and the block containing leave (say B0) is marked as BBJ_CALLFINALLY. Say for some reason we reimport B0,
// it is reset (in this routine) by marking as ending with BBJ_LEAVE and further down when B0 is reimported, we
// create another BBJ_ALWAYS (call it B2). In this process B1 gets orphaned and any blocks to which B1 is the
// only predecessor are also considered orphans and attempted to be deleted.
//
// try {
// ....
// try
// {
// ....
// leave OUTSIDE; // B0 is the block containing this leave, following this would be B1
// } finally { }
// } finally { }
// OUTSIDE:
//
// In the above nested try-finally example, we create a step block (call it Bstep) which branches to a block
// where a finally would branch to (and such block is marked as finally target). Block B1 branches to step block.
// Because of re-import of B0, Bstep is also orphaned. Since Bstep is a finally target it cannot be removed. To
// work around this we will duplicate B0 (call it B0Dup) before resetting. B0Dup is marked as BBJ_CALLFINALLY and
// only serves to pair up with B1 (BBJ_ALWAYS) that got orphaned. Now during orphan block deletion B0Dup and B1
// will be treated as pair and handled correctly.
if (block->bbJumpKind == BBJ_CALLFINALLY)
{
BasicBlock* dupBlock = bbNewBasicBlock(block->bbJumpKind);
dupBlock->bbFlags = block->bbFlags;
dupBlock->bbJumpDest = block->bbJumpDest;
dupBlock->copyEHRegion(block);
dupBlock->bbCatchTyp = block->bbCatchTyp;
// Mark this block as
// a) not referenced by any other block to make sure that it gets deleted
// b) weight zero
// c) prevent from being imported
// d) as internal
// e) as rarely run
dupBlock->bbRefs = 0;
dupBlock->bbWeight = BB_ZERO_WEIGHT;
dupBlock->bbFlags |= BBF_IMPORTED | BBF_INTERNAL | BBF_RUN_RARELY;
// Insert the block right after the block which is getting reset so that BBJ_CALLFINALLY and BBJ_ALWAYS
// will be next to each other.
fgInsertBBafter(block, dupBlock);
#ifdef DEBUG
if (verbose)
{
printf("New Basic Block " FMT_BB " duplicate of " FMT_BB " created.\n", dupBlock->bbNum, block->bbNum);
}
#endif
}
#endif // FEATURE_EH_FUNCLETS
block->bbJumpKind = BBJ_LEAVE;
fgInitBBLookup();
block->bbJumpDest = fgLookupBB(jmpAddr);
// We will leave the BBJ_ALWAYS block we introduced. When it's reimported
// the BBJ_ALWAYS block will be unreachable, and will be removed after. The
// reason we don't want to remove the block at this point is that if we call
// fgInitBBLookup() again we will do it wrong as the BBJ_ALWAYS block won't be
// added and the linked list length will be different than fgBBcount.
}
/*****************************************************************************/
// Get the first non-prefix opcode. Used for verification of valid combinations
// of prefixes and actual opcodes.
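//
// Arguments:
//    codeAddr - address of the first byte of the (possibly prefixed) instruction
//    codeEndp - address one past the end of the IL code
//
// Return Value:
//    the first non-prefix opcode found, or CEE_ILLEGAL if the IL ends while skipping prefixes.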
OPCODE Compiler::impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp)
{
while (codeAddr < codeEndp)
{
OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
codeAddr += sizeof(__int8);
if (opcode == CEE_PREFIX1)
{
if (codeAddr >= codeEndp)
{
break;
}
opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
codeAddr += sizeof(__int8);
}
switch (opcode)
{
case CEE_UNALIGNED:
case CEE_VOLATILE:
case CEE_TAILCALL:
case CEE_CONSTRAINED:
case CEE_READONLY:
break;
default:
return opcode;
}
codeAddr += opcodeSizes[opcode];
}
return CEE_ILLEGAL;
}
/*****************************************************************************/
// Checks whether the opcode is a valid opcode for volatile. and unaligned. prefixes
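//
// Arguments:
//    codeAddr       - address of the first byte of the instruction following the prefix
//    codeEndp       - address one past the end of the IL code
//    volatilePrefix - true if validating a volatile. prefix, false for unaligned.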
void Compiler::impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix)
{
OPCODE opcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
if (!(
// The opcodes for all the ldind and stind variants happen to be contiguous, except stind.i.
((CEE_LDIND_I1 <= opcode) && (opcode <= CEE_STIND_R8)) || (opcode == CEE_STIND_I) ||
(opcode == CEE_LDFLD) || (opcode == CEE_STFLD) || (opcode == CEE_LDOBJ) || (opcode == CEE_STOBJ) ||
(opcode == CEE_INITBLK) || (opcode == CEE_CPBLK) ||
// volatile. prefix is allowed with the ldsfld and stsfld
(volatilePrefix && ((opcode == CEE_LDSFLD) || (opcode == CEE_STSFLD)))))
{
BADCODE("Invalid opcode for unaligned. or volatile. prefix");
}
}
/*****************************************************************************/
#ifdef DEBUG
#undef RETURN // undef contracts RETURN macro
enum controlFlow_t
{
NEXT,
CALL,
RETURN,
THROW,
BRANCH,
COND_BRANCH,
BREAK,
PHI,
META,
};
const static controlFlow_t controlFlow[] = {
#define OPDEF(c, s, pop, push, args, type, l, s1, s2, flow) flow,
#include "opcode.def"
#undef OPDEF
};
#endif // DEBUG
/*****************************************************************************
* Determine the result type of an arithmetic operation
* On 64-bit targets, inserts upcasts when native int is mixed with int32
*/
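//
// In summary, the byref cases handled below are:
//    byref - byref        => native int
//    [native] int - byref => native int
//    byref - [native] int => byref
//    byref + [native] int => byref   (and the commuted form)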
var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTree** pOp1, GenTree** pOp2)
{
var_types type = TYP_UNDEF;
GenTree* op1 = *pOp1;
GenTree* op2 = *pOp2;
// Arithmetic operations are generally only allowed with
// primitive types, but certain operations are allowed
// with byrefs
if ((oper == GT_SUB) && (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
{
if ((genActualType(op1->TypeGet()) == TYP_BYREF) && (genActualType(op2->TypeGet()) == TYP_BYREF))
{
// byref1-byref2 => gives a native int
type = TYP_I_IMPL;
}
else if (genActualTypeIsIntOrI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_BYREF))
{
// [native] int - byref => gives a native int
//
// The reason is that it is possible, in managed C++,
// to have a tree like this:
//
// -
// / \.
// / \.
// / \.
// / \.
// const(h) int addr byref
//
// <BUGNUM> VSW 318822 </BUGNUM>
//
// So here we decide to make the resulting type to be a native int.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_64BIT
if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
{
// insert an explicit upcast
op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
}
#endif // TARGET_64BIT
type = TYP_I_IMPL;
}
else
{
// byref - [native] int => gives a byref
assert(genActualType(op1->TypeGet()) == TYP_BYREF && genActualTypeIsIntOrI(op2->TypeGet()));
#ifdef TARGET_64BIT
if ((genActualType(op2->TypeGet()) != TYP_I_IMPL))
{
// insert an explicit upcast
op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
}
#endif // TARGET_64BIT
type = TYP_BYREF;
}
}
else if ((oper == GT_ADD) &&
(genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
{
// byref + [native] int => gives a byref
// (or)
// [native] int + byref => gives a byref
// only one can be a byref : byref op byref not allowed
assert(genActualType(op1->TypeGet()) != TYP_BYREF || genActualType(op2->TypeGet()) != TYP_BYREF);
assert(genActualTypeIsIntOrI(op1->TypeGet()) || genActualTypeIsIntOrI(op2->TypeGet()));
#ifdef TARGET_64BIT
if (genActualType(op2->TypeGet()) == TYP_BYREF)
{
if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
{
// insert an explicit upcast
op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
}
}
else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
{
// insert an explicit upcast
op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
}
#endif // TARGET_64BIT
type = TYP_BYREF;
}
#ifdef TARGET_64BIT
else if (genActualType(op1->TypeGet()) == TYP_I_IMPL || genActualType(op2->TypeGet()) == TYP_I_IMPL)
{
assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
// int + long => gives long
// long + int => gives long
// we get this because in the IL the long isn't Int64, it's just IntPtr
if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
{
// insert an explicit upcast
op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
}
else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
{
// insert an explicit upcast
op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
}
type = TYP_I_IMPL;
}
#else // 32-bit TARGET
else if (genActualType(op1->TypeGet()) == TYP_LONG || genActualType(op2->TypeGet()) == TYP_LONG)
{
assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
// int + long => gives long
// long + int => gives long
type = TYP_LONG;
}
#endif // TARGET_64BIT
else
{
// int + int => gives an int
assert(genActualType(op1->TypeGet()) != TYP_BYREF && genActualType(op2->TypeGet()) != TYP_BYREF);
assert(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
(varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType)));
type = genActualType(op1->gtType);
// If both operands are TYP_FLOAT, then leave it as TYP_FLOAT.
// Otherwise, turn floats into doubles
if ((type == TYP_FLOAT) && (genActualType(op2->gtType) != TYP_FLOAT))
{
assert(genActualType(op2->gtType) == TYP_DOUBLE);
type = TYP_DOUBLE;
}
}
assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_FLOAT || type == TYP_LONG || type == TYP_INT);
return type;
}
//------------------------------------------------------------------------
// impOptimizeCastClassOrIsInst: attempt to resolve a cast when jitting
//
// Arguments:
// op1 - value to cast
// pResolvedToken - resolved token for type to cast to
// isCastClass - true if this is a castclass, false if isinst
//
// Return Value:
// tree representing optimized cast, or null if no optimization possible
GenTree* Compiler::impOptimizeCastClassOrIsInst(GenTree* op1, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass)
{
assert(op1->TypeGet() == TYP_REF);
// Don't optimize for minopts or debug codegen.
if (opts.OptimizationDisabled())
{
return nullptr;
}
// See what we know about the type of the object being cast.
bool isExact = false;
bool isNonNull = false;
CORINFO_CLASS_HANDLE fromClass = gtGetClassHandle(op1, &isExact, &isNonNull);
if (fromClass != nullptr)
{
CORINFO_CLASS_HANDLE toClass = pResolvedToken->hClass;
JITDUMP("\nConsidering optimization of %s from %s%p (%s) to %p (%s)\n", isCastClass ? "castclass" : "isinst",
isExact ? "exact " : "", dspPtr(fromClass), eeGetClassName(fromClass), dspPtr(toClass),
eeGetClassName(toClass));
// Perhaps we know if the cast will succeed or fail.
TypeCompareState castResult = info.compCompHnd->compareTypesForCast(fromClass, toClass);
if (castResult == TypeCompareState::Must)
{
// Cast will succeed, result is simply op1.
JITDUMP("Cast will succeed, optimizing to simply return input\n");
return op1;
}
else if (castResult == TypeCompareState::MustNot)
{
// See if we can sharpen exactness by looking for final classes
if (!isExact)
{
isExact = impIsClassExact(fromClass);
}
// Cast to exact type will fail. Handle case where we have
// an exact type (that is, fromClass is not a subtype)
// and we're not going to throw on failure.
if (isExact && !isCastClass)
{
JITDUMP("Cast will fail, optimizing to return null\n");
GenTree* result = gtNewIconNode(0, TYP_REF);
// If the cast was fed by a box, we can remove that too.
if (op1->IsBoxedValue())
{
JITDUMP("Also removing upstream box\n");
gtTryRemoveBoxUpstreamEffects(op1);
}
return result;
}
else if (isExact)
{
JITDUMP("Not optimizing failing castclass (yet)\n");
}
else
{
JITDUMP("Can't optimize since fromClass is inexact\n");
}
}
else
{
JITDUMP("Result of cast unknown, must generate runtime test\n");
}
}
else
{
JITDUMP("\nCan't optimize since fromClass is unknown\n");
}
return nullptr;
}
//------------------------------------------------------------------------
// impCastClassOrIsInstToTree: build and import castclass/isinst
//
// Arguments:
// op1 - value to cast
// op2 - type handle for type to cast to
// pResolvedToken - resolved token from the cast operation
// isCastClass - true if this is castclass, false means isinst
//
// Return Value:
// Tree representing the cast
//
// Notes:
// May expand into a series of runtime checks or a helper call.
GenTree* Compiler::impCastClassOrIsInstToTree(
GenTree* op1, GenTree* op2, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass, IL_OFFSET ilOffset)
{
assert(op1->TypeGet() == TYP_REF);
// Optimistically assume the jit should expand this as an inline test
bool shouldExpandInline = true;
// Profitability check.
//
// Don't bother with inline expansion when jit is trying to
// generate code quickly, or the cast is in code that won't run very
// often, or the method already is pretty big.
if (compCurBB->isRunRarely() || opts.OptimizationDisabled())
{
// not worth the code expansion if jitting fast or in a rarely run block
shouldExpandInline = false;
}
else if ((op1->gtFlags & GTF_GLOB_EFFECT) && lvaHaveManyLocals())
{
// not worth creating an untracked local variable
shouldExpandInline = false;
}
else if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR) && (JitConfig.JitCastProfiling() == 1))
{
// Optimizations are enabled but we're still instrumenting (including casts)
if (isCastClass && !impIsClassExact(pResolvedToken->hClass))
{
// Usually, we make a speculative assumption that it makes sense to expand castclass
// even for non-sealed classes, but let's rely on PGO in this specific case
shouldExpandInline = false;
}
}
// Pessimistically assume the jit cannot expand this as an inline test
bool canExpandInline = false;
const CorInfoHelpFunc helper = info.compCompHnd->getCastingHelper(pResolvedToken, isCastClass);
// Legality check.
//
// Not all castclass/isinst operations can be inline expanded.
// Check legality only if an inline expansion is desirable.
if (shouldExpandInline)
{
if (isCastClass)
{
// Jit can only inline expand the normal CHKCASTCLASS helper.
canExpandInline = (helper == CORINFO_HELP_CHKCASTCLASS);
}
else
{
if (helper == CORINFO_HELP_ISINSTANCEOFCLASS)
{
// If the class is exact, the jit can expand the IsInst check inline.
canExpandInline = impIsClassExact(pResolvedToken->hClass);
}
}
}
const bool expandInline = canExpandInline && shouldExpandInline;
if (!expandInline)
{
JITDUMP("\nExpanding %s as call because %s\n", isCastClass ? "castclass" : "isinst",
canExpandInline ? "want smaller code or faster jitting" : "inline expansion not legal");
// If we CSE this class handle we prevent assertionProp from making SubType assertions
// so instead we force the CSE logic to not consider CSE-ing this class handle.
//
op2->gtFlags |= GTF_DONT_CSE;
GenTreeCall* call = gtNewHelperCallNode(helper, TYP_REF, gtNewCallArgs(op2, op1));
if (impIsCastHelperEligibleForClassProbe(call) && !impIsClassExact(pResolvedToken->hClass))
{
ClassProfileCandidateInfo* pInfo = new (this, CMK_Inlining) ClassProfileCandidateInfo;
pInfo->ilOffset = ilOffset;
pInfo->probeIndex = info.compClassProbeCount++;
call->gtClassProfileCandidateInfo = pInfo;
compCurBB->bbFlags |= BBF_HAS_CLASS_PROFILE;
}
return call;
}
JITDUMP("\nExpanding %s inline\n", isCastClass ? "castclass" : "isinst");
impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark2"));
GenTree* temp;
GenTree* condMT;
//
// expand the methodtable match:
//
// condMT ==> GT_NE
// / \.
// GT_IND op2 (typically CNS_INT)
// |
// op1Copy
//
// This can replace op1 with a GT_COMMA that evaluates op1 into a local
//
op1 = impCloneExpr(op1, &temp, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("CASTCLASS eval op1"));
//
// op1 is now known to be a non-complex tree
// thus we can use gtClone(op1) from now on
//
GenTree* op2Var = op2;
if (isCastClass)
{
op2Var = fgInsertCommaFormTemp(&op2);
lvaTable[op2Var->AsLclVarCommon()->GetLclNum()].lvIsCSE = true;
}
temp = gtNewMethodTableLookup(temp);
condMT = gtNewOperNode(GT_NE, TYP_INT, temp, op2);
GenTree* condNull;
//
// expand the null check:
//
// condNull ==> GT_EQ
// / \.
// op1Copy CNS_INT
// null
//
condNull = gtNewOperNode(GT_EQ, TYP_INT, gtClone(op1), gtNewIconNode(0, TYP_REF));
//
// expand the true and false trees for the condMT
//
GenTree* condFalse = gtClone(op1);
GenTree* condTrue;
if (isCastClass)
{
//
// use the special helper that skips the cases checked by our inlined cast
//
const CorInfoHelpFunc specialHelper = CORINFO_HELP_CHKCASTCLASS_SPECIAL;
condTrue = gtNewHelperCallNode(specialHelper, TYP_REF, gtNewCallArgs(op2Var, gtClone(op1)));
}
else
{
condTrue = gtNewIconNode(0, TYP_REF);
}
GenTree* qmarkMT;
//
// Generate first QMARK - COLON tree
//
// qmarkMT ==> GT_QMARK
// / \.
// condMT GT_COLON
// / \.
// condFalse condTrue
//
temp = new (this, GT_COLON) GenTreeColon(TYP_REF, condTrue, condFalse);
qmarkMT = gtNewQmarkNode(TYP_REF, condMT, temp->AsColon());
if (isCastClass && impIsClassExact(pResolvedToken->hClass) && condTrue->OperIs(GT_CALL))
{
// condTrue is used only for throwing InvalidCastException in case of casting to an exact class.
condTrue->AsCall()->gtCallMoreFlags |= GTF_CALL_M_DOES_NOT_RETURN;
}
GenTree* qmarkNull;
//
// Generate second QMARK - COLON tree
//
// qmarkNull ==> GT_QMARK
// / \.
// condNull GT_COLON
// / \.
// qmarkMT op1Copy
//
temp = new (this, GT_COLON) GenTreeColon(TYP_REF, gtClone(op1), qmarkMT);
qmarkNull = gtNewQmarkNode(TYP_REF, condNull, temp->AsColon());
qmarkNull->gtFlags |= GTF_QMARK_CAST_INSTOF;
// Make QMark node a top level node by spilling it.
unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark2"));
impAssignTempGen(tmp, qmarkNull, (unsigned)CHECK_SPILL_NONE);
// TODO-CQ: Is it possible op1 has a better type?
//
// See also gtGetHelperCallClassHandle where we make the same
// determination for the helper call variants.
LclVarDsc* lclDsc = lvaGetDesc(tmp);
assert(lclDsc->lvSingleDef == 0);
lclDsc->lvSingleDef = 1;
JITDUMP("Marked V%02u as a single def temp\n", tmp);
lvaSetClass(tmp, pResolvedToken->hClass);
return gtNewLclvNode(tmp, TYP_REF);
}
#ifndef DEBUG
#define assertImp(cond) ((void)0)
#else
#define assertImp(cond) \
do \
{ \
if (!(cond)) \
{ \
const int cchAssertImpBuf = 600; \
char* assertImpBuf = (char*)_alloca(cchAssertImpBuf); \
_snprintf_s(assertImpBuf, cchAssertImpBuf, cchAssertImpBuf - 1, \
"%s : Possibly bad IL with CEE_%s at offset %04Xh (op1=%s op2=%s stkDepth=%d)", #cond, \
impCurOpcName, impCurOpcOffs, op1 ? varTypeName(op1->TypeGet()) : "NULL", \
op2 ? varTypeName(op2->TypeGet()) : "NULL", verCurrentState.esStackDepth); \
assertAbort(assertImpBuf, __FILE__, __LINE__); \
} \
} while (0)
#endif // DEBUG
//------------------------------------------------------------------------
// impBlockIsInALoop: check if a block might be in a loop
//
// Arguments:
// block - block to check
//
// Returns:
// true if the block might be in a loop.
//
// Notes:
// Conservatively correct; may return true for some blocks that are
// not actually in loops.
//
bool Compiler::impBlockIsInALoop(BasicBlock* block)
{
return (compIsForInlining() && ((impInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) != 0)) ||
((block->bbFlags & BBF_BACKWARD_JUMP) != 0);
}
#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
/*****************************************************************************
* Import the IL instructions for the given basic block
*/
void Compiler::impImportBlockCode(BasicBlock* block)
{
#define _impResolveToken(kind) impResolveToken(codeAddr, &resolvedToken, kind)
#ifdef DEBUG
if (verbose)
{
printf("\nImporting " FMT_BB " (PC=%03u) of '%s'", block->bbNum, block->bbCodeOffs, info.compFullName);
}
#endif
unsigned nxtStmtIndex = impInitBlockLineInfo();
IL_OFFSET nxtStmtOffs;
CorInfoHelpFunc helper;
CorInfoIsAccessAllowedResult accessAllowedResult;
CORINFO_HELPER_DESC calloutHelper;
const BYTE* lastLoadToken = nullptr;
/* Get the tree list started */
impBeginTreeList();
#ifdef FEATURE_ON_STACK_REPLACEMENT
bool enablePatchpoints = opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && (JitConfig.TC_OnStackReplacement() > 0);
#ifdef DEBUG
// Optionally suppress patchpoints by method hash
//
static ConfigMethodRange JitEnablePatchpointRange;
JitEnablePatchpointRange.EnsureInit(JitConfig.JitEnablePatchpointRange());
const unsigned hash = impInlineRoot()->info.compMethodHash();
const bool inRange = JitEnablePatchpointRange.Contains(hash);
enablePatchpoints &= inRange;
#endif // DEBUG
if (enablePatchpoints)
{
// We don't inline at Tier0; if we ever do, we may need to rethink our approach.
// Could probably support inlines that don't introduce flow.
//
assert(!compIsForInlining());
// OSR is not yet supported for methods with explicit tail calls.
//
// But we also do not have to switch these methods to be optimized, as we should be
// able to avoid getting trapped in Tier0 code by normal call counting.
// So instead, just suppress adding patchpoints.
//
if (!compTailPrefixSeen)
{
// We only need to add patchpoints if the method can loop.
//
if (compHasBackwardJump)
{
assert(compCanHavePatchpoints());
// By default we use the "adaptive" strategy.
//
// This can create both source and target patchpoints within a given
// loop structure, which isn't ideal, but is not incorrect. We will
// just have some extra Tier0 overhead.
//
// Todo: implement support for mid-block patchpoints. If `block`
// is truly a backedge source (and not in a handler) then we should be
// able to find a stack empty point somewhere in the block.
//
const int patchpointStrategy = JitConfig.TC_PatchpointStrategy();
bool addPatchpoint = false;
bool mustUseTargetPatchpoint = false;
switch (patchpointStrategy)
{
default:
{
// Patchpoints at backedge sources, if possible, otherwise targets.
//
addPatchpoint = ((block->bbFlags & BBF_BACKWARD_JUMP_SOURCE) == BBF_BACKWARD_JUMP_SOURCE);
mustUseTargetPatchpoint = (verCurrentState.esStackDepth != 0) || block->hasHndIndex();
break;
}
case 1:
{
// Patchpoints at stackempty backedge targets.
// Note if we have loops where the IL stack is not empty on the backedge we can't patchpoint
// them.
//
// We should not have allowed OSR if there were backedges in handlers.
//
assert(!block->hasHndIndex());
addPatchpoint = ((block->bbFlags & BBF_BACKWARD_JUMP_TARGET) == BBF_BACKWARD_JUMP_TARGET) &&
(verCurrentState.esStackDepth == 0);
break;
}
case 2:
{
// Adaptive strategy.
//
// Patchpoints at backedge targets if there are multiple backedges,
// otherwise at backedge sources, if possible. Note a block can be both; if so we
// just need one patchpoint.
//
if ((block->bbFlags & BBF_BACKWARD_JUMP_TARGET) == BBF_BACKWARD_JUMP_TARGET)
{
// We don't know backedge count, so just use ref count.
//
addPatchpoint = (block->bbRefs > 1) && (verCurrentState.esStackDepth == 0);
}
if (!addPatchpoint && ((block->bbFlags & BBF_BACKWARD_JUMP_SOURCE) == BBF_BACKWARD_JUMP_SOURCE))
{
addPatchpoint = true;
mustUseTargetPatchpoint = (verCurrentState.esStackDepth != 0) || block->hasHndIndex();
// Also force target patchpoint if target block has multiple (backedge) preds.
//
if (!mustUseTargetPatchpoint)
{
for (BasicBlock* const succBlock : block->Succs(this))
{
if ((succBlock->bbNum <= block->bbNum) && (succBlock->bbRefs > 1))
{
mustUseTargetPatchpoint = true;
break;
}
}
}
}
break;
}
}
if (addPatchpoint)
{
if (mustUseTargetPatchpoint)
{
// We wanted a source patchpoint, but could not have one.
// So, add patchpoints to the backedge targets.
//
for (BasicBlock* const succBlock : block->Succs(this))
{
if (succBlock->bbNum <= block->bbNum)
{
// The succBlock had better agree it's a target.
//
assert((succBlock->bbFlags & BBF_BACKWARD_JUMP_TARGET) == BBF_BACKWARD_JUMP_TARGET);
// We may already have decided to put a patchpoint in succBlock. If not, add one.
//
if ((succBlock->bbFlags & BBF_PATCHPOINT) != 0)
{
// In some cases the target may not be stack-empty at entry.
// If so, we will bypass patchpoints for this backedge.
//
if (succBlock->bbStackDepthOnEntry() > 0)
{
JITDUMP("\nCan't set source patchpoint at " FMT_BB ", can't use target " FMT_BB
" as it has non-empty stack on entry.\n",
block->bbNum, succBlock->bbNum);
}
else
{
JITDUMP("\nCan't set source patchpoint at " FMT_BB ", using target " FMT_BB
" instead\n",
block->bbNum, succBlock->bbNum);
assert(!succBlock->hasHndIndex());
succBlock->bbFlags |= BBF_PATCHPOINT;
}
}
}
}
}
else
{
assert(!block->hasHndIndex());
block->bbFlags |= BBF_PATCHPOINT;
}
setMethodHasPatchpoint();
}
}
else
{
// Should not see backward branch targets w/o backwards branches.
// So if !compHasBackwardJump, these flags should never be set.
//
assert((block->bbFlags & (BBF_BACKWARD_JUMP_TARGET | BBF_BACKWARD_JUMP_SOURCE)) == 0);
}
}
#ifdef DEBUG
// As a stress test, we can place patchpoints at the start of any block
// that is a stack empty point and is not within a handler.
//
// Todo: enable for mid-block stack empty points too.
//
const int offsetOSR = JitConfig.JitOffsetOnStackReplacement();
const int randomOSR = JitConfig.JitRandomOnStackReplacement();
const bool tryOffsetOSR = offsetOSR >= 0;
const bool tryRandomOSR = randomOSR > 0;
if (compCanHavePatchpoints() && (tryOffsetOSR || tryRandomOSR) && (verCurrentState.esStackDepth == 0) &&
!block->hasHndIndex() && ((block->bbFlags & BBF_PATCHPOINT) == 0))
{
// Block start can have a patchpoint. See if we should add one.
//
bool addPatchpoint = false;
// Specific offset?
//
if (tryOffsetOSR)
{
if (impCurOpcOffs == (unsigned)offsetOSR)
{
addPatchpoint = true;
}
}
// Random?
//
else
{
// Reuse the random inliner's random state.
// Note m_inlineStrategy is always created, even if we're not inlining.
//
CLRRandom* const random = impInlineRoot()->m_inlineStrategy->GetRandom(randomOSR);
const int randomValue = (int)random->Next(100);
addPatchpoint = (randomValue < randomOSR);
}
if (addPatchpoint)
{
block->bbFlags |= BBF_PATCHPOINT;
setMethodHasPatchpoint();
}
JITDUMP("\n** %s patchpoint%s added to " FMT_BB " (il offset %u)\n", tryOffsetOSR ? "offset" : "random",
addPatchpoint ? "" : " not", block->bbNum, impCurOpcOffs);
}
#endif // DEBUG
}
// Mark stack-empty rare blocks to be considered for partial compilation.
//
// Ideally these are conditionally executed blocks -- if the method is going
// to unconditionally throw, there's not as much to be gained by deferring jitting.
// For now, we just screen out the entry bb.
//
// In general we might want track all the IL stack empty points so we can
// propagate rareness back through flow and place the partial compilation patchpoints "earlier"
// so there are fewer overall.
//
// Note unlike OSR, it's ok to forgo these.
//
// Todo: stress mode...
//
if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && (JitConfig.TC_PartialCompilation() > 0) &&
compCanHavePatchpoints() && !compTailPrefixSeen)
{
// Is this block a good place for partial compilation?
//
if ((block != fgFirstBB) && block->isRunRarely() && (verCurrentState.esStackDepth == 0) &&
((block->bbFlags & BBF_PATCHPOINT) == 0) && !block->hasHndIndex())
{
JITDUMP("\nBlock " FMT_BB " will be a partial compilation patchpoint -- not importing\n", block->bbNum);
block->bbFlags |= BBF_PARTIAL_COMPILATION_PATCHPOINT;
setMethodHasPartialCompilationPatchpoint();
// Change block to BBJ_THROW so we won't trigger importation of successors.
//
block->bbJumpKind = BBJ_THROW;
// If this method has an explicit generic context, the only uses of it may be in
// the IL for this block. So assume it's used.
//
if (info.compMethodInfo->options &
(CORINFO_GENERICS_CTXT_FROM_METHODDESC | CORINFO_GENERICS_CTXT_FROM_METHODTABLE))
{
lvaGenericsContextInUse = true;
}
return;
}
}
#endif // FEATURE_ON_STACK_REPLACEMENT
/* Walk the opcodes that comprise the basic block */
const BYTE* codeAddr = info.compCode + block->bbCodeOffs;
const BYTE* codeEndp = info.compCode + block->bbCodeOffsEnd;
IL_OFFSET opcodeOffs = block->bbCodeOffs;
IL_OFFSET lastSpillOffs = opcodeOffs;
signed jmpDist;
/* remember the start of the delegate creation sequence (used for verification) */
const BYTE* delegateCreateStart = nullptr;
int prefixFlags = 0;
bool explicitTailCall, constraintCall, readonlyCall;
typeInfo tiRetVal;
unsigned numArgs = info.compArgsCount;
/* Now process all the opcodes in the block */
var_types callTyp = TYP_COUNT;
OPCODE prevOpcode = CEE_ILLEGAL;
if (block->bbCatchTyp)
{
if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
{
impCurStmtOffsSet(block->bbCodeOffs);
}
// We will spill the GT_CATCH_ARG and the input of the BB_QMARK block
        // to a temp. This is a trade-off for code simplicity
impSpillSpecialSideEff();
}
while (codeAddr < codeEndp)
{
#ifdef FEATURE_READYTORUN
bool usingReadyToRunHelper = false;
#endif
CORINFO_RESOLVED_TOKEN resolvedToken;
CORINFO_RESOLVED_TOKEN constrainedResolvedToken = {};
CORINFO_CALL_INFO callInfo;
CORINFO_FIELD_INFO fieldInfo;
tiRetVal = typeInfo(); // Default type info
//---------------------------------------------------------------------
/* We need to restrict the max tree depth as many of the Compiler
functions are recursive. We do this by spilling the stack */
if (verCurrentState.esStackDepth)
{
            /* Has it been a while since we last saw a non-empty stack (which
               guarantees that the tree depth isn't accumulating)? */
if ((opcodeOffs - lastSpillOffs) > MAX_TREE_SIZE && impCanSpillNow(prevOpcode))
{
impSpillStackEnsure();
lastSpillOffs = opcodeOffs;
}
}
else
{
lastSpillOffs = opcodeOffs;
impBoxTempInUse = false; // nothing on the stack, box temp OK to use again
}
/* Compute the current instr offset */
opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
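        // Track IL statement boundaries for debug info. In DEBUG builds this block runs
        // unconditionally; otherwise it runs only when debug info is being generated.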
#ifndef DEBUG
if (opts.compDbgInfo)
#endif
{
nxtStmtOffs =
(nxtStmtIndex < info.compStmtOffsetsCount) ? info.compStmtOffsets[nxtStmtIndex] : BAD_IL_OFFSET;
/* Have we reached the next stmt boundary ? */
if (nxtStmtOffs != BAD_IL_OFFSET && opcodeOffs >= nxtStmtOffs)
{
assert(nxtStmtOffs == info.compStmtOffsets[nxtStmtIndex]);
if (verCurrentState.esStackDepth != 0 && opts.compDbgCode)
{
/* We need to provide accurate IP-mapping at this point.
So spill anything on the stack so that it will form
gtStmts with the correct stmt offset noted */
impSpillStackEnsure(true);
}
// Have we reported debug info for any tree?
if (impCurStmtDI.IsValid() && opts.compDbgCode)
{
GenTree* placeHolder = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
impAppendTree(placeHolder, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);
assert(!impCurStmtDI.IsValid());
}
if (!impCurStmtDI.IsValid())
{
/* Make sure that nxtStmtIndex is in sync with opcodeOffs.
If opcodeOffs has gone past nxtStmtIndex, catch up */
while ((nxtStmtIndex + 1) < info.compStmtOffsetsCount &&
info.compStmtOffsets[nxtStmtIndex + 1] <= opcodeOffs)
{
nxtStmtIndex++;
}
/* Go to the new stmt */
impCurStmtOffsSet(info.compStmtOffsets[nxtStmtIndex]);
/* Update the stmt boundary index */
nxtStmtIndex++;
assert(nxtStmtIndex <= info.compStmtOffsetsCount);
/* Are there any more line# entries after this one? */
if (nxtStmtIndex < info.compStmtOffsetsCount)
{
/* Remember where the next line# starts */
nxtStmtOffs = info.compStmtOffsets[nxtStmtIndex];
}
else
{
/* No more line# entries */
nxtStmtOffs = BAD_IL_OFFSET;
}
}
}
else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES) &&
(verCurrentState.esStackDepth == 0))
{
/* At stack-empty locations, we have already added the tree to
the stmt list with the last offset. We just need to update
impCurStmtDI
*/
impCurStmtOffsSet(opcodeOffs);
}
else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) &&
impOpcodeIsCallSiteBoundary(prevOpcode))
{
/* Make sure we have a type cached */
assert(callTyp != TYP_COUNT);
if (callTyp == TYP_VOID)
{
impCurStmtOffsSet(opcodeOffs);
}
else if (opts.compDbgCode)
{
impSpillStackEnsure(true);
impCurStmtOffsSet(opcodeOffs);
}
}
else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::NOP_BOUNDARIES) && (prevOpcode == CEE_NOP))
{
if (opts.compDbgCode)
{
impSpillStackEnsure(true);
}
impCurStmtOffsSet(opcodeOffs);
}
assert(!impCurStmtDI.IsValid() || (nxtStmtOffs == BAD_IL_OFFSET) ||
(impCurStmtDI.GetLocation().GetOffset() <= nxtStmtOffs));
}
CORINFO_CLASS_HANDLE clsHnd = DUMMY_INIT(NULL);
CORINFO_CLASS_HANDLE ldelemClsHnd = DUMMY_INIT(NULL);
CORINFO_CLASS_HANDLE stelemClsHnd = DUMMY_INIT(NULL);
var_types lclTyp, ovflType = TYP_UNKNOWN;
GenTree* op1 = DUMMY_INIT(NULL);
GenTree* op2 = DUMMY_INIT(NULL);
GenTree* newObjThisPtr = DUMMY_INIT(NULL);
bool uns = DUMMY_INIT(false);
bool isLocal = false;
/* Get the next opcode and the size of its parameters */
OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
codeAddr += sizeof(__int8);
#ifdef DEBUG
impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
JITDUMP("\n [%2u] %3u (0x%03x) ", verCurrentState.esStackDepth, impCurOpcOffs, impCurOpcOffs);
#endif
DECODE_OPCODE:
// Return if any previous code has caused inline to fail.
if (compDonotInline())
{
return;
}
/* Get the size of additional parameters */
signed int sz = opcodeSizes[opcode];
#ifdef DEBUG
clsHnd = NO_CLASS_HANDLE;
lclTyp = TYP_COUNT;
callTyp = TYP_COUNT;
impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
impCurOpcName = opcodeNames[opcode];
if (verbose && (opcode != CEE_PREFIX1))
{
printf("%s", impCurOpcName);
}
/* Use assertImp() to display the opcode */
op1 = op2 = nullptr;
#endif
/* See what kind of an opcode we have, then */
unsigned mflags = 0;
unsigned clsFlags = 0;
switch (opcode)
{
unsigned lclNum;
var_types type;
GenTree* op3;
genTreeOps oper;
unsigned size;
int val;
CORINFO_SIG_INFO sig;
IL_OFFSET jmpAddr;
bool ovfl, unordered, callNode;
bool ldstruct;
CORINFO_CLASS_HANDLE tokenType;
union {
int intVal;
float fltVal;
__int64 lngVal;
double dblVal;
} cval;
case CEE_PREFIX1:
opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
codeAddr += sizeof(__int8);
goto DECODE_OPCODE;
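            // Shared statement-append tails used by many opcodes below:
            //   SPILL_APPEND -- spill interfering stack entries, then append 'op1' (CHECK_SPILL_ALL)
            //   APPEND       -- append 'op1' without spilling (CHECK_SPILL_NONE)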
SPILL_APPEND:
// We need to call impSpillLclRefs() for a struct type lclVar.
// This is because there may be loads of that lclVar on the evaluation stack, and
// we need to ensure that those loads are completed before we modify it.
if ((op1->OperGet() == GT_ASG) && varTypeIsStruct(op1->gtGetOp1()))
{
GenTree* lhs = op1->gtGetOp1();
GenTreeLclVarCommon* lclVar = nullptr;
if (lhs->gtOper == GT_LCL_VAR)
{
lclVar = lhs->AsLclVarCommon();
}
else if (lhs->OperIsBlk())
{
// Check if LHS address is within some struct local, to catch
// cases where we're updating the struct by something other than a stfld
GenTree* addr = lhs->AsBlk()->Addr();
// Catches ADDR(LCL_VAR), or ADD(ADDR(LCL_VAR),CNS_INT))
lclVar = addr->IsLocalAddrExpr();
// Catches ADDR(FIELD(... ADDR(LCL_VAR)))
if (lclVar == nullptr)
{
GenTree* lclTree = nullptr;
if (impIsAddressInLocal(addr, &lclTree))
{
lclVar = lclTree->AsLclVarCommon();
}
}
}
if (lclVar != nullptr)
{
impSpillLclRefs(lclVar->GetLclNum());
}
}
/* Append 'op1' to the list of statements */
impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI);
goto DONE_APPEND;
APPEND:
/* Append 'op1' to the list of statements */
impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);
goto DONE_APPEND;
DONE_APPEND:
#ifdef DEBUG
// Remember at which BC offset the tree was finished
impNoteLastILoffs();
#endif
break;
case CEE_LDNULL:
impPushNullObjRefOnStack();
break;
case CEE_LDC_I4_M1:
case CEE_LDC_I4_0:
case CEE_LDC_I4_1:
case CEE_LDC_I4_2:
case CEE_LDC_I4_3:
case CEE_LDC_I4_4:
case CEE_LDC_I4_5:
case CEE_LDC_I4_6:
case CEE_LDC_I4_7:
case CEE_LDC_I4_8:
cval.intVal = (opcode - CEE_LDC_I4_0);
assert(-1 <= cval.intVal && cval.intVal <= 8);
goto PUSH_I4CON;
case CEE_LDC_I4_S:
cval.intVal = getI1LittleEndian(codeAddr);
goto PUSH_I4CON;
case CEE_LDC_I4:
cval.intVal = getI4LittleEndian(codeAddr);
goto PUSH_I4CON;
PUSH_I4CON:
JITDUMP(" %d", cval.intVal);
impPushOnStack(gtNewIconNode(cval.intVal), typeInfo(TI_INT));
break;
case CEE_LDC_I8:
cval.lngVal = getI8LittleEndian(codeAddr);
JITDUMP(" 0x%016llx", cval.lngVal);
impPushOnStack(gtNewLconNode(cval.lngVal), typeInfo(TI_LONG));
break;
case CEE_LDC_R8:
cval.dblVal = getR8LittleEndian(codeAddr);
JITDUMP(" %#.17g", cval.dblVal);
impPushOnStack(gtNewDconNode(cval.dblVal), typeInfo(TI_DOUBLE));
break;
case CEE_LDC_R4:
cval.dblVal = getR4LittleEndian(codeAddr);
JITDUMP(" %#.17g", cval.dblVal);
impPushOnStack(gtNewDconNode(cval.dblVal, TYP_FLOAT), typeInfo(TI_DOUBLE));
break;
case CEE_LDSTR:
val = getU4LittleEndian(codeAddr);
JITDUMP(" %08X", val);
impPushOnStack(gtNewSconNode(val, info.compScopeHnd), tiRetVal);
break;
case CEE_LDARG:
lclNum = getU2LittleEndian(codeAddr);
JITDUMP(" %u", lclNum);
impLoadArg(lclNum, opcodeOffs + sz + 1);
break;
case CEE_LDARG_S:
lclNum = getU1LittleEndian(codeAddr);
JITDUMP(" %u", lclNum);
impLoadArg(lclNum, opcodeOffs + sz + 1);
break;
case CEE_LDARG_0:
case CEE_LDARG_1:
case CEE_LDARG_2:
case CEE_LDARG_3:
lclNum = (opcode - CEE_LDARG_0);
assert(lclNum >= 0 && lclNum < 4);
impLoadArg(lclNum, opcodeOffs + sz + 1);
break;
case CEE_LDLOC:
lclNum = getU2LittleEndian(codeAddr);
JITDUMP(" %u", lclNum);
impLoadLoc(lclNum, opcodeOffs + sz + 1);
break;
case CEE_LDLOC_S:
lclNum = getU1LittleEndian(codeAddr);
JITDUMP(" %u", lclNum);
impLoadLoc(lclNum, opcodeOffs + sz + 1);
break;
case CEE_LDLOC_0:
case CEE_LDLOC_1:
case CEE_LDLOC_2:
case CEE_LDLOC_3:
lclNum = (opcode - CEE_LDLOC_0);
assert(lclNum >= 0 && lclNum < 4);
impLoadLoc(lclNum, opcodeOffs + sz + 1);
break;
case CEE_STARG:
lclNum = getU2LittleEndian(codeAddr);
goto STARG;
case CEE_STARG_S:
lclNum = getU1LittleEndian(codeAddr);
STARG:
JITDUMP(" %u", lclNum);
if (compIsForInlining())
{
op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
noway_assert(op1->gtOper == GT_LCL_VAR);
lclNum = op1->AsLclVar()->GetLclNum();
goto VAR_ST_VALID;
}
lclNum = compMapILargNum(lclNum); // account for possible hidden param
assertImp(lclNum < numArgs);
if (lclNum == info.compThisArg)
{
lclNum = lvaArg0Var;
}
// We should have seen this arg write in the prescan
assert(lvaTable[lclNum].lvHasILStoreOp);
goto VAR_ST;
case CEE_STLOC:
lclNum = getU2LittleEndian(codeAddr);
isLocal = true;
JITDUMP(" %u", lclNum);
goto LOC_ST;
case CEE_STLOC_S:
lclNum = getU1LittleEndian(codeAddr);
isLocal = true;
JITDUMP(" %u", lclNum);
goto LOC_ST;
case CEE_STLOC_0:
case CEE_STLOC_1:
case CEE_STLOC_2:
case CEE_STLOC_3:
isLocal = true;
lclNum = (opcode - CEE_STLOC_0);
assert(lclNum >= 0 && lclNum < 4);
LOC_ST:
if (compIsForInlining())
{
lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
/* Have we allocated a temp for this local? */
lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline stloc first use temp"));
goto _PopValue;
}
lclNum += numArgs;
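            // Shared tail for starg/stloc: validate the target local, pop the value being
            // stored, coerce it to the local's type, and build the assignment.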
VAR_ST:
if (lclNum >= info.compLocalsCount && lclNum != lvaArg0Var)
{
BADCODE("Bad IL");
}
VAR_ST_VALID:
/* if it is a struct assignment, make certain we don't overflow the buffer */
assert(lclTyp != TYP_STRUCT || lvaLclSize(lclNum) >= info.compCompHnd->getClassSize(clsHnd));
if (lvaTable[lclNum].lvNormalizeOnLoad())
{
lclTyp = lvaGetRealType(lclNum);
}
else
{
lclTyp = lvaGetActualType(lclNum);
}
_PopValue:
/* Pop the value being assigned */
{
StackEntry se = impPopStack();
clsHnd = se.seTypeInfo.GetClassHandle();
op1 = se.val;
tiRetVal = se.seTypeInfo;
}
#ifdef FEATURE_SIMD
if (varTypeIsSIMD(lclTyp) && (lclTyp != op1->TypeGet()))
{
assert(op1->TypeGet() == TYP_STRUCT);
op1->gtType = lclTyp;
}
#endif // FEATURE_SIMD
op1 = impImplicitIorI4Cast(op1, lclTyp);
#ifdef TARGET_64BIT
                // Downcast the TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
if (varTypeIsI(op1->TypeGet()) && (genActualType(lclTyp) == TYP_INT))
{
op1 = gtNewCastNode(TYP_INT, op1, false, TYP_INT);
}
#endif // TARGET_64BIT
// We had better assign it a value of the correct type
assertImp(
genActualType(lclTyp) == genActualType(op1->gtType) ||
(genActualType(lclTyp) == TYP_I_IMPL && op1->IsLocalAddrExpr() != nullptr) ||
(genActualType(lclTyp) == TYP_I_IMPL && (op1->gtType == TYP_BYREF || op1->gtType == TYP_REF)) ||
(genActualType(op1->gtType) == TYP_I_IMPL && lclTyp == TYP_BYREF) ||
(varTypeIsFloating(lclTyp) && varTypeIsFloating(op1->TypeGet())) ||
((genActualType(lclTyp) == TYP_BYREF) && genActualType(op1->TypeGet()) == TYP_REF));
/* If op1 is "&var" then its type is the transient "*" and it can
be used either as TYP_BYREF or TYP_I_IMPL */
if (op1->IsLocalAddrExpr() != nullptr)
{
assertImp(genActualType(lclTyp) == TYP_I_IMPL || lclTyp == TYP_BYREF);
/* When "&var" is created, we assume it is a byref. If it is
being assigned to a TYP_I_IMPL var, change the type to
prevent unnecessary GC info */
if (genActualType(lclTyp) == TYP_I_IMPL)
{
op1->gtType = TYP_I_IMPL;
}
}
// If this is a local and the local is a ref type, see
// if we can improve type information based on the
// value being assigned.
if (isLocal && (lclTyp == TYP_REF))
{
// We should have seen a stloc in our IL prescan.
assert(lvaTable[lclNum].lvHasILStoreOp);
// Is there just one place this local is defined?
const bool isSingleDefLocal = lvaTable[lclNum].lvSingleDef;
// Conservative check that there is just one
// definition that reaches this store.
const bool hasSingleReachingDef = (block->bbStackDepthOnEntry() == 0);
if (isSingleDefLocal && hasSingleReachingDef)
{
lvaUpdateClass(lclNum, op1, clsHnd);
}
}
/* Filter out simple assignments to itself */
if (op1->gtOper == GT_LCL_VAR && lclNum == op1->AsLclVarCommon()->GetLclNum())
{
if (opts.compDbgCode)
{
op1 = gtNewNothingNode();
goto SPILL_APPEND;
}
else
{
break;
}
}
/* Create the assignment node */
op2 = gtNewLclvNode(lclNum, lclTyp DEBUGARG(opcodeOffs + sz + 1));
/* If the local is aliased or pinned, we need to spill calls and
indirections from the stack. */
if ((lvaTable[lclNum].IsAddressExposed() || lvaTable[lclNum].lvHasLdAddrOp ||
lvaTable[lclNum].lvPinned) &&
(verCurrentState.esStackDepth > 0))
{
impSpillSideEffects(false,
(unsigned)CHECK_SPILL_ALL DEBUGARG("Local could be aliased or is pinned"));
}
/* Spill any refs to the local from the stack */
impSpillLclRefs(lclNum);
// We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
// We insert a cast to the dest 'op2' type
//
if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
varTypeIsFloating(op2->gtType))
{
op1 = gtNewCastNode(op2->TypeGet(), op1, false, op2->TypeGet());
}
if (varTypeIsStruct(lclTyp))
{
op1 = impAssignStruct(op2, op1, clsHnd, (unsigned)CHECK_SPILL_ALL);
}
else
{
op1 = gtNewAssignNode(op2, op1);
}
goto SPILL_APPEND;
case CEE_LDLOCA:
lclNum = getU2LittleEndian(codeAddr);
goto LDLOCA;
case CEE_LDLOCA_S:
lclNum = getU1LittleEndian(codeAddr);
LDLOCA:
JITDUMP(" %u", lclNum);
if (compIsForInlining())
{
// Get the local type
lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
/* Have we allocated a temp for this local? */
lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline ldloca(s) first use temp"));
assert(!lvaGetDesc(lclNum)->lvNormalizeOnLoad());
op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum));
goto _PUSH_ADRVAR;
}
lclNum += numArgs;
assertImp(lclNum < info.compLocalsCount);
goto ADRVAR;
case CEE_LDARGA:
lclNum = getU2LittleEndian(codeAddr);
goto LDARGA;
case CEE_LDARGA_S:
lclNum = getU1LittleEndian(codeAddr);
LDARGA:
JITDUMP(" %u", lclNum);
Verify(lclNum < info.compILargsCount, "bad arg num");
if (compIsForInlining())
{
                // In IL, LDARGA(_S) is used to load the byref managed pointer of a struct argument,
// followed by a ldfld to load the field.
op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
if (op1->gtOper != GT_LCL_VAR)
{
compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDARGA_NOT_LOCAL_VAR);
return;
}
assert(op1->gtOper == GT_LCL_VAR);
goto _PUSH_ADRVAR;
}
lclNum = compMapILargNum(lclNum); // account for possible hidden param
assertImp(lclNum < numArgs);
if (lclNum == info.compThisArg)
{
lclNum = lvaArg0Var;
}
goto ADRVAR;
ADRVAR:
op1 = impCreateLocalNode(lclNum DEBUGARG(opcodeOffs + sz + 1));
_PUSH_ADRVAR:
assert(op1->gtOper == GT_LCL_VAR);
/* Note that this is supposed to create the transient type "*"
which may be used as a TYP_I_IMPL. However we catch places
where it is used as a TYP_I_IMPL and change the node if needed.
Thus we are pessimistic and may report byrefs in the GC info
where it was not absolutely needed, but it is safer this way.
*/
op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
                // &aliasedVar doesn't need GTF_GLOB_REF, though aliasedVar does
assert((op1->gtFlags & GTF_GLOB_REF) == 0);
tiRetVal = lvaTable[lclNum].lvVerTypeInfo;
impPushOnStack(op1, tiRetVal);
break;
case CEE_ARGLIST:
if (!info.compIsVarArgs)
{
BADCODE("arglist in non-vararg method");
}
assertImp((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG);
                /* The ARGLIST cookie is a hidden 'last' parameter; we have already
                   adjusted the arg count because this is like fetching the last param */
assertImp(0 < numArgs);
lclNum = lvaVarargsHandleArg;
op1 = gtNewLclvNode(lclNum, TYP_I_IMPL DEBUGARG(opcodeOffs + sz + 1));
op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
impPushOnStack(op1, tiRetVal);
break;
case CEE_ENDFINALLY:
if (compIsForInlining())
{
assert(!"Shouldn't have exception handlers in the inliner!");
compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFINALLY);
return;
}
if (verCurrentState.esStackDepth > 0)
{
impEvalSideEffects();
}
if (info.compXcptnsCount == 0)
{
BADCODE("endfinally outside finally");
}
assert(verCurrentState.esStackDepth == 0);
op1 = gtNewOperNode(GT_RETFILT, TYP_VOID, nullptr);
goto APPEND;
case CEE_ENDFILTER:
if (compIsForInlining())
{
assert(!"Shouldn't have exception handlers in the inliner!");
compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFILTER);
return;
}
block->bbSetRunRarely(); // filters are rare
if (info.compXcptnsCount == 0)
{
BADCODE("endfilter outside filter");
}
op1 = impPopStack().val;
assertImp(op1->gtType == TYP_INT);
if (!bbInFilterILRange(block))
{
BADCODE("EndFilter outside a filter handler");
}
/* Mark current bb as end of filter */
assert(compCurBB->bbFlags & BBF_DONT_REMOVE);
assert(compCurBB->bbJumpKind == BBJ_EHFILTERRET);
/* Mark catch handler as successor */
op1 = gtNewOperNode(GT_RETFILT, op1->TypeGet(), op1);
if (verCurrentState.esStackDepth != 0)
{
verRaiseVerifyException(INDEBUG("stack must be 1 on end of filter") DEBUGARG(__FILE__)
DEBUGARG(__LINE__));
}
goto APPEND;
case CEE_RET:
prefixFlags &= ~PREFIX_TAILCALL; // ret without call before it
RET:
if (!impReturnInstruction(prefixFlags, opcode))
{
return; // abort
}
else
{
break;
}
case CEE_JMP:
assert(!compIsForInlining());
if ((info.compFlags & CORINFO_FLG_SYNCH) || block->hasTryIndex() || block->hasHndIndex())
{
/* CEE_JMP does not make sense in some "protected" regions. */
BADCODE("Jmp not allowed in protected region");
}
if (opts.IsReversePInvoke())
{
BADCODE("Jmp not allowed in reverse P/Invoke");
}
if (verCurrentState.esStackDepth != 0)
{
BADCODE("Stack must be empty after CEE_JMPs");
}
_impResolveToken(CORINFO_TOKENKIND_Method);
JITDUMP(" %08X", resolvedToken.token);
/* The signature of the target has to be identical to ours.
At least check that argCnt and returnType match */
eeGetMethodSig(resolvedToken.hMethod, &sig);
if (sig.numArgs != info.compMethodInfo->args.numArgs ||
sig.retType != info.compMethodInfo->args.retType ||
sig.callConv != info.compMethodInfo->args.callConv)
{
BADCODE("Incompatible target for CEE_JMPs");
}
op1 = new (this, GT_JMP) GenTreeVal(GT_JMP, TYP_VOID, (size_t)resolvedToken.hMethod);
/* Mark the basic block as being a JUMP instead of RETURN */
block->bbFlags |= BBF_HAS_JMP;
/* Set this flag to make sure register arguments have a location assigned
* even if we don't use them inside the method */
compJmpOpUsed = true;
fgNoStructPromotion = true;
goto APPEND;
case CEE_LDELEMA:
assertImp(sz == sizeof(unsigned));
_impResolveToken(CORINFO_TOKENKIND_Class);
JITDUMP(" %08X", resolvedToken.token);
ldelemClsHnd = resolvedToken.hClass;
// If it's a value class array we just do a simple address-of
if (eeIsValueClass(ldelemClsHnd))
{
CorInfoType cit = info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd);
if (cit == CORINFO_TYPE_UNDEF)
{
lclTyp = TYP_STRUCT;
}
else
{
lclTyp = JITtype2varType(cit);
}
goto ARR_LD_POST_VERIFY;
}
                // Similarly, if it's a readonly access, we can do a simple address-of
// without doing a runtime type-check
if (prefixFlags & PREFIX_READONLY)
{
lclTyp = TYP_REF;
goto ARR_LD_POST_VERIFY;
}
// Otherwise we need the full helper function with run-time type check
op1 = impTokenToHandle(&resolvedToken);
if (op1 == nullptr)
{ // compDonotInline()
return;
}
{
GenTreeCall::Use* args = gtNewCallArgs(op1); // Type
args = gtPrependNewCallArg(impPopStack().val, args); // index
args = gtPrependNewCallArg(impPopStack().val, args); // array
op1 = gtNewHelperCallNode(CORINFO_HELP_LDELEMA_REF, TYP_BYREF, args);
}
impPushOnStack(op1, tiRetVal);
break;
// ldelem for reference and value types
case CEE_LDELEM:
assertImp(sz == sizeof(unsigned));
_impResolveToken(CORINFO_TOKENKIND_Class);
JITDUMP(" %08X", resolvedToken.token);
ldelemClsHnd = resolvedToken.hClass;
// If it's a reference type or generic variable type
// then just generate code as though it's a ldelem.ref instruction
if (!eeIsValueClass(ldelemClsHnd))
{
lclTyp = TYP_REF;
opcode = CEE_LDELEM_REF;
}
else
{
CorInfoType jitTyp = info.compCompHnd->asCorInfoType(ldelemClsHnd);
lclTyp = JITtype2varType(jitTyp);
tiRetVal = verMakeTypeInfo(ldelemClsHnd); // precise type always needed for struct
tiRetVal.NormaliseForStack();
}
goto ARR_LD_POST_VERIFY;
case CEE_LDELEM_I1:
lclTyp = TYP_BYTE;
goto ARR_LD;
case CEE_LDELEM_I2:
lclTyp = TYP_SHORT;
goto ARR_LD;
case CEE_LDELEM_I:
lclTyp = TYP_I_IMPL;
goto ARR_LD;
// Should be UINT, but since no platform widens 4->8 bytes it doesn't matter
// and treating it as TYP_INT avoids other asserts.
case CEE_LDELEM_U4:
lclTyp = TYP_INT;
goto ARR_LD;
case CEE_LDELEM_I4:
lclTyp = TYP_INT;
goto ARR_LD;
case CEE_LDELEM_I8:
lclTyp = TYP_LONG;
goto ARR_LD;
case CEE_LDELEM_REF:
lclTyp = TYP_REF;
goto ARR_LD;
case CEE_LDELEM_R4:
lclTyp = TYP_FLOAT;
goto ARR_LD;
case CEE_LDELEM_R8:
lclTyp = TYP_DOUBLE;
goto ARR_LD;
case CEE_LDELEM_U1:
lclTyp = TYP_UBYTE;
goto ARR_LD;
case CEE_LDELEM_U2:
lclTyp = TYP_USHORT;
goto ARR_LD;
ARR_LD:
ARR_LD_POST_VERIFY:
/* Pull the index value and array address */
op2 = impPopStack().val;
op1 = impPopStack().val;
assertImp(op1->gtType == TYP_REF);
/* Check for null pointer - in the inliner case we simply abort */
if (compIsForInlining())
{
if (op1->gtOper == GT_CNS_INT)
{
compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NULL_FOR_LDELEM);
return;
}
}
/* Mark the block as containing an index expression */
if (op1->gtOper == GT_LCL_VAR)
{
if (op2->gtOper == GT_LCL_VAR || op2->gtOper == GT_CNS_INT || op2->gtOper == GT_ADD)
{
block->bbFlags |= BBF_HAS_IDX_LEN;
optMethodFlags |= OMF_HAS_ARRAYREF;
}
}
/* Create the index node and push it on the stack */
op1 = gtNewIndexRef(lclTyp, op1, op2);
ldstruct = (opcode == CEE_LDELEM && lclTyp == TYP_STRUCT);
if ((opcode == CEE_LDELEMA) || ldstruct ||
(ldelemClsHnd != DUMMY_INIT(NULL) && eeIsValueClass(ldelemClsHnd)))
{
assert(ldelemClsHnd != DUMMY_INIT(NULL));
// remember the element size
if (lclTyp == TYP_REF)
{
op1->AsIndex()->gtIndElemSize = TARGET_POINTER_SIZE;
}
else
{
// If ldElemClass is precisely a primitive type, use that, otherwise, preserve the struct type.
if (info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd) == CORINFO_TYPE_UNDEF)
{
op1->AsIndex()->gtStructElemClass = ldelemClsHnd;
}
assert(lclTyp != TYP_STRUCT || op1->AsIndex()->gtStructElemClass != nullptr);
if (lclTyp == TYP_STRUCT)
{
size = info.compCompHnd->getClassSize(ldelemClsHnd);
op1->AsIndex()->gtIndElemSize = size;
op1->gtType = lclTyp;
}
}
if ((opcode == CEE_LDELEMA) || ldstruct)
{
// wrap it in a &
lclTyp = TYP_BYREF;
op1 = gtNewOperNode(GT_ADDR, lclTyp, op1);
}
else
{
assert(lclTyp != TYP_STRUCT);
}
}
if (ldstruct)
{
// Create an OBJ for the result
op1 = gtNewObjNode(ldelemClsHnd, op1);
op1->gtFlags |= GTF_EXCEPT;
}
impPushOnStack(op1, tiRetVal);
break;
// stelem for reference and value types
case CEE_STELEM:
assertImp(sz == sizeof(unsigned));
_impResolveToken(CORINFO_TOKENKIND_Class);
JITDUMP(" %08X", resolvedToken.token);
stelemClsHnd = resolvedToken.hClass;
// If it's a reference type just behave as though it's a stelem.ref instruction
if (!eeIsValueClass(stelemClsHnd))
{
goto STELEM_REF_POST_VERIFY;
}
// Otherwise extract the type
{
CorInfoType jitTyp = info.compCompHnd->asCorInfoType(stelemClsHnd);
lclTyp = JITtype2varType(jitTyp);
goto ARR_ST_POST_VERIFY;
}
case CEE_STELEM_REF:
STELEM_REF_POST_VERIFY:
if (opts.OptimizationEnabled())
{
GenTree* array = impStackTop(2).val;
GenTree* value = impStackTop().val;
// Is this a case where we can skip the covariant store check?
if (impCanSkipCovariantStoreCheck(value, array))
{
lclTyp = TYP_REF;
goto ARR_ST_POST_VERIFY;
}
}
// Else call a helper function to do the assignment
op1 = gtNewHelperCallNode(CORINFO_HELP_ARRADDR_ST, TYP_VOID, impPopCallArgs(3, nullptr));
goto SPILL_APPEND;
case CEE_STELEM_I1:
lclTyp = TYP_BYTE;
goto ARR_ST;
case CEE_STELEM_I2:
lclTyp = TYP_SHORT;
goto ARR_ST;
case CEE_STELEM_I:
lclTyp = TYP_I_IMPL;
goto ARR_ST;
case CEE_STELEM_I4:
lclTyp = TYP_INT;
goto ARR_ST;
case CEE_STELEM_I8:
lclTyp = TYP_LONG;
goto ARR_ST;
case CEE_STELEM_R4:
lclTyp = TYP_FLOAT;
goto ARR_ST;
case CEE_STELEM_R8:
lclTyp = TYP_DOUBLE;
goto ARR_ST;
ARR_ST:
ARR_ST_POST_VERIFY:
/* The strict order of evaluation is LHS-operands, RHS-operands,
range-check, and then assignment. However, codegen currently
                   does the range-check before evaluating the RHS-operands. So to
maintain strict ordering, we spill the stack. */
if (impStackTop().val->gtFlags & GTF_SIDE_EFFECT)
{
impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
"Strict ordering of exceptions for Array store"));
}
/* Pull the new value from the stack */
op2 = impPopStack().val;
/* Pull the index value */
op1 = impPopStack().val;
/* Pull the array address */
op3 = impPopStack().val;
assertImp(op3->gtType == TYP_REF);
if (op2->IsLocalAddrExpr() != nullptr)
{
op2->gtType = TYP_I_IMPL;
}
// Mark the block as containing an index expression
if (op3->gtOper == GT_LCL_VAR)
{
if (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_CNS_INT || op1->gtOper == GT_ADD)
{
block->bbFlags |= BBF_HAS_IDX_LEN;
optMethodFlags |= OMF_HAS_ARRAYREF;
}
}
/* Create the index node */
op1 = gtNewIndexRef(lclTyp, op3, op1);
/* Create the assignment node and append it */
if (lclTyp == TYP_STRUCT)
{
assert(stelemClsHnd != DUMMY_INIT(NULL));
op1->AsIndex()->gtStructElemClass = stelemClsHnd;
op1->AsIndex()->gtIndElemSize = info.compCompHnd->getClassSize(stelemClsHnd);
}
if (varTypeIsStruct(op1))
{
op1 = impAssignStruct(op1, op2, stelemClsHnd, (unsigned)CHECK_SPILL_ALL);
}
else
{
op2 = impImplicitR4orR8Cast(op2, op1->TypeGet());
op1 = gtNewAssignNode(op1, op2);
}
/* Mark the expression as containing an assignment */
op1->gtFlags |= GTF_ASG;
goto SPILL_APPEND;
case CEE_ADD:
oper = GT_ADD;
goto MATH_OP2;
case CEE_ADD_OVF:
uns = false;
goto ADD_OVF;
case CEE_ADD_OVF_UN:
uns = true;
goto ADD_OVF;
ADD_OVF:
ovfl = true;
callNode = false;
oper = GT_ADD;
goto MATH_OP2_FLAGS;
case CEE_SUB:
oper = GT_SUB;
goto MATH_OP2;
case CEE_SUB_OVF:
uns = false;
goto SUB_OVF;
case CEE_SUB_OVF_UN:
uns = true;
goto SUB_OVF;
SUB_OVF:
ovfl = true;
callNode = false;
oper = GT_SUB;
goto MATH_OP2_FLAGS;
case CEE_MUL:
oper = GT_MUL;
goto MATH_MAYBE_CALL_NO_OVF;
case CEE_MUL_OVF:
uns = false;
goto MUL_OVF;
case CEE_MUL_OVF_UN:
uns = true;
goto MUL_OVF;
MUL_OVF:
ovfl = true;
oper = GT_MUL;
goto MATH_MAYBE_CALL_OVF;
// Other binary math operations
case CEE_DIV:
oper = GT_DIV;
goto MATH_MAYBE_CALL_NO_OVF;
case CEE_DIV_UN:
oper = GT_UDIV;
goto MATH_MAYBE_CALL_NO_OVF;
case CEE_REM:
oper = GT_MOD;
goto MATH_MAYBE_CALL_NO_OVF;
case CEE_REM_UN:
oper = GT_UMOD;
goto MATH_MAYBE_CALL_NO_OVF;
MATH_MAYBE_CALL_NO_OVF:
ovfl = false;
MATH_MAYBE_CALL_OVF:
// Morpher has some complex logic about when to turn different
// typed nodes on different platforms into helper calls. We
// need to either duplicate that logic here, or just
// pessimistically make all the nodes large enough to become
// call nodes. Since call nodes aren't that much larger and
                // these opcodes are infrequent enough that I chose the latter.
callNode = true;
goto MATH_OP2_FLAGS;
case CEE_AND:
oper = GT_AND;
goto MATH_OP2;
case CEE_OR:
oper = GT_OR;
goto MATH_OP2;
case CEE_XOR:
oper = GT_XOR;
goto MATH_OP2;
MATH_OP2: // For default values of 'ovfl' and 'callNode'
ovfl = false;
callNode = false;
MATH_OP2_FLAGS: // If 'ovfl' and 'callNode' have already been set
/* Pull two values and push back the result */
op2 = impPopStack().val;
op1 = impPopStack().val;
/* Can't do arithmetic with references */
assertImp(genActualType(op1->TypeGet()) != TYP_REF && genActualType(op2->TypeGet()) != TYP_REF);
                // Change both to TYP_I_IMPL (impBashVarAddrsToI won't change if it's a true byref, only
// if it is in the stack)
impBashVarAddrsToI(op1, op2);
type = impGetByRefResultType(oper, uns, &op1, &op2);
assert(!ovfl || !varTypeIsFloating(op1->gtType));
/* Special case: "int+0", "int-0", "int*1", "int/1" */
if (op2->gtOper == GT_CNS_INT)
{
if ((op2->IsIntegralConst(0) && (oper == GT_ADD || oper == GT_SUB)) ||
(op2->IsIntegralConst(1) && (oper == GT_MUL || oper == GT_DIV)))
{
impPushOnStack(op1, tiRetVal);
break;
}
}
// We can generate a TYP_FLOAT operation that has a TYP_DOUBLE operand
//
if (varTypeIsFloating(type) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType))
{
if (op1->TypeGet() != type)
{
// We insert a cast of op1 to 'type'
op1 = gtNewCastNode(type, op1, false, type);
}
if (op2->TypeGet() != type)
{
// We insert a cast of op2 to 'type'
op2 = gtNewCastNode(type, op2, false, type);
}
}
if (callNode)
{
/* These operators can later be transformed into 'GT_CALL' */
assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MUL]);
#ifndef TARGET_ARM
assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_DIV]);
assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UDIV]);
assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MOD]);
assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UMOD]);
#endif
// It's tempting to use LargeOpOpcode() here, but this logic is *not* saying
// that we'll need to transform into a general large node, but rather specifically
// to a call: by doing it this way, things keep working if there are multiple sizes,
// and a CALL is no longer the largest.
// That said, as of now it *is* a large node, so we'll do this with an assert rather
// than an "if".
assert(GenTree::s_gtNodeSizes[GT_CALL] == TREE_NODE_SZ_LARGE);
op1 = new (this, GT_CALL) GenTreeOp(oper, type, op1, op2 DEBUGARG(/*largeNode*/ true));
}
else
{
op1 = gtNewOperNode(oper, type, op1, op2);
}
/* Special case: integer/long division may throw an exception */
if (varTypeIsIntegral(op1->TypeGet()) && op1->OperMayThrow(this))
{
op1->gtFlags |= GTF_EXCEPT;
}
if (ovfl)
{
assert(oper == GT_ADD || oper == GT_SUB || oper == GT_MUL);
if (ovflType != TYP_UNKNOWN)
{
op1->gtType = ovflType;
}
op1->gtFlags |= (GTF_EXCEPT | GTF_OVERFLOW);
if (uns)
{
op1->gtFlags |= GTF_UNSIGNED;
}
}
impPushOnStack(op1, tiRetVal);
break;
case CEE_SHL:
oper = GT_LSH;
goto CEE_SH_OP2;
case CEE_SHR:
oper = GT_RSH;
goto CEE_SH_OP2;
case CEE_SHR_UN:
oper = GT_RSZ;
goto CEE_SH_OP2;
CEE_SH_OP2:
op2 = impPopStack().val;
op1 = impPopStack().val; // operand to be shifted
impBashVarAddrsToI(op1, op2);
type = genActualType(op1->TypeGet());
op1 = gtNewOperNode(oper, type, op1, op2);
impPushOnStack(op1, tiRetVal);
break;
case CEE_NOT:
op1 = impPopStack().val;
impBashVarAddrsToI(op1, nullptr);
type = genActualType(op1->TypeGet());
impPushOnStack(gtNewOperNode(GT_NOT, type, op1), tiRetVal);
break;
case CEE_CKFINITE:
op1 = impPopStack().val;
type = op1->TypeGet();
op1 = gtNewOperNode(GT_CKFINITE, type, op1);
op1->gtFlags |= GTF_EXCEPT;
impPushOnStack(op1, tiRetVal);
break;
case CEE_LEAVE:
val = getI4LittleEndian(codeAddr); // jump distance
jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int32)) + val);
goto LEAVE;
case CEE_LEAVE_S:
val = getI1LittleEndian(codeAddr); // jump distance
jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int8)) + val);
LEAVE:
if (compIsForInlining())
{
compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_LEAVE);
return;
}
JITDUMP(" %04X", jmpAddr);
if (block->bbJumpKind != BBJ_LEAVE)
{
impResetLeaveBlock(block, jmpAddr);
}
assert(jmpAddr == block->bbJumpDest->bbCodeOffs);
impImportLeave(block);
impNoteBranchOffs();
break;
case CEE_BR:
case CEE_BR_S:
jmpDist = (sz == 1) ? getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr);
if (compIsForInlining() && jmpDist == 0)
{
break; /* NOP */
}
impNoteBranchOffs();
break;
case CEE_BRTRUE:
case CEE_BRTRUE_S:
case CEE_BRFALSE:
case CEE_BRFALSE_S:
/* Pop the comparand (now there's a neat term) from the stack */
op1 = impPopStack().val;
type = op1->TypeGet();
                // Per ECMA-335, brfalse and brtrue are only specified for nint, ref, and byref.
//
// We've historically been a bit more permissive, so here we allow
// any type that gtNewZeroConNode can handle.
if (!varTypeIsArithmetic(type) && !varTypeIsGC(type))
{
BADCODE("invalid type for brtrue/brfalse");
}
if (opts.OptimizationEnabled() && (block->bbJumpDest == block->bbNext))
{
block->bbJumpKind = BBJ_NONE;
if (op1->gtFlags & GTF_GLOB_EFFECT)
{
op1 = gtUnusedValNode(op1);
goto SPILL_APPEND;
}
else
{
break;
}
}
if (op1->OperIsCompare())
{
if (opcode == CEE_BRFALSE || opcode == CEE_BRFALSE_S)
{
// Flip the sense of the compare
op1 = gtReverseCond(op1);
}
}
else
{
// We'll compare against an equally-sized integer 0
// For small types, we always compare against int
op2 = gtNewZeroConNode(genActualType(op1->gtType));
// Create the comparison operator and try to fold it
oper = (opcode == CEE_BRTRUE || opcode == CEE_BRTRUE_S) ? GT_NE : GT_EQ;
op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
}
// fall through
COND_JUMP:
/* Fold comparison if we can */
op1 = gtFoldExpr(op1);
/* Try to fold the really simple cases like 'iconst *, ifne/ifeq'*/
/* Don't make any blocks unreachable in import only mode */
if ((op1->gtOper == GT_CNS_INT) && !compIsForImportOnly())
{
/* gtFoldExpr() should prevent this as we don't want to make any blocks
unreachable under compDbgCode */
assert(!opts.compDbgCode);
BBjumpKinds foldedJumpKind = (BBjumpKinds)(op1->AsIntCon()->gtIconVal ? BBJ_ALWAYS : BBJ_NONE);
assertImp((block->bbJumpKind == BBJ_COND) // normal case
|| (block->bbJumpKind == foldedJumpKind)); // this can happen if we are reimporting the
// block for the second time
block->bbJumpKind = foldedJumpKind;
#ifdef DEBUG
if (verbose)
{
if (op1->AsIntCon()->gtIconVal)
{
printf("\nThe conditional jump becomes an unconditional jump to " FMT_BB "\n",
block->bbJumpDest->bbNum);
}
else
{
printf("\nThe block falls through into the next " FMT_BB "\n", block->bbNext->bbNum);
}
}
#endif
break;
}
op1 = gtNewOperNode(GT_JTRUE, TYP_VOID, op1);
/* GT_JTRUE is handled specially for non-empty stacks. See 'addStmt'
in impImportBlock(block). For correct line numbers, spill stack. */
if (opts.compDbgCode && impCurStmtDI.IsValid())
{
impSpillStackEnsure(true);
}
goto SPILL_APPEND;
case CEE_CEQ:
oper = GT_EQ;
uns = false;
goto CMP_2_OPs;
case CEE_CGT_UN:
oper = GT_GT;
uns = true;
goto CMP_2_OPs;
case CEE_CGT:
oper = GT_GT;
uns = false;
goto CMP_2_OPs;
case CEE_CLT_UN:
oper = GT_LT;
uns = true;
goto CMP_2_OPs;
case CEE_CLT:
oper = GT_LT;
uns = false;
goto CMP_2_OPs;
CMP_2_OPs:
op2 = impPopStack().val;
op1 = impPopStack().val;
// Recognize the IL idiom of CGT_UN(op1, 0) and normalize
// it so that downstream optimizations don't have to.
if ((opcode == CEE_CGT_UN) && op2->IsIntegralConst(0))
{
oper = GT_NE;
uns = false;
}
#ifdef TARGET_64BIT
// TODO-Casts: create a helper that upcasts int32 -> native int when necessary.
// See also identical code in impGetByRefResultType and STSFLD import.
if (varTypeIsI(op1) && (genActualType(op2) == TYP_INT))
{
op2 = gtNewCastNode(TYP_I_IMPL, op2, uns, TYP_I_IMPL);
}
else if (varTypeIsI(op2) && (genActualType(op1) == TYP_INT))
{
op1 = gtNewCastNode(TYP_I_IMPL, op1, uns, TYP_I_IMPL);
}
#endif // TARGET_64BIT
assertImp(genActualType(op1) == genActualType(op2) || (varTypeIsI(op1) && varTypeIsI(op2)) ||
(varTypeIsFloating(op1) && varTypeIsFloating(op2)));
// Create the comparison node.
op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
// TODO: setting both flags when only one is appropriate.
if (uns)
{
op1->gtFlags |= GTF_RELOP_NAN_UN | GTF_UNSIGNED;
}
// Fold result, if possible.
op1 = gtFoldExpr(op1);
impPushOnStack(op1, tiRetVal);
break;
case CEE_BEQ_S:
case CEE_BEQ:
oper = GT_EQ;
goto CMP_2_OPs_AND_BR;
case CEE_BGE_S:
case CEE_BGE:
oper = GT_GE;
goto CMP_2_OPs_AND_BR;
case CEE_BGE_UN_S:
case CEE_BGE_UN:
oper = GT_GE;
goto CMP_2_OPs_AND_BR_UN;
case CEE_BGT_S:
case CEE_BGT:
oper = GT_GT;
goto CMP_2_OPs_AND_BR;
case CEE_BGT_UN_S:
case CEE_BGT_UN:
oper = GT_GT;
goto CMP_2_OPs_AND_BR_UN;
case CEE_BLE_S:
case CEE_BLE:
oper = GT_LE;
goto CMP_2_OPs_AND_BR;
case CEE_BLE_UN_S:
case CEE_BLE_UN:
oper = GT_LE;
goto CMP_2_OPs_AND_BR_UN;
case CEE_BLT_S:
case CEE_BLT:
oper = GT_LT;
goto CMP_2_OPs_AND_BR;
case CEE_BLT_UN_S:
case CEE_BLT_UN:
oper = GT_LT;
goto CMP_2_OPs_AND_BR_UN;
case CEE_BNE_UN_S:
case CEE_BNE_UN:
oper = GT_NE;
goto CMP_2_OPs_AND_BR_UN;
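            // Shared conditional-branch tails: the _UN variant marks the compare as
            // unsigned/unordered before the relop and GT_JTRUE are built.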
CMP_2_OPs_AND_BR_UN:
uns = true;
unordered = true;
goto CMP_2_OPs_AND_BR_ALL;
CMP_2_OPs_AND_BR:
uns = false;
unordered = false;
goto CMP_2_OPs_AND_BR_ALL;
CMP_2_OPs_AND_BR_ALL:
/* Pull two values */
op2 = impPopStack().val;
op1 = impPopStack().val;
#ifdef TARGET_64BIT
if ((op1->TypeGet() == TYP_I_IMPL) && (genActualType(op2->TypeGet()) == TYP_INT))
{
op2 = gtNewCastNode(TYP_I_IMPL, op2, uns, uns ? TYP_U_IMPL : TYP_I_IMPL);
}
else if ((op2->TypeGet() == TYP_I_IMPL) && (genActualType(op1->TypeGet()) == TYP_INT))
{
op1 = gtNewCastNode(TYP_I_IMPL, op1, uns, uns ? TYP_U_IMPL : TYP_I_IMPL);
}
#endif // TARGET_64BIT
assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
(varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet())) ||
(varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType)));
if (opts.OptimizationEnabled() && (block->bbJumpDest == block->bbNext))
{
block->bbJumpKind = BBJ_NONE;
if (op1->gtFlags & GTF_GLOB_EFFECT)
{
impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
"Branch to next Optimization, op1 side effect"));
impAppendTree(gtUnusedValNode(op1), (unsigned)CHECK_SPILL_NONE, impCurStmtDI);
}
if (op2->gtFlags & GTF_GLOB_EFFECT)
{
impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
"Branch to next Optimization, op2 side effect"));
impAppendTree(gtUnusedValNode(op2), (unsigned)CHECK_SPILL_NONE, impCurStmtDI);
}
#ifdef DEBUG
if ((op1->gtFlags | op2->gtFlags) & GTF_GLOB_EFFECT)
{
impNoteLastILoffs();
}
#endif
break;
}
                // We can generate a compare of differently sized floating point op1 and op2
// We insert a cast
//
if (varTypeIsFloating(op1->TypeGet()))
{
if (op1->TypeGet() != op2->TypeGet())
{
assert(varTypeIsFloating(op2->TypeGet()));
// say op1=double, op2=float. To avoid loss of precision
// while comparing, op2 is converted to double and double
// comparison is done.
if (op1->TypeGet() == TYP_DOUBLE)
{
// We insert a cast of op2 to TYP_DOUBLE
op2 = gtNewCastNode(TYP_DOUBLE, op2, false, TYP_DOUBLE);
}
else if (op2->TypeGet() == TYP_DOUBLE)
{
// We insert a cast of op1 to TYP_DOUBLE
op1 = gtNewCastNode(TYP_DOUBLE, op1, false, TYP_DOUBLE);
}
}
}
/* Create and append the operator */
op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
if (uns)
{
op1->gtFlags |= GTF_UNSIGNED;
}
if (unordered)
{
op1->gtFlags |= GTF_RELOP_NAN_UN;
}
goto COND_JUMP;
case CEE_SWITCH:
/* Pop the switch value off the stack */
op1 = impPopStack().val;
assertImp(genActualTypeIsIntOrI(op1->TypeGet()));
/* We can create a switch node */
op1 = gtNewOperNode(GT_SWITCH, TYP_VOID, op1);
val = (int)getU4LittleEndian(codeAddr);
codeAddr += 4 + val * 4; // skip over the switch-table
goto SPILL_APPEND;
/************************** Casting OPCODES ***************************/
case CEE_CONV_OVF_I1:
lclTyp = TYP_BYTE;
goto CONV_OVF;
case CEE_CONV_OVF_I2:
lclTyp = TYP_SHORT;
goto CONV_OVF;
case CEE_CONV_OVF_I:
lclTyp = TYP_I_IMPL;
goto CONV_OVF;
case CEE_CONV_OVF_I4:
lclTyp = TYP_INT;
goto CONV_OVF;
case CEE_CONV_OVF_I8:
lclTyp = TYP_LONG;
goto CONV_OVF;
case CEE_CONV_OVF_U1:
lclTyp = TYP_UBYTE;
goto CONV_OVF;
case CEE_CONV_OVF_U2:
lclTyp = TYP_USHORT;
goto CONV_OVF;
case CEE_CONV_OVF_U:
lclTyp = TYP_U_IMPL;
goto CONV_OVF;
case CEE_CONV_OVF_U4:
lclTyp = TYP_UINT;
goto CONV_OVF;
case CEE_CONV_OVF_U8:
lclTyp = TYP_ULONG;
goto CONV_OVF;
case CEE_CONV_OVF_I1_UN:
lclTyp = TYP_BYTE;
goto CONV_OVF_UN;
case CEE_CONV_OVF_I2_UN:
lclTyp = TYP_SHORT;
goto CONV_OVF_UN;
case CEE_CONV_OVF_I_UN:
lclTyp = TYP_I_IMPL;
goto CONV_OVF_UN;
case CEE_CONV_OVF_I4_UN:
lclTyp = TYP_INT;
goto CONV_OVF_UN;
case CEE_CONV_OVF_I8_UN:
lclTyp = TYP_LONG;
goto CONV_OVF_UN;
case CEE_CONV_OVF_U1_UN:
lclTyp = TYP_UBYTE;
goto CONV_OVF_UN;
case CEE_CONV_OVF_U2_UN:
lclTyp = TYP_USHORT;
goto CONV_OVF_UN;
case CEE_CONV_OVF_U_UN:
lclTyp = TYP_U_IMPL;
goto CONV_OVF_UN;
case CEE_CONV_OVF_U4_UN:
lclTyp = TYP_UINT;
goto CONV_OVF_UN;
case CEE_CONV_OVF_U8_UN:
lclTyp = TYP_ULONG;
goto CONV_OVF_UN;
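            // Shared conversion tails: 'uns' marks the source value as unsigned and
            // 'ovfl' requests an overflow check; both are consumed at _CONV below.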
CONV_OVF_UN:
uns = true;
goto CONV_OVF_COMMON;
CONV_OVF:
uns = false;
goto CONV_OVF_COMMON;
CONV_OVF_COMMON:
ovfl = true;
goto _CONV;
case CEE_CONV_I1:
lclTyp = TYP_BYTE;
goto CONV;
case CEE_CONV_I2:
lclTyp = TYP_SHORT;
goto CONV;
case CEE_CONV_I:
lclTyp = TYP_I_IMPL;
goto CONV;
case CEE_CONV_I4:
lclTyp = TYP_INT;
goto CONV;
case CEE_CONV_I8:
lclTyp = TYP_LONG;
goto CONV;
case CEE_CONV_U1:
lclTyp = TYP_UBYTE;
goto CONV;
case CEE_CONV_U2:
lclTyp = TYP_USHORT;
goto CONV;
#if (REGSIZE_BYTES == 8)
case CEE_CONV_U:
lclTyp = TYP_U_IMPL;
goto CONV_UN;
#else
case CEE_CONV_U:
lclTyp = TYP_U_IMPL;
goto CONV;
#endif
case CEE_CONV_U4:
lclTyp = TYP_UINT;
goto CONV;
case CEE_CONV_U8:
lclTyp = TYP_ULONG;
goto CONV_UN;
case CEE_CONV_R4:
lclTyp = TYP_FLOAT;
goto CONV;
case CEE_CONV_R8:
lclTyp = TYP_DOUBLE;
goto CONV;
case CEE_CONV_R_UN:
lclTyp = TYP_DOUBLE;
goto CONV_UN;
CONV_UN:
uns = true;
ovfl = false;
goto _CONV;
CONV:
uns = false;
ovfl = false;
goto _CONV;
_CONV:
                // Only conversions from FLOAT or DOUBLE to an integer type, and conversions
                // from ULONG (or LONG on ARM) to DOUBLE, are morphed to calls.
if (varTypeIsFloating(lclTyp))
{
callNode = varTypeIsLong(impStackTop().val) || uns // uint->dbl gets turned into uint->long->dbl
#ifdef TARGET_64BIT
// TODO-ARM64-Bug?: This was AMD64; I enabled it for ARM64 also. OK?
// TYP_BYREF could be used as TYP_I_IMPL which is long.
// TODO-CQ: remove this when we lower casts long/ulong --> float/double
// and generate SSE2 code instead of going through helper calls.
|| (impStackTop().val->TypeGet() == TYP_BYREF)
#endif
;
}
else
{
callNode = varTypeIsFloating(impStackTop().val->TypeGet());
}
op1 = impPopStack().val;
impBashVarAddrsToI(op1);
// Casts from floating point types must not have GTF_UNSIGNED set.
if (varTypeIsFloating(op1))
{
uns = false;
}
                // At this point uns, ovfl, and callNode are all set.
if (varTypeIsSmall(lclTyp) && !ovfl && op1->gtType == TYP_INT && op1->gtOper == GT_AND)
{
op2 = op1->AsOp()->gtOp2;
if (op2->gtOper == GT_CNS_INT)
{
ssize_t ival = op2->AsIntCon()->gtIconVal;
ssize_t mask, umask;
switch (lclTyp)
{
case TYP_BYTE:
case TYP_UBYTE:
mask = 0x00FF;
umask = 0x007F;
break;
case TYP_USHORT:
case TYP_SHORT:
mask = 0xFFFF;
umask = 0x7FFF;
break;
default:
assert(!"unexpected type");
return;
}
if (((ival & umask) == ival) || ((ival & mask) == ival && uns))
{
/* Toss the cast, it's a waste of time */
impPushOnStack(op1, tiRetVal);
break;
}
else if (ival == mask)
{
/* Toss the masking, it's a waste of time, since
                               we sign-extend from the small value anyway */
op1 = op1->AsOp()->gtOp1;
}
}
}
/* The 'op2' sub-operand of a cast is the 'real' type number,
since the result of a cast to one of the 'small' integer
types is an integer.
*/
type = genActualType(lclTyp);
// If this is a no-op cast, just use op1.
if (!ovfl && (type == op1->TypeGet()) && (genTypeSize(type) == genTypeSize(lclTyp)))
{
// Nothing needs to change
}
// Work is evidently required, add cast node
else
{
if (callNode)
{
op1 = gtNewCastNodeL(type, op1, uns, lclTyp);
}
else
{
op1 = gtNewCastNode(type, op1, uns, lclTyp);
}
if (ovfl)
{
op1->gtFlags |= (GTF_OVERFLOW | GTF_EXCEPT);
}
if (op1->gtGetOp1()->OperIsConst() && opts.OptimizationEnabled())
{
// Try and fold the introduced cast
op1 = gtFoldExprConst(op1);
}
}
impPushOnStack(op1, tiRetVal);
break;
case CEE_NEG:
op1 = impPopStack().val;
impBashVarAddrsToI(op1, nullptr);
impPushOnStack(gtNewOperNode(GT_NEG, genActualType(op1->gtType), op1), tiRetVal);
break;
case CEE_POP:
{
/* Pull the top value from the stack */
StackEntry se = impPopStack();
clsHnd = se.seTypeInfo.GetClassHandle();
op1 = se.val;
/* Get hold of the type of the value being duplicated */
lclTyp = genActualType(op1->gtType);
/* Does the value have any side effects? */
if ((op1->gtFlags & GTF_SIDE_EFFECT) || opts.compDbgCode)
{
// Since we are throwing away the value, just normalize
// it to its address. This is more efficient.
if (varTypeIsStruct(op1))
{
JITDUMP("\n ... CEE_POP struct ...\n");
DISPTREE(op1);
#ifdef UNIX_AMD64_ABI
// Non-calls, such as obj or ret_expr, have to go through this.
// Calls with large struct return value have to go through this.
// Helper calls with small struct return value also have to go
// through this since they do not follow Unix calling convention.
if (op1->gtOper != GT_CALL ||
!IsMultiRegReturnedType(clsHnd, op1->AsCall()->GetUnmanagedCallConv()) ||
op1->AsCall()->gtCallType == CT_HELPER)
#endif // UNIX_AMD64_ABI
{
// If the value being produced comes from loading
// via an underlying address, just null check the address.
if (op1->OperIs(GT_FIELD, GT_IND, GT_OBJ))
{
gtChangeOperToNullCheck(op1, block);
}
else
{
op1 = impGetStructAddr(op1, clsHnd, (unsigned)CHECK_SPILL_ALL, false);
}
JITDUMP("\n ... optimized to ...\n");
DISPTREE(op1);
}
}
// If op1 is non-overflow cast, throw it away since it is useless.
// Another reason for throwing away the useless cast is in the context of
// implicit tail calls when the operand of pop is GT_CAST(GT_CALL(..)).
// The cast gets added as part of importing GT_CALL, which gets in the way
// of fgMorphCall() on the forms of tail call nodes that we assert.
if ((op1->gtOper == GT_CAST) && !op1->gtOverflow())
{
op1 = op1->AsOp()->gtOp1;
}
if (op1->gtOper != GT_CALL)
{
if ((op1->gtFlags & GTF_SIDE_EFFECT) != 0)
{
op1 = gtUnusedValNode(op1);
}
else
{
// Can't bash to NOP here because op1 can be referenced from `currentBlock->bbEntryState`,
// if we ever need to reimport we need a valid LCL_VAR on it.
op1 = gtNewNothingNode();
}
}
/* Append the value to the tree list */
goto SPILL_APPEND;
}
/* No side effects - just throw the <BEEP> thing away */
}
break;
case CEE_DUP:
{
StackEntry se = impPopStack();
GenTree* tree = se.val;
tiRetVal = se.seTypeInfo;
op1 = tree;
// If the expression to dup is simple, just clone it.
// Otherwise spill it to a temp, and reload the temp twice.
bool cloneExpr = false;
if (!opts.compDbgCode)
{
// Duplicate 0 and +0.0
if (op1->IsIntegralConst(0) || op1->IsFloatPositiveZero())
{
cloneExpr = true;
}
// Duplicate locals and addresses of them
else if (op1->IsLocal())
{
cloneExpr = true;
}
else if (op1->TypeIs(TYP_BYREF) && op1->OperIs(GT_ADDR) && op1->gtGetOp1()->IsLocal() &&
(OPCODE)impGetNonPrefixOpcode(codeAddr + sz, codeEndp) != CEE_INITOBJ)
{
cloneExpr = true;
}
}
else
{
// Always clone for debug mode
cloneExpr = true;
}
if (!cloneExpr)
{
const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("dup spill"));
impAssignTempGen(tmpNum, op1, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL);
var_types type = genActualType(lvaTable[tmpNum].TypeGet());
op1 = gtNewLclvNode(tmpNum, type);
// Propagate type info to the temp from the stack and the original tree
if (type == TYP_REF)
{
assert(lvaTable[tmpNum].lvSingleDef == 0);
lvaTable[tmpNum].lvSingleDef = 1;
JITDUMP("Marked V%02u as a single def local\n", tmpNum);
lvaSetClass(tmpNum, tree, tiRetVal.GetClassHandle());
}
}
op1 = impCloneExpr(op1, &op2, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("DUP instruction"));
assert(!(op1->gtFlags & GTF_GLOB_EFFECT) && !(op2->gtFlags & GTF_GLOB_EFFECT));
impPushOnStack(op1, tiRetVal);
impPushOnStack(op2, tiRetVal);
}
break;
case CEE_STIND_I1:
lclTyp = TYP_BYTE;
goto STIND;
case CEE_STIND_I2:
lclTyp = TYP_SHORT;
goto STIND;
case CEE_STIND_I4:
lclTyp = TYP_INT;
goto STIND;
case CEE_STIND_I8:
lclTyp = TYP_LONG;
goto STIND;
case CEE_STIND_I:
lclTyp = TYP_I_IMPL;
goto STIND;
case CEE_STIND_REF:
lclTyp = TYP_REF;
goto STIND;
case CEE_STIND_R4:
lclTyp = TYP_FLOAT;
goto STIND;
case CEE_STIND_R8:
lclTyp = TYP_DOUBLE;
goto STIND;
STIND:
op2 = impPopStack().val; // value to store
op1 = impPopStack().val; // address to store to
                // you can indirect off a TYP_I_IMPL (if we are in C) or a BYREF
assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
impBashVarAddrsToI(op1, op2);
op2 = impImplicitR4orR8Cast(op2, lclTyp);
#ifdef TARGET_64BIT
// Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
{
op2->gtType = TYP_I_IMPL;
}
else
{
                    // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
//
if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
{
op2 = gtNewCastNode(TYP_INT, op2, false, TYP_INT);
}
                    // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
//
if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
{
op2 = gtNewCastNode(TYP_I_IMPL, op2, false, TYP_I_IMPL);
}
}
#endif // TARGET_64BIT
if (opcode == CEE_STIND_REF)
{
// STIND_REF can be used to store TYP_INT, TYP_I_IMPL, TYP_REF, or TYP_BYREF
assertImp(varTypeIsIntOrI(op2->gtType) || varTypeIsGC(op2->gtType));
lclTyp = genActualType(op2->TypeGet());
}
// Check target type.
#ifdef DEBUG
if (op2->gtType == TYP_BYREF || lclTyp == TYP_BYREF)
{
if (op2->gtType == TYP_BYREF)
{
assertImp(lclTyp == TYP_BYREF || lclTyp == TYP_I_IMPL);
}
else if (lclTyp == TYP_BYREF)
{
assertImp(op2->gtType == TYP_BYREF || varTypeIsIntOrI(op2->gtType));
}
}
else
{
assertImp(genActualType(op2->gtType) == genActualType(lclTyp) ||
((lclTyp == TYP_I_IMPL) && (genActualType(op2->gtType) == TYP_INT)) ||
(varTypeIsFloating(op2->gtType) && varTypeIsFloating(lclTyp)));
}
#endif
op1 = gtNewOperNode(GT_IND, lclTyp, op1);
                // stind could point anywhere, for example a boxed class static int
op1->gtFlags |= GTF_IND_TGTANYWHERE;
if (prefixFlags & PREFIX_VOLATILE)
{
assert(op1->OperGet() == GT_IND);
op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
op1->gtFlags |= GTF_IND_VOLATILE;
}
if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
{
assert(op1->OperGet() == GT_IND);
op1->gtFlags |= GTF_IND_UNALIGNED;
}
op1 = gtNewAssignNode(op1, op2);
op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
// Spill side-effects AND global-data-accesses
if (verCurrentState.esStackDepth > 0)
{
impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STIND"));
}
goto APPEND;
case CEE_LDIND_I1:
lclTyp = TYP_BYTE;
goto LDIND;
case CEE_LDIND_I2:
lclTyp = TYP_SHORT;
goto LDIND;
case CEE_LDIND_U4:
case CEE_LDIND_I4:
lclTyp = TYP_INT;
goto LDIND;
case CEE_LDIND_I8:
lclTyp = TYP_LONG;
goto LDIND;
case CEE_LDIND_REF:
lclTyp = TYP_REF;
goto LDIND;
case CEE_LDIND_I:
lclTyp = TYP_I_IMPL;
goto LDIND;
case CEE_LDIND_R4:
lclTyp = TYP_FLOAT;
goto LDIND;
case CEE_LDIND_R8:
lclTyp = TYP_DOUBLE;
goto LDIND;
case CEE_LDIND_U1:
lclTyp = TYP_UBYTE;
goto LDIND;
case CEE_LDIND_U2:
lclTyp = TYP_USHORT;
goto LDIND;
LDIND:
op1 = impPopStack().val; // address to load from
impBashVarAddrsToI(op1);
#ifdef TARGET_64BIT
                // Allow an upcast of op1 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
//
if (genActualType(op1->gtType) == TYP_INT)
{
op1 = gtNewCastNode(TYP_I_IMPL, op1, false, TYP_I_IMPL);
}
#endif
assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
op1 = gtNewOperNode(GT_IND, lclTyp, op1);
                // ldind could point anywhere, for example a boxed class static int
op1->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
if (prefixFlags & PREFIX_VOLATILE)
{
assert(op1->OperGet() == GT_IND);
op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
op1->gtFlags |= GTF_IND_VOLATILE;
}
if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
{
assert(op1->OperGet() == GT_IND);
op1->gtFlags |= GTF_IND_UNALIGNED;
}
impPushOnStack(op1, tiRetVal);
break;
case CEE_UNALIGNED:
assert(sz == 1);
val = getU1LittleEndian(codeAddr);
++codeAddr;
JITDUMP(" %u", val);
if ((val != 1) && (val != 2) && (val != 4))
{
BADCODE("Alignment unaligned. must be 1, 2, or 4");
}
Verify(!(prefixFlags & PREFIX_UNALIGNED), "Multiple unaligned. prefixes");
prefixFlags |= PREFIX_UNALIGNED;
impValidateMemoryAccessOpcode(codeAddr, codeEndp, false);
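            // PREFIX: re-enter the opcode decoder for the instruction that follows a prefix byte.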
PREFIX:
opcode = (OPCODE)getU1LittleEndian(codeAddr);
opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
codeAddr += sizeof(__int8);
goto DECODE_OPCODE;
case CEE_VOLATILE:
Verify(!(prefixFlags & PREFIX_VOLATILE), "Multiple volatile. prefixes");
prefixFlags |= PREFIX_VOLATILE;
impValidateMemoryAccessOpcode(codeAddr, codeEndp, true);
assert(sz == 0);
goto PREFIX;
case CEE_LDFTN:
{
// Need to do a lookup here so that we perform an access check
// and do a NOWAY if protections are violated
_impResolveToken(CORINFO_TOKENKIND_Method);
JITDUMP(" %08X", resolvedToken.token);
eeGetCallInfo(&resolvedToken, (prefixFlags & PREFIX_CONSTRAINED) ? &constrainedResolvedToken : nullptr,
combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN), &callInfo);
// This check really only applies to intrinsic Array.Address methods
if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
{
NO_WAY("Currently do not support LDFTN of Parameterized functions");
}
// Do this before DO_LDFTN since CEE_LDVIRTFN does it on its own.
impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
DO_LDFTN:
op1 = impMethodPointer(&resolvedToken, &callInfo);
if (compDonotInline())
{
return;
}
// Call info may have more precise information about the function than
// the resolved token.
mdToken constrainedToken = prefixFlags & PREFIX_CONSTRAINED ? constrainedResolvedToken.token : 0;
methodPointerInfo* heapToken = impAllocateMethodPointerInfo(resolvedToken, constrainedToken);
assert(callInfo.hMethod != nullptr);
heapToken->m_token.hMethod = callInfo.hMethod;
impPushOnStack(op1, typeInfo(heapToken));
break;
}
case CEE_LDVIRTFTN:
{
/* Get the method token */
_impResolveToken(CORINFO_TOKENKIND_Method);
JITDUMP(" %08X", resolvedToken.token);
eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef */,
combine(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN),
CORINFO_CALLINFO_CALLVIRT),
&callInfo);
// This check really only applies to intrinsic Array.Address methods
if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
{
NO_WAY("Currently do not support LDFTN of Parameterized functions");
}
mflags = callInfo.methodFlags;
impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
if (compIsForInlining())
{
if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
{
compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDVIRTFN_ON_NON_VIRTUAL);
return;
}
}
CORINFO_SIG_INFO& ftnSig = callInfo.sig;
/* Get the object-ref */
op1 = impPopStack().val;
assertImp(op1->gtType == TYP_REF);
if (opts.IsReadyToRun())
{
if (callInfo.kind != CORINFO_VIRTUALCALL_LDVIRTFTN)
{
if (op1->gtFlags & GTF_SIDE_EFFECT)
{
op1 = gtUnusedValNode(op1);
impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI);
}
goto DO_LDFTN;
}
}
else if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
{
if (op1->gtFlags & GTF_SIDE_EFFECT)
{
op1 = gtUnusedValNode(op1);
impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI);
}
goto DO_LDFTN;
}
GenTree* fptr = impImportLdvirtftn(op1, &resolvedToken, &callInfo);
if (compDonotInline())
{
return;
}
methodPointerInfo* heapToken = impAllocateMethodPointerInfo(resolvedToken, 0);
assert(heapToken->m_token.tokenType == CORINFO_TOKENKIND_Method);
assert(callInfo.hMethod != nullptr);
heapToken->m_token.tokenType = CORINFO_TOKENKIND_Ldvirtftn;
heapToken->m_token.hMethod = callInfo.hMethod;
impPushOnStack(fptr, typeInfo(heapToken));
break;
}
case CEE_CONSTRAINED:
assertImp(sz == sizeof(unsigned));
impResolveToken(codeAddr, &constrainedResolvedToken, CORINFO_TOKENKIND_Constrained);
codeAddr += sizeof(unsigned); // prefix instructions must increment codeAddr manually
JITDUMP(" (%08X) ", constrainedResolvedToken.token);
Verify(!(prefixFlags & PREFIX_CONSTRAINED), "Multiple constrained. prefixes");
prefixFlags |= PREFIX_CONSTRAINED;
{
OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
if (actualOpcode != CEE_CALLVIRT && actualOpcode != CEE_CALL && actualOpcode != CEE_LDFTN)
{
BADCODE("constrained. has to be followed by callvirt, call or ldftn");
}
}
goto PREFIX;
case CEE_READONLY:
JITDUMP(" readonly.");
Verify(!(prefixFlags & PREFIX_READONLY), "Multiple readonly. prefixes");
prefixFlags |= PREFIX_READONLY;
{
OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
if (actualOpcode != CEE_LDELEMA && !impOpcodeIsCallOpcode(actualOpcode))
{
BADCODE("readonly. has to be followed by ldelema or call");
}
}
assert(sz == 0);
goto PREFIX;
case CEE_TAILCALL:
JITDUMP(" tail.");
Verify(!(prefixFlags & PREFIX_TAILCALL_EXPLICIT), "Multiple tailcall. prefixes");
prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
{
OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
if (!impOpcodeIsCallOpcode(actualOpcode))
{
BADCODE("tailcall. has to be followed by call, callvirt or calli");
}
}
assert(sz == 0);
goto PREFIX;
case CEE_NEWOBJ:
/* Since we will implicitly insert newObjThisPtr at the start of the
argument list, spill any GTF_ORDER_SIDEEFF */
impSpillSpecialSideEff();
/* NEWOBJ does not respond to TAIL */
prefixFlags &= ~PREFIX_TAILCALL_EXPLICIT;
/* NEWOBJ does not respond to CONSTRAINED */
prefixFlags &= ~PREFIX_CONSTRAINED;
_impResolveToken(CORINFO_TOKENKIND_NewObj);
eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_ALLOWINSTPARAM), &callInfo);
mflags = callInfo.methodFlags;
if ((mflags & (CORINFO_FLG_STATIC | CORINFO_FLG_ABSTRACT)) != 0)
{
BADCODE("newobj on static or abstract method");
}
// Insert the security callout before any actual code is generated
impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
// There are three different cases for newobj:
//   Object size is variable (depends on arguments):
//     1) The object is an array (arrays are treated specially by the EE)
//     2) The object is some other variable-sized object (e.g. String)
//   3) The class size can be determined beforehand (the normal case)
// In the first case we need to call a NEWOBJ helper (multinewarray),
// in the second case we call the constructor with a '0' this pointer,
// and in the third case we allocate the memory and then call the constructor.
clsFlags = callInfo.classFlags;
if (clsFlags & CORINFO_FLG_ARRAY)
{
// Arrays need to call the NEWOBJ helper.
assertImp(clsFlags & CORINFO_FLG_VAROBJSIZE);
impImportNewObjArray(&resolvedToken, &callInfo);
if (compDonotInline())
{
return;
}
callTyp = TYP_REF;
break;
}
// At present this can only be String
else if (clsFlags & CORINFO_FLG_VAROBJSIZE)
{
// Skip this thisPtr argument
newObjThisPtr = nullptr;
/* Remember that this basic block contains 'new' of an object */
block->bbFlags |= BBF_HAS_NEWOBJ;
optMethodFlags |= OMF_HAS_NEWOBJ;
}
else
{
// This is the normal case where the size of the object is
// fixed. Allocate the memory and call the constructor.
// Note: We cannot add a peephole to avoid the use of a temp here
// because we don't have enough interference info to detect when the
// sources and destination interfere, for example: s = new S(ref);
// TODO: Find the correct place to introduce a general
// reverse copy prop for struct return values from newobj or
// any function returning structs.
/* get a temporary for the new object */
lclNum = lvaGrabTemp(true DEBUGARG("NewObj constructor temp"));
if (compDonotInline())
{
// Fail fast if lvaGrabTemp fails with CALLSITE_TOO_MANY_LOCALS.
assert(compInlineResult->GetObservation() == InlineObservation::CALLSITE_TOO_MANY_LOCALS);
return;
}
// In the value class case we only need clsHnd for size calcs.
//
// The lookup of the code pointer will be handled by CALL in this case
if (clsFlags & CORINFO_FLG_VALUECLASS)
{
if (compIsForInlining())
{
// If value class has GC fields, inform the inliner. It may choose to
// bail out on the inline.
DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
{
compInlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
if (compInlineResult->IsFailure())
{
return;
}
// Do further notification in the case where the call site is rare;
// some policies do not track the relative hotness of call sites for
// "always" inline cases.
if (impInlineInfo->iciBlock->isRunRarely())
{
compInlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
if (compInlineResult->IsFailure())
{
return;
}
}
}
}
CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
if (impIsPrimitive(jitTyp))
{
lvaTable[lclNum].lvType = JITtype2varType(jitTyp);
}
else
{
// The local variable itself is the allocated space.
// Here we need the unsafe value class check, since the address of the struct is taken
// for further use and is potentially exploitable.
lvaSetStruct(lclNum, resolvedToken.hClass, true /* unsafe value cls check */);
}
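// Decide whether the temp must be explicitly zero-initialized here; the prolog zero-init
// is not sufficient, for example, when this block is inside a loop.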
bool bbInALoop = impBlockIsInALoop(block);
bool bbIsReturn = (block->bbJumpKind == BBJ_RETURN) &&
(!compIsForInlining() || (impInlineInfo->iciBlock->bbJumpKind == BBJ_RETURN));
LclVarDsc* const lclDsc = lvaGetDesc(lclNum);
if (fgVarNeedsExplicitZeroInit(lclNum, bbInALoop, bbIsReturn))
{
// Append a tree to zero-out the temp
newObjThisPtr = gtNewLclvNode(lclNum, lclDsc->TypeGet());
newObjThisPtr = gtNewBlkOpNode(newObjThisPtr, // Dest
gtNewIconNode(0), // Value
false, // isVolatile
false); // not copyBlock
impAppendTree(newObjThisPtr, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);
}
else
{
JITDUMP("\nSuppressing zero-init for V%02u -- expect to zero in prolog\n", lclNum);
lclDsc->lvSuppressedZeroInit = 1;
compSuppressedZeroInit = true;
}
// Obtain the address of the temp
newObjThisPtr =
gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet()));
}
else
{
// If we're newing up a finalizable object, spill anything that can cause exceptions.
//
bool hasSideEffects = false;
CorInfoHelpFunc newHelper =
info.compCompHnd->getNewHelper(&resolvedToken, info.compMethodHnd, &hasSideEffects);
if (hasSideEffects)
{
JITDUMP("\nSpilling stack for finalizable newobj\n");
impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("finalizable newobj spill"));
}
const bool useParent = true;
op1 = gtNewAllocObjNode(&resolvedToken, useParent);
if (op1 == nullptr)
{
return;
}
// Remember that this basic block contains 'new' of an object
block->bbFlags |= BBF_HAS_NEWOBJ;
optMethodFlags |= OMF_HAS_NEWOBJ;
// Append the assignment to the temp/local. We don't need to spill
// at all since we are just calling an EE-Jit helper which can only
// cause an (async) OutOfMemoryException.
// We assign the newly allocated object (by a GT_ALLOCOBJ node)
// to a temp. Note that the pattern "temp = allocObj" is required
// by ObjectAllocator phase to be able to determine GT_ALLOCOBJ nodes
// without exhaustive walk over all expressions.
impAssignTempGen(lclNum, op1, (unsigned)CHECK_SPILL_NONE);
assert(lvaTable[lclNum].lvSingleDef == 0);
lvaTable[lclNum].lvSingleDef = 1;
JITDUMP("Marked V%02u as a single def local\n", lclNum);
lvaSetClass(lclNum, resolvedToken.hClass, true /* is Exact */);
newObjThisPtr = gtNewLclvNode(lclNum, TYP_REF);
}
}
goto CALL;
case CEE_CALLI:
/* CALLI does not respond to CONSTRAINED */
prefixFlags &= ~PREFIX_CONSTRAINED;
FALLTHROUGH;
case CEE_CALLVIRT:
case CEE_CALL:
// We can't call getCallInfo on the token from a CALLI, but we need it in
// many other places. We unfortunately embed that knowledge here.
if (opcode != CEE_CALLI)
{
_impResolveToken(CORINFO_TOKENKIND_Method);
eeGetCallInfo(&resolvedToken,
(prefixFlags & PREFIX_CONSTRAINED) ? &constrainedResolvedToken : nullptr,
// this is how impImportCall invokes getCallInfo
combine(combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS),
(opcode == CEE_CALLVIRT) ? CORINFO_CALLINFO_CALLVIRT : CORINFO_CALLINFO_NONE),
&callInfo);
}
else
{
// Suppress uninitialized use warning.
memset(&resolvedToken, 0, sizeof(resolvedToken));
memset(&callInfo, 0, sizeof(callInfo));
resolvedToken.token = getU4LittleEndian(codeAddr);
resolvedToken.tokenContext = impTokenLookupContextHandle;
resolvedToken.tokenScope = info.compScopeHnd;
}
CALL: // memberRef should be set.
// newObjThisPtr should be set for CEE_NEWOBJ
JITDUMP(" %08X", resolvedToken.token);
constraintCall = (prefixFlags & PREFIX_CONSTRAINED) != 0;
bool newBBcreatedForTailcallStress;
bool passedStressModeValidation;
newBBcreatedForTailcallStress = false;
passedStressModeValidation = true;
if (compIsForInlining())
{
if (compDonotInline())
{
return;
}
// We rule out inlinees with explicit tail calls in fgMakeBasicBlocks.
assert((prefixFlags & PREFIX_TAILCALL_EXPLICIT) == 0);
}
else
{
if (compTailCallStress())
{
// Have we created a new BB after the "call" instruction in fgMakeBasicBlocks()?
// Tail call stress only recognizes call+ret patterns and forces them to be
// explicit tail prefixed calls. Also, under tail call stress, fgMakeBasicBlocks()
// doesn't import the 'ret' opcode following the call into the basic block containing
// the call; instead it imports it into a new basic block. Note that fgMakeBasicBlocks()
// already checks that there is an opcode following the call, so it is safe here to
// read the next opcode without a bounds check.
newBBcreatedForTailcallStress =
impOpcodeIsCallOpcode(opcode) && // Current opcode is a CALL, (not a CEE_NEWOBJ). So, don't
// make it jump to RET.
(OPCODE)getU1LittleEndian(codeAddr + sz) == CEE_RET; // Next opcode is a CEE_RET
bool hasTailPrefix = (prefixFlags & PREFIX_TAILCALL_EXPLICIT);
if (newBBcreatedForTailcallStress && !hasTailPrefix)
{
// Do a more detailed evaluation of legality
const bool returnFalseIfInvalid = true;
const bool passedConstraintCheck =
verCheckTailCallConstraint(opcode, &resolvedToken,
constraintCall ? &constrainedResolvedToken : nullptr,
returnFalseIfInvalid);
if (passedConstraintCheck)
{
// Now check with the runtime
CORINFO_METHOD_HANDLE declaredCalleeHnd = callInfo.hMethod;
bool isVirtual = (callInfo.kind == CORINFO_VIRTUALCALL_STUB) ||
(callInfo.kind == CORINFO_VIRTUALCALL_VTABLE);
CORINFO_METHOD_HANDLE exactCalleeHnd = isVirtual ? nullptr : declaredCalleeHnd;
if (info.compCompHnd->canTailCall(info.compMethodHnd, declaredCalleeHnd, exactCalleeHnd,
hasTailPrefix)) // Is it legal to do tailcall?
{
// Stress the tailcall.
JITDUMP(" (Tailcall stress: prefixFlags |= PREFIX_TAILCALL_EXPLICIT)");
prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
prefixFlags |= PREFIX_TAILCALL_STRESS;
}
else
{
// Runtime disallows this tail call
JITDUMP(" (Tailcall stress: runtime preventing tailcall)");
passedStressModeValidation = false;
}
}
else
{
// Constraints disallow this tail call
JITDUMP(" (Tailcall stress: constraint check failed)");
passedStressModeValidation = false;
}
}
}
}
// This is split up to avoid goto flow warnings.
bool isRecursive;
isRecursive = !compIsForInlining() && (callInfo.hMethod == info.compMethodHnd);
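// Remember whether this call targets the method being compiled; recursive call sites get
// special consideration for implicit tail calls (a recursive tail call may later be turned
// into a loop).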
// If we've already disqualified this call as a tail call under tail call stress,
// don't consider it for implicit tail calling either.
//
// When not running under tail call stress, we may mark this call as an implicit
// tail call candidate. We'll do an "equivalent" validation during impImportCall.
//
// Note that when running under tail call stress, a call marked as explicit
// tail prefixed will not be considered for implicit tail calling.
if (passedStressModeValidation &&
impIsImplicitTailCallCandidate(opcode, codeAddr + sz, codeEndp, prefixFlags, isRecursive))
{
if (compIsForInlining())
{
#if FEATURE_TAILCALL_OPT_SHARED_RETURN
// Are we inlining at an implicit tail call site? If so, then we can flag
// implicit tail call sites in the inline body. These call sites
// often end up in non BBJ_RETURN blocks, so only flag them when
// we're able to handle shared returns.
if (impInlineInfo->iciCall->IsImplicitTailCall())
{
JITDUMP("\n (Inline Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
}
#endif // FEATURE_TAILCALL_OPT_SHARED_RETURN
}
else
{
JITDUMP("\n (Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
}
}
// Treat this call as tail call for verification only if "tail" prefixed (i.e. explicit tail call).
explicitTailCall = (prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0;
readonlyCall = (prefixFlags & PREFIX_READONLY) != 0;
if (opcode != CEE_CALLI && opcode != CEE_NEWOBJ)
{
// All calls and delegates need a security callout.
// For delegates, this is the call to the delegate constructor, not the access check on the
// LD(virt)FTN.
impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
}
callTyp = impImportCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
newObjThisPtr, prefixFlags, &callInfo, opcodeOffs);
if (compDonotInline())
{
// We do not check failures after lvaGrabTemp; this is covered by the CoreCLR_13272 issue.
assert((callTyp == TYP_UNDEF) ||
(compInlineResult->GetObservation() == InlineObservation::CALLSITE_TOO_MANY_LOCALS));
return;
}
if (explicitTailCall || newBBcreatedForTailcallStress) // If newBBcreatedForTailcallStress is true, we
// have created a new BB after the "call"
// instruction in fgMakeBasicBlocks(). So we need to jump to RET regardless.
{
assert(!compIsForInlining());
goto RET;
}
break;
case CEE_LDFLD:
case CEE_LDSFLD:
case CEE_LDFLDA:
case CEE_LDSFLDA:
{
bool isLoadAddress = (opcode == CEE_LDFLDA || opcode == CEE_LDSFLDA);
bool isLoadStatic = (opcode == CEE_LDSFLD || opcode == CEE_LDSFLDA);
/* Get the CP_Fieldref index */
assertImp(sz == sizeof(unsigned));
_impResolveToken(CORINFO_TOKENKIND_Field);
JITDUMP(" %08X", resolvedToken.token);
int aflags = isLoadAddress ? CORINFO_ACCESS_ADDRESS : CORINFO_ACCESS_GET;
GenTree* obj = nullptr;
typeInfo* tiObj = nullptr;
CORINFO_CLASS_HANDLE objType = nullptr; // used for fields
if (opcode == CEE_LDFLD || opcode == CEE_LDFLDA)
{
tiObj = &impStackTop().seTypeInfo;
StackEntry se = impPopStack();
objType = se.seTypeInfo.GetClassHandle();
obj = se.val;
if (impIsThis(obj))
{
aflags |= CORINFO_ACCESS_THIS;
}
}
eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
// Figure out the type of the member. We always call canAccessField, so you always need this
// handle
CorInfoType ciType = fieldInfo.fieldType;
clsHnd = fieldInfo.structType;
lclTyp = JITtype2varType(ciType);
if (compIsForInlining())
{
switch (fieldInfo.fieldAccessor)
{
case CORINFO_FIELD_INSTANCE_HELPER:
case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
case CORINFO_FIELD_STATIC_ADDR_HELPER:
case CORINFO_FIELD_STATIC_TLS:
compInlineResult->NoteFatal(InlineObservation::CALLEE_LDFLD_NEEDS_HELPER);
return;
case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
/* We may be able to inline the field accessors in specific instantiations of generic
* methods */
compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDFLD_NEEDS_HELPER);
return;
default:
break;
}
if (!isLoadAddress && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && lclTyp == TYP_STRUCT &&
clsHnd)
{
if ((info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd) == CORINFO_TYPE_UNDEF) &&
!(info.compFlags & CORINFO_FLG_FORCEINLINE))
{
// Loading a static valuetype field usually will cause a JitHelper to be called
// for the static base. This will bloat the code.
compInlineResult->Note(InlineObservation::CALLEE_LDFLD_STATIC_VALUECLASS);
if (compInlineResult->IsFailure())
{
return;
}
}
}
}
tiRetVal = verMakeTypeInfo(ciType, clsHnd);
if (isLoadAddress)
{
tiRetVal.MakeByRef();
}
else
{
tiRetVal.NormaliseForStack();
}
// Perform this check always to ensure that we get field access exceptions even with
// SkipVerification.
impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
// Raise InvalidProgramException if static load accesses non-static field
if (isLoadStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
{
BADCODE("static access on an instance field");
}
// We are using ldfld/ldflda on a static field. We allow it, but we need to evaluate any side effects from obj.
if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
{
if (obj->gtFlags & GTF_SIDE_EFFECT)
{
obj = gtUnusedValNode(obj);
impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtDI);
}
obj = nullptr;
}
/* Preserve 'small' int types */
if (!varTypeIsSmall(lclTyp))
{
lclTyp = genActualType(lclTyp);
}
bool usesHelper = false;
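// Build the field access tree according to how the EE says this field must be accessed.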
switch (fieldInfo.fieldAccessor)
{
case CORINFO_FIELD_INSTANCE:
#ifdef FEATURE_READYTORUN
case CORINFO_FIELD_INSTANCE_WITH_BASE:
#endif
{
// If the object is a struct, what we really want is
// for the field to operate on the address of the struct.
if (!varTypeGCtype(obj->TypeGet()) && impIsValueType(tiObj))
{
assert(opcode == CEE_LDFLD && objType != nullptr);
obj = impGetStructAddr(obj, objType, (unsigned)CHECK_SPILL_ALL, true);
}
/* Create the data member node */
op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset);
#ifdef FEATURE_READYTORUN
if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
{
op1->AsField()->gtFieldLookup = fieldInfo.fieldLookup;
}
#endif
op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
if (fgAddrCouldBeNull(obj))
{
op1->gtFlags |= GTF_EXCEPT;
}
// If the object is a BYREF then our target is a value class and
// it could point anywhere, for example a boxed class static int
if (obj->gtType == TYP_BYREF)
{
op1->gtFlags |= GTF_IND_TGTANYWHERE;
}
DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
if (StructHasOverlappingFields(typeFlags))
{
op1->AsField()->gtFldMayOverlap = true;
}
// Wrap it in an address-of operator if necessary
if (isLoadAddress)
{
op1 = gtNewOperNode(GT_ADDR,
(var_types)(varTypeIsGC(obj->TypeGet()) ? TYP_BYREF : TYP_I_IMPL), op1);
}
else
{
if (compIsForInlining() &&
impInlineIsGuaranteedThisDerefBeforeAnySideEffects(nullptr, nullptr, obj,
impInlineInfo->inlArgInfo))
{
impInlineInfo->thisDereferencedFirst = true;
}
}
}
break;
case CORINFO_FIELD_STATIC_TLS:
#ifdef TARGET_X86
// Legacy TLS access is implemented as intrinsic on x86 only
/* Create the data member node */
op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
if (isLoadAddress)
{
op1 = gtNewOperNode(GT_ADDR, (var_types)TYP_I_IMPL, op1);
}
break;
#else
fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
FALLTHROUGH;
#endif
case CORINFO_FIELD_STATIC_ADDR_HELPER:
case CORINFO_FIELD_INSTANCE_HELPER:
case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
clsHnd, nullptr);
usesHelper = true;
break;
case CORINFO_FIELD_STATIC_ADDRESS:
// Replace static read-only fields with constant if possible
if ((aflags & CORINFO_ACCESS_GET) && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_FINAL) &&
!(fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP) &&
(varTypeIsIntegral(lclTyp) || varTypeIsFloating(lclTyp)))
{
CorInfoInitClassResult initClassResult =
info.compCompHnd->initClass(resolvedToken.hField, info.compMethodHnd,
impTokenLookupContextHandle);
if (initClassResult & CORINFO_INITCLASS_INITIALIZED)
{
void** pFldAddr = nullptr;
void* fldAddr =
info.compCompHnd->getFieldAddress(resolvedToken.hField, (void**)&pFldAddr);
// We should always be able to access this static's address directly
//
assert(pFldAddr == nullptr);
op1 = impImportStaticReadOnlyField(fldAddr, lclTyp);
// Widen small types since we're propagating the value
// instead of producing an indir.
//
op1->gtType = genActualType(lclTyp);
goto FIELD_DONE;
}
}
FALLTHROUGH;
case CORINFO_FIELD_STATIC_RVA_ADDRESS:
case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
lclTyp);
break;
case CORINFO_FIELD_INTRINSIC_ZERO:
{
assert(aflags & CORINFO_ACCESS_GET);
// Widen to stack type
lclTyp = genActualType(lclTyp);
op1 = gtNewIconNode(0, lclTyp);
goto FIELD_DONE;
}
break;
case CORINFO_FIELD_INTRINSIC_EMPTY_STRING:
{
assert(aflags & CORINFO_ACCESS_GET);
// Import String.Empty as "" (GT_CNS_STR with a fake SconCPX = 0)
op1 = gtNewSconNode(EMPTY_STRING_SCON, nullptr);
goto FIELD_DONE;
}
break;
case CORINFO_FIELD_INTRINSIC_ISLITTLEENDIAN:
{
assert(aflags & CORINFO_ACCESS_GET);
// Widen to stack type
lclTyp = genActualType(lclTyp);
#if BIGENDIAN
op1 = gtNewIconNode(0, lclTyp);
#else
op1 = gtNewIconNode(1, lclTyp);
#endif
goto FIELD_DONE;
}
break;
default:
assert(!"Unexpected fieldAccessor");
}
if (!isLoadAddress)
{
if (prefixFlags & PREFIX_VOLATILE)
{
op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
if (!usesHelper)
{
assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
(op1->OperGet() == GT_OBJ));
op1->gtFlags |= GTF_IND_VOLATILE;
}
}
if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
{
if (!usesHelper)
{
assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
(op1->OperGet() == GT_OBJ));
op1->gtFlags |= GTF_IND_UNALIGNED;
}
}
}
/* Check if the class needs explicit initialization */
if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
{
GenTree* helperNode = impInitClass(&resolvedToken);
if (compDonotInline())
{
return;
}
if (helperNode != nullptr)
{
op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
}
}
FIELD_DONE:
impPushOnStack(op1, tiRetVal);
}
break;
case CEE_STFLD:
case CEE_STSFLD:
{
bool isStoreStatic = (opcode == CEE_STSFLD);
CORINFO_CLASS_HANDLE fieldClsHnd; // class of the field (if it's a ref type)
/* Get the CP_Fieldref index */
assertImp(sz == sizeof(unsigned));
_impResolveToken(CORINFO_TOKENKIND_Field);
JITDUMP(" %08X", resolvedToken.token);
int aflags = CORINFO_ACCESS_SET;
GenTree* obj = nullptr;
typeInfo* tiObj = nullptr;
typeInfo tiVal;
/* Pull the value from the stack */
StackEntry se = impPopStack();
op2 = se.val;
tiVal = se.seTypeInfo;
clsHnd = tiVal.GetClassHandle();
if (opcode == CEE_STFLD)
{
tiObj = &impStackTop().seTypeInfo;
obj = impPopStack().val;
if (impIsThis(obj))
{
aflags |= CORINFO_ACCESS_THIS;
}
}
eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
// Figure out the type of the member. We always call canAccessField, so you always need this
// handle
CorInfoType ciType = fieldInfo.fieldType;
fieldClsHnd = fieldInfo.structType;
lclTyp = JITtype2varType(ciType);
if (compIsForInlining())
{
/* Is this a 'special' (COM) field, a TLS ref static field, a field stored in the GC heap, or
 * a per-instantiation static? */
switch (fieldInfo.fieldAccessor)
{
case CORINFO_FIELD_INSTANCE_HELPER:
case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
case CORINFO_FIELD_STATIC_ADDR_HELPER:
case CORINFO_FIELD_STATIC_TLS:
compInlineResult->NoteFatal(InlineObservation::CALLEE_STFLD_NEEDS_HELPER);
return;
case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
/* We may be able to inline the field accessors in specific instantiations of generic
* methods */
compInlineResult->NoteFatal(InlineObservation::CALLSITE_STFLD_NEEDS_HELPER);
return;
default:
break;
}
}
impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
// Raise InvalidProgramException if static store accesses non-static field
if (isStoreStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
{
BADCODE("static access on an instance field");
}
// We are using stfld on a static field.
// We allow it, but need to eval any side-effects for obj
if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
{
if (obj->gtFlags & GTF_SIDE_EFFECT)
{
obj = gtUnusedValNode(obj);
impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtDI);
}
obj = nullptr;
}
/* Preserve 'small' int types */
if (!varTypeIsSmall(lclTyp))
{
lclTyp = genActualType(lclTyp);
}
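// Build the field store tree according to how the EE says this field must be accessed.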
switch (fieldInfo.fieldAccessor)
{
case CORINFO_FIELD_INSTANCE:
#ifdef FEATURE_READYTORUN
case CORINFO_FIELD_INSTANCE_WITH_BASE:
#endif
{
/* Create the data member node */
op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset);
DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
if (StructHasOverlappingFields(typeFlags))
{
op1->AsField()->gtFldMayOverlap = true;
}
#ifdef FEATURE_READYTORUN
if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
{
op1->AsField()->gtFieldLookup = fieldInfo.fieldLookup;
}
#endif
op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
if (fgAddrCouldBeNull(obj))
{
op1->gtFlags |= GTF_EXCEPT;
}
// If object is a BYREF then our target is a value class and
// it could point anywhere, for example a boxed class static int
if (obj->gtType == TYP_BYREF)
{
op1->gtFlags |= GTF_IND_TGTANYWHERE;
}
if (compIsForInlining() &&
impInlineIsGuaranteedThisDerefBeforeAnySideEffects(op2, nullptr, obj,
impInlineInfo->inlArgInfo))
{
impInlineInfo->thisDereferencedFirst = true;
}
}
break;
case CORINFO_FIELD_STATIC_TLS:
#ifdef TARGET_X86
// Legacy TLS access is implemented as intrinsic on x86 only
/* Create the data member node */
op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
break;
#else
fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
FALLTHROUGH;
#endif
case CORINFO_FIELD_STATIC_ADDR_HELPER:
case CORINFO_FIELD_INSTANCE_HELPER:
case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
clsHnd, op2);
goto SPILL_APPEND;
case CORINFO_FIELD_STATIC_ADDRESS:
case CORINFO_FIELD_STATIC_RVA_ADDRESS:
case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
lclTyp);
break;
default:
assert(!"Unexpected fieldAccessor");
}
// Create the member assignment, unless we have a TYP_STRUCT.
bool deferStructAssign = (lclTyp == TYP_STRUCT);
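// TYP_STRUCT stores are built later, via impAssignStruct, after the interference spills below.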
if (!deferStructAssign)
{
if (prefixFlags & PREFIX_VOLATILE)
{
assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
op1->gtFlags |= GTF_IND_VOLATILE;
}
if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
{
assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
op1->gtFlags |= GTF_IND_UNALIGNED;
}
/* V4.0 allows assignment of i4 constant values to i8 type vars when the IL verifier is bypassed (full
   trust apps). The reason this works is that the JIT stores an i4 constant in the GenTree union during
   importation and reads from the union as if it were a long during code generation. Though this
   can potentially read garbage, one can get lucky and have it work correctly.
   This code pattern is generated by the Dev10 MC++ compiler while storing to fields when compiled with
   the /O2 switch (the default when compiling retail configs in Dev10), and a customer app has taken a
   dependency on it. To be backward compatible, we will explicitly add an upward cast here so that
   it always works correctly.
   Note that this is limited to x86 alone as there is no back compat to be addressed for the Arm JIT
   for V4.0.
*/
CLANG_FORMAT_COMMENT_ANCHOR;
#ifndef TARGET_64BIT
// In UWP6.0 and beyond (post-.NET Core 2.0), we decided to let this cast from int to long be
// generated for ARM as well as x86, so the following IR will be accepted:
// STMTx (IL 0x... ???)
// * ASG long
// +--* CLS_VAR long
// \--* CNS_INT int 2
if ((op1->TypeGet() != op2->TypeGet()) && op2->OperIsConst() && varTypeIsIntOrI(op2->TypeGet()) &&
varTypeIsLong(op1->TypeGet()))
{
op2 = gtNewCastNode(op1->TypeGet(), op2, false, op1->TypeGet());
}
#endif
#ifdef TARGET_64BIT
// Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
{
op2->gtType = TYP_I_IMPL;
}
else
{
// Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
//
if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
{
op2 = gtNewCastNode(TYP_INT, op2, false, TYP_INT);
}
// Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
//
if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
{
op2 = gtNewCastNode(TYP_I_IMPL, op2, false, TYP_I_IMPL);
}
}
#endif
// We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
// We insert a cast to the dest 'op1' type
//
if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
varTypeIsFloating(op2->gtType))
{
op2 = gtNewCastNode(op1->TypeGet(), op2, false, op1->TypeGet());
}
op1 = gtNewAssignNode(op1, op2);
/* Mark the expression as containing an assignment */
op1->gtFlags |= GTF_ASG;
}
/* Check if the class needs explicit initialization */
if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
{
GenTree* helperNode = impInitClass(&resolvedToken);
if (compDonotInline())
{
return;
}
if (helperNode != nullptr)
{
op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
}
}
/* stfld can interfere with value classes (consider the sequence
ldloc, ldloca, ..., stfld, stloc). We will be conservative and
spill all value class references from the stack. */
if (obj && ((obj->gtType == TYP_BYREF) || (obj->gtType == TYP_I_IMPL)))
{
assert(tiObj);
// If we can resolve the field to be within some local,
// then just spill that local.
//
GenTreeLclVarCommon* const lcl = obj->IsLocalAddrExpr();
if (lcl != nullptr)
{
impSpillLclRefs(lcl->GetLclNum());
}
else if (impIsValueType(tiObj))
{
impSpillEvalStack();
}
else
{
impSpillValueClasses();
}
}
/* Spill any refs to the same member from the stack */
impSpillLclRefs((ssize_t)resolvedToken.hField);
/* stsfld also interferes with indirect accesses (for aliased
   statics) and calls. But we don't need to spill other statics
   since we have explicitly spilled this particular static field. */
impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STFLD"));
if (deferStructAssign)
{
op1 = impAssignStruct(op1, op2, clsHnd, (unsigned)CHECK_SPILL_ALL);
}
}
goto APPEND;
case CEE_NEWARR:
{
/* Get the class type index operand */
_impResolveToken(CORINFO_TOKENKIND_Newarr);
JITDUMP(" %08X", resolvedToken.token);
if (!opts.IsReadyToRun())
{
// Need to restore array classes before creating array objects on the heap
op1 = impTokenToHandle(&resolvedToken, nullptr, true /*mustRestoreHandle*/);
if (op1 == nullptr)
{ // compDonotInline()
return;
}
}
tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
accessAllowedResult =
info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
/* Form the arglist: array class handle, size */
op2 = impPopStack().val;
assertImp(genActualTypeIsIntOrI(op2->gtType));
#ifdef TARGET_64BIT
// The array helper takes a native int for array length.
// So if we have an int, explicitly extend it to be a native int.
if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
{
if (op2->IsIntegralConst())
{
op2->gtType = TYP_I_IMPL;
}
else
{
bool isUnsigned = false;
op2 = gtNewCastNode(TYP_I_IMPL, op2, isUnsigned, TYP_I_IMPL);
}
}
#endif // TARGET_64BIT
#ifdef FEATURE_READYTORUN
if (opts.IsReadyToRun())
{
op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEWARR_1, TYP_REF,
gtNewCallArgs(op2));
usingReadyToRunHelper = (op1 != nullptr);
if (!usingReadyToRunHelper)
{
// TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
// and the newarr call with a single call to a dynamic R2R cell that will:
// 1) Load the context
// 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
// 3) Allocate the new array
// Reason: performance (today, we'll always use the slow helper for the R2R generics case)
// Need to restore array classes before creating array objects on the heap
op1 = impTokenToHandle(&resolvedToken, nullptr, true /*mustRestoreHandle*/);
if (op1 == nullptr)
{ // compDonotInline()
return;
}
}
}
if (!usingReadyToRunHelper)
#endif
{
GenTreeCall::Use* args = gtNewCallArgs(op1, op2);
/* Create a call to 'new' */
// Note that this only works for shared generic code because the same helper is used for all
// reference array types
op1 = gtNewHelperCallNode(info.compCompHnd->getNewArrHelper(resolvedToken.hClass), TYP_REF, args);
}
op1->AsCall()->compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)resolvedToken.hClass;
/* Remember that this basic block contains a 'new' of a single-dimension (SD) array */
block->bbFlags |= BBF_HAS_NEWARRAY;
optMethodFlags |= OMF_HAS_NEWARRAY;
/* Push the result of the call on the stack */
impPushOnStack(op1, tiRetVal);
callTyp = TYP_REF;
}
break;
case CEE_LOCALLOC:
// We don't allow locallocs inside handlers
if (block->hasHndIndex())
{
BADCODE("Localloc can't be inside handler");
}
// Get the size to allocate
op2 = impPopStack().val;
assertImp(genActualTypeIsIntOrI(op2->gtType));
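// ECMA-335 requires the evaluation stack to be empty (other than the size argument, which
// we just popped) at a localloc.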
if (verCurrentState.esStackDepth != 0)
{
BADCODE("Localloc can only be used when the stack is empty");
}
// If the localloc is not in a loop and its size is a small constant,
// create a new local var of TYP_BLK and return its address.
{
bool convertedToLocal = false;
// Need to aggressively fold here, as even fixed-size locallocs
// will have casts in the way.
op2 = gtFoldExpr(op2);
if (op2->IsIntegralConst())
{
const ssize_t allocSize = op2->AsIntCon()->IconValue();
bool bbInALoop = impBlockIsInALoop(block);
if (allocSize == 0)
{
// Result is nullptr
JITDUMP("Converting stackalloc of 0 bytes to push null unmanaged pointer\n");
op1 = gtNewIconNode(0, TYP_I_IMPL);
convertedToLocal = true;
}
else if ((allocSize > 0) && !bbInALoop)
{
// Get the size threshold for local conversion
ssize_t maxSize = DEFAULT_MAX_LOCALLOC_TO_LOCAL_SIZE;
#ifdef DEBUG
// Optionally allow this to be modified
maxSize = JitConfig.JitStackAllocToLocalSize();
#endif // DEBUG
if (allocSize <= maxSize)
{
const unsigned stackallocAsLocal = lvaGrabTemp(false DEBUGARG("stackallocLocal"));
JITDUMP("Converting stackalloc of %zd bytes to new local V%02u\n", allocSize,
stackallocAsLocal);
lvaTable[stackallocAsLocal].lvType = TYP_BLK;
lvaTable[stackallocAsLocal].lvExactSize = (unsigned)allocSize;
lvaTable[stackallocAsLocal].lvIsUnsafeBuffer = true;
op1 = gtNewLclvNode(stackallocAsLocal, TYP_BLK);
op1 = gtNewOperNode(GT_ADDR, TYP_I_IMPL, op1);
convertedToLocal = true;
if (!this->opts.compDbgEnC)
{
// Ensure we have stack security for this method.
// Reorder layout since the converted localloc is treated as an unsafe buffer.
setNeedsGSSecurityCookie();
compGSReorderStackLayout = true;
}
}
}
}
if (!convertedToLocal)
{
// Bail out if inlining and the localloc was not converted.
//
// Note we might consider allowing the inline, if the call
// site is not in a loop.
if (compIsForInlining())
{
InlineObservation obs = op2->IsIntegralConst()
? InlineObservation::CALLEE_LOCALLOC_TOO_LARGE
: InlineObservation::CALLSITE_LOCALLOC_SIZE_UNKNOWN;
compInlineResult->NoteFatal(obs);
return;
}
op1 = gtNewOperNode(GT_LCLHEAP, TYP_I_IMPL, op2);
// May throw a stack overflow exception. Obviously, we don't want locallocs to be CSE'd.
op1->gtFlags |= (GTF_EXCEPT | GTF_DONT_CSE);
// Ensure we have stack security for this method.
setNeedsGSSecurityCookie();
/* The FP register may not be back to the original value at the end
of the method, even if the frame size is 0, as localloc may
have modified it. So we will HAVE to reset it */
compLocallocUsed = true;
}
else
{
compLocallocOptimized = true;
}
}
impPushOnStack(op1, tiRetVal);
break;
case CEE_ISINST:
{
/* Get the type token */
assertImp(sz == sizeof(unsigned));
_impResolveToken(CORINFO_TOKENKIND_Casting);
JITDUMP(" %08X", resolvedToken.token);
if (!opts.IsReadyToRun())
{
op2 = impTokenToHandle(&resolvedToken, nullptr, false);
if (op2 == nullptr)
{ // compDonotInline()
return;
}
}
accessAllowedResult =
info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
op1 = impPopStack().val;
GenTree* optTree = impOptimizeCastClassOrIsInst(op1, &resolvedToken, false);
if (optTree != nullptr)
{
impPushOnStack(optTree, tiRetVal);
}
else
{
#ifdef FEATURE_READYTORUN
if (opts.IsReadyToRun())
{
GenTreeCall* opLookup =
impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_ISINSTANCEOF, TYP_REF,
gtNewCallArgs(op1));
usingReadyToRunHelper = (opLookup != nullptr);
op1 = (usingReadyToRunHelper ? opLookup : op1);
if (!usingReadyToRunHelper)
{
// TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
// and the isinstanceof_any call with a single call to a dynamic R2R cell that will:
// 1) Load the context
// 2) Perform the generic dictionary lookup and caching, and generate the appropriate
// stub
// 3) Perform the 'is instance' check on the input object
// Reason: performance (today, we'll always use the slow helper for the R2R generics case)
op2 = impTokenToHandle(&resolvedToken, nullptr, false);
if (op2 == nullptr)
{ // compDonotInline()
return;
}
}
}
if (!usingReadyToRunHelper)
#endif
{
op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, false, opcodeOffs);
}
if (compDonotInline())
{
return;
}
impPushOnStack(op1, tiRetVal);
}
break;
}
case CEE_REFANYVAL:
// get the class handle and make a ICON node out of it
_impResolveToken(CORINFO_TOKENKIND_Class);
JITDUMP(" %08X", resolvedToken.token);
op2 = impTokenToHandle(&resolvedToken);
if (op2 == nullptr)
{ // compDonotInline()
return;
}
op1 = impPopStack().val;
// make certain it is normalized;
op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
// Call helper GETREFANY(classHandle, op1);
op1 = gtNewHelperCallNode(CORINFO_HELP_GETREFANY, TYP_BYREF, gtNewCallArgs(op2, op1));
impPushOnStack(op1, tiRetVal);
break;
case CEE_REFANYTYPE:
op1 = impPopStack().val;
// make certain it is normalized;
op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
if (op1->gtOper == GT_OBJ)
{
// Get the address of the refany
op1 = op1->AsOp()->gtOp1;
// Fetch the type from the correct slot
op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
gtNewIconNode(OFFSETOF__CORINFO_TypedReference__type, TYP_I_IMPL));
op1 = gtNewOperNode(GT_IND, TYP_BYREF, op1);
}
else
{
assertImp(op1->gtOper == GT_MKREFANY);
// The pointer may have side-effects
if (op1->AsOp()->gtOp1->gtFlags & GTF_SIDE_EFFECT)
{
impAppendTree(op1->AsOp()->gtOp1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI);
#ifdef DEBUG
impNoteLastILoffs();
#endif
}
// We already have the class handle
op1 = op1->AsOp()->gtOp2;
}
// convert native TypeHandle to RuntimeTypeHandle
{
GenTreeCall::Use* helperArgs = gtNewCallArgs(op1);
op1 = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE_MAYBENULL, TYP_STRUCT,
helperArgs);
CORINFO_CLASS_HANDLE classHandle = impGetTypeHandleClass();
// The handle struct is returned in register
op1->AsCall()->gtReturnType = GetRuntimeHandleUnderlyingType();
op1->AsCall()->gtRetClsHnd = classHandle;
#if FEATURE_MULTIREG_RET
op1->AsCall()->InitializeStructReturnType(this, classHandle, op1->AsCall()->GetUnmanagedCallConv());
#endif
tiRetVal = typeInfo(TI_STRUCT, classHandle);
}
impPushOnStack(op1, tiRetVal);
break;
case CEE_LDTOKEN:
{
/* Get the Class index */
assertImp(sz == sizeof(unsigned));
lastLoadToken = codeAddr;
_impResolveToken(CORINFO_TOKENKIND_Ldtoken);
tokenType = info.compCompHnd->getTokenTypeAsHandle(&resolvedToken);
op1 = impTokenToHandle(&resolvedToken, nullptr, true);
if (op1 == nullptr)
{ // compDonotInline()
return;
}
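// Pick the handle-conversion helper based on the kind of token: type handle (default), method, or field.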
helper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE;
assert(resolvedToken.hClass != nullptr);
if (resolvedToken.hMethod != nullptr)
{
helper = CORINFO_HELP_METHODDESC_TO_STUBRUNTIMEMETHOD;
}
else if (resolvedToken.hField != nullptr)
{
helper = CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD;
}
GenTreeCall::Use* helperArgs = gtNewCallArgs(op1);
op1 = gtNewHelperCallNode(helper, TYP_STRUCT, helperArgs);
// The handle struct is returned in register and
// it could be consumed both as `TYP_STRUCT` and `TYP_REF`.
op1->AsCall()->gtReturnType = GetRuntimeHandleUnderlyingType();
#if FEATURE_MULTIREG_RET
op1->AsCall()->InitializeStructReturnType(this, tokenType, op1->AsCall()->GetUnmanagedCallConv());
#endif
op1->AsCall()->gtRetClsHnd = tokenType;
tiRetVal = verMakeTypeInfo(tokenType);
impPushOnStack(op1, tiRetVal);
}
break;
case CEE_UNBOX:
case CEE_UNBOX_ANY:
{
/* Get the Class index */
assertImp(sz == sizeof(unsigned));
_impResolveToken(CORINFO_TOKENKIND_Class);
JITDUMP(" %08X", resolvedToken.token);
bool runtimeLookup;
op2 = impTokenToHandle(&resolvedToken, &runtimeLookup);
if (op2 == nullptr)
{
assert(compDonotInline());
return;
}
// Run this always so we can get access exceptions even with SkipVerification.
accessAllowedResult =
info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
if (opcode == CEE_UNBOX_ANY && !eeIsValueClass(resolvedToken.hClass))
{
JITDUMP("\n Importing UNBOX.ANY(refClass) as CASTCLASS\n");
op1 = impPopStack().val;
goto CASTCLASS;
}
/* Pop the object and create the unbox helper call */
/* You might think that for UNBOX_ANY we need to push a different */
/* (non-byref) type, but here we're making the tiRetVal that is used */
/* for the intermediate pointer which we then transfer onto the OBJ */
/* instruction. OBJ then creates the appropriate tiRetVal. */
op1 = impPopStack().val;
assertImp(op1->gtType == TYP_REF);
helper = info.compCompHnd->getUnBoxHelper(resolvedToken.hClass);
assert(helper == CORINFO_HELP_UNBOX || helper == CORINFO_HELP_UNBOX_NULLABLE);
// Check legality and profitability of inline expansion for unboxing.
const bool canExpandInline = (helper == CORINFO_HELP_UNBOX);
const bool shouldExpandInline = !compCurBB->isRunRarely() && opts.OptimizationEnabled();
if (canExpandInline && shouldExpandInline)
{
// See if we know anything about the type of op1, the object being unboxed.
bool isExact = false;
bool isNonNull = false;
CORINFO_CLASS_HANDLE clsHnd = gtGetClassHandle(op1, &isExact, &isNonNull);
// We can skip the "exact" bit here as we are comparing to a value class.
// compareTypesForEquality should bail on comparisons for shared value classes.
if (clsHnd != NO_CLASS_HANDLE)
{
const TypeCompareState compare =
info.compCompHnd->compareTypesForEquality(resolvedToken.hClass, clsHnd);
if (compare == TypeCompareState::Must)
{
JITDUMP("\nOptimizing %s (%s) -- type test will succeed\n",
opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY", eeGetClassName(clsHnd));
// For UNBOX, null check (if necessary), and then leave the box payload byref on the stack.
if (opcode == CEE_UNBOX)
{
GenTree* cloneOperand;
op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("optimized unbox clone"));
GenTree* boxPayloadOffset = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL);
GenTree* boxPayloadAddress =
gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, boxPayloadOffset);
GenTree* nullcheck = gtNewNullCheck(op1, block);
GenTree* result = gtNewOperNode(GT_COMMA, TYP_BYREF, nullcheck, boxPayloadAddress);
impPushOnStack(result, tiRetVal);
break;
}
// For UNBOX.ANY load the struct from the box payload byref (the load will nullcheck)
assert(opcode == CEE_UNBOX_ANY);
GenTree* boxPayloadOffset = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL);
GenTree* boxPayloadAddress = gtNewOperNode(GT_ADD, TYP_BYREF, op1, boxPayloadOffset);
impPushOnStack(boxPayloadAddress, tiRetVal);
oper = GT_OBJ;
goto OBJ;
}
else
{
JITDUMP("\nUnable to optimize %s -- can't resolve type comparison\n",
opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY");
}
}
else
{
JITDUMP("\nUnable to optimize %s -- class for [%06u] not known\n",
opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY", dspTreeID(op1));
}
JITDUMP("\n Importing %s as inline sequence\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY");
// we are doing normal unboxing
// inline the common case of the unbox helper
// UNBOX(exp) morphs into
// clone = pop(exp);
// ((*clone == typeToken) ? nop : helper(clone, typeToken));
// push(clone + TARGET_POINTER_SIZE)
//
GenTree* cloneOperand;
op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("inline UNBOX clone1"));
op1 = gtNewMethodTableLookup(op1);
GenTree* condBox = gtNewOperNode(GT_EQ, TYP_INT, op1, op2);
op1 = impCloneExpr(cloneOperand, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("inline UNBOX clone2"));
op2 = impTokenToHandle(&resolvedToken);
if (op2 == nullptr)
{ // compDonotInline()
return;
}
op1 = gtNewHelperCallNode(helper, TYP_VOID, gtNewCallArgs(op2, op1));
op1 = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), op1);
op1 = gtNewQmarkNode(TYP_VOID, condBox, op1->AsColon());
// QMARK nodes cannot reside on the evaluation stack. Because there
// may be other trees on the evaluation stack that side-effect the
// sources of the UNBOX operation we must spill the stack.
impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI);
// Create the address-expression to reference past the object header
// to the beginning of the value-type. Today this means adjusting
// past the base of the object's vtable field, which is pointer sized.
op2 = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL);
op1 = gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, op2);
}
else
{
JITDUMP("\n Importing %s as helper call because %s\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY",
canExpandInline ? "want smaller code or faster jitting" : "inline expansion not legal");
// Don't optimize, just call the helper and be done with it
op1 = gtNewHelperCallNode(helper,
(var_types)((helper == CORINFO_HELP_UNBOX) ? TYP_BYREF : TYP_STRUCT),
gtNewCallArgs(op2, op1));
if (op1->gtType == TYP_STRUCT)
{
op1->AsCall()->gtRetClsHnd = resolvedToken.hClass;
}
}
assert((helper == CORINFO_HELP_UNBOX && op1->gtType == TYP_BYREF) || // Unbox helper returns a byref.
(helper == CORINFO_HELP_UNBOX_NULLABLE &&
varTypeIsStruct(op1)) // UnboxNullable helper returns a struct.
);
/*
  -----------------------------------------------------------------------------
  | \ helper  | CORINFO_HELP_UNBOX          | CORINFO_HELP_UNBOX_NULLABLE     |
  |   \       | (which returns a BYREF)     | (which returns a STRUCT)        |
  | opcode  \ |                             |                                 |
  |---------------------------------------------------------------------------|
  | UNBOX     | push the BYREF              | spill the STRUCT to a local,    |
  |           |                             | push the BYREF to this local    |
  |---------------------------------------------------------------------------|
  | UNBOX_ANY | push a GT_OBJ of the BYREF  | push the STRUCT                 |
  |           |                             | For Linux, when the struct is   |
  |           |                             | returned in two registers,      |
  |           |                             | create a temp whose address is  |
  |           |                             | passed to the unbox_nullable    |
  |           |                             | helper.                         |
  -----------------------------------------------------------------------------
*/
if (opcode == CEE_UNBOX)
{
if (helper == CORINFO_HELP_UNBOX_NULLABLE)
{
// The unbox nullable helper returns a struct type.
// We need to spill it to a temp so that we can take the address of it.
// Here we need the unsafe value class check, since the address of the struct is taken
// to be used further along and is potentially exploitable.
unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a nullable"));
lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
op2 = gtNewLclvNode(tmp, TYP_STRUCT);
op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
op2 = gtNewLclvNode(tmp, TYP_STRUCT);
op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
}
assert(op1->gtType == TYP_BYREF);
}
else
{
assert(opcode == CEE_UNBOX_ANY);
if (helper == CORINFO_HELP_UNBOX)
{
// Normal unbox helper returns a TYP_BYREF.
impPushOnStack(op1, tiRetVal);
oper = GT_OBJ;
goto OBJ;
}
assert(helper == CORINFO_HELP_UNBOX_NULLABLE && "Make sure the helper is nullable!");
#if FEATURE_MULTIREG_RET
if (varTypeIsStruct(op1) &&
IsMultiRegReturnedType(resolvedToken.hClass, CorInfoCallConvExtension::Managed))
{
// Unbox nullable helper returns a TYP_STRUCT.
// For the multi-reg case we need to spill it to a temp so that
// we can pass the address to the unbox_nullable jit helper.
unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a register returnable nullable"));
lvaTable[tmp].lvIsMultiRegArg = true;
lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
op2 = gtNewLclvNode(tmp, TYP_STRUCT);
op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
op2 = gtNewLclvNode(tmp, TYP_STRUCT);
op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
// In this case the return value of the unbox helper is TYP_BYREF.
// Make sure the right type is placed on the operand type stack.
impPushOnStack(op1, tiRetVal);
// Load the struct.
oper = GT_OBJ;
assert(op1->gtType == TYP_BYREF);
goto OBJ;
}
else
#endif // !FEATURE_MULTIREG_RET
{
// If the struct is not register-passable, we have it materialized in the RetBuf.
assert(op1->gtType == TYP_STRUCT);
tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
assert(tiRetVal.IsValueClass());
}
}
impPushOnStack(op1, tiRetVal);
}
break;
case CEE_BOX:
{
/* Get the Class index */
assertImp(sz == sizeof(unsigned));
_impResolveToken(CORINFO_TOKENKIND_Box);
JITDUMP(" %08X", resolvedToken.token);
accessAllowedResult =
info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
// Note BOX can be used on things that are not value classes, in which
// case we get a NOP. However the verifier's view of the type on the
// stack changes (in generic code a 'T' becomes a 'boxed T')
if (!eeIsValueClass(resolvedToken.hClass))
{
JITDUMP("\n Importing BOX(refClass) as NOP\n");
verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo = tiRetVal;
break;
}
// Look ahead for box idioms
int matched = impBoxPatternMatch(&resolvedToken, codeAddr + sz, codeEndp);
if (matched >= 0)
{
// Skip the matched IL instructions
sz += matched;
break;
}
impImportAndPushBox(&resolvedToken);
if (compDonotInline())
{
return;
}
}
break;
case CEE_SIZEOF:
/* Get the Class index */
assertImp(sz == sizeof(unsigned));
_impResolveToken(CORINFO_TOKENKIND_Class);
JITDUMP(" %08X", resolvedToken.token);
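// sizeof folds to a compile-time constant: the class size as reported by the EE.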
op1 = gtNewIconNode(info.compCompHnd->getClassSize(resolvedToken.hClass));
impPushOnStack(op1, tiRetVal);
break;
case CEE_CASTCLASS:
/* Get the Class index */
assertImp(sz == sizeof(unsigned));
_impResolveToken(CORINFO_TOKENKIND_Casting);
JITDUMP(" %08X", resolvedToken.token);
if (!opts.IsReadyToRun())
{
op2 = impTokenToHandle(&resolvedToken, nullptr, false);
if (op2 == nullptr)
{ // compDonotInline()
return;
}
}
accessAllowedResult =
info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
op1 = impPopStack().val;
/* Pop the address and create the 'checked cast' helper call */
// At this point we expect typeRef to contain the token, op1 to contain the value being cast,
// and op2 to contain code that creates the type handle corresponding to typeRef
CASTCLASS:
{
GenTree* optTree = impOptimizeCastClassOrIsInst(op1, &resolvedToken, true);
if (optTree != nullptr)
{
impPushOnStack(optTree, tiRetVal);
}
else
{
#ifdef FEATURE_READYTORUN
if (opts.IsReadyToRun())
{
GenTreeCall* opLookup =
impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_CHKCAST, TYP_REF,
gtNewCallArgs(op1));
usingReadyToRunHelper = (opLookup != nullptr);
op1 = (usingReadyToRunHelper ? opLookup : op1);
if (!usingReadyToRunHelper)
{
// TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
// and the chkcastany call with a single call to a dynamic R2R cell that will:
// 1) Load the context
// 2) Perform the generic dictionary lookup and caching, and generate the appropriate
// stub
// 3) Check the object on the stack for the type-cast
// Reason: performance (today, we'll always use the slow helper for the R2R generics case)
op2 = impTokenToHandle(&resolvedToken, nullptr, false);
if (op2 == nullptr)
{ // compDonotInline()
return;
}
}
}
if (!usingReadyToRunHelper)
#endif
{
op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, true, opcodeOffs);
}
if (compDonotInline())
{
return;
}
/* Push the result back on the stack */
impPushOnStack(op1, tiRetVal);
}
}
break;
case CEE_THROW:
// Any block with a throw is rarely executed.
block->bbSetRunRarely();
// Pop the exception object and create the 'throw' helper call
op1 = gtNewHelperCallNode(CORINFO_HELP_THROW, TYP_VOID, gtNewCallArgs(impPopStack().val));
// Fall through to clear out the eval stack.
EVAL_APPEND:
if (verCurrentState.esStackDepth > 0)
{
impEvalSideEffects();
}
assert(verCurrentState.esStackDepth == 0);
goto APPEND;
case CEE_RETHROW:
assert(!compIsForInlining());
if (info.compXcptnsCount == 0)
{
BADCODE("rethrow outside catch");
}
/* Create the 'rethrow' helper call */
op1 = gtNewHelperCallNode(CORINFO_HELP_RETHROW, TYP_VOID);
goto EVAL_APPEND;
case CEE_INITOBJ:
assertImp(sz == sizeof(unsigned));
_impResolveToken(CORINFO_TOKENKIND_Class);
JITDUMP(" %08X", resolvedToken.token);
op2 = gtNewIconNode(0); // Value
op1 = impPopStack().val; // Dest
if (eeIsValueClass(resolvedToken.hClass))
{
op1 = gtNewStructVal(resolvedToken.hClass, op1);
if (op1->OperIs(GT_OBJ))
{
gtSetObjGcInfo(op1->AsObj());
}
}
else
{
size = info.compCompHnd->getClassSize(resolvedToken.hClass);
assert(size == TARGET_POINTER_SIZE);
op1 = gtNewBlockVal(op1, size);
}
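// initobj becomes a zero-initializing block store of the destination.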
op1 = gtNewBlkOpNode(op1, op2, (prefixFlags & PREFIX_VOLATILE) != 0, false);
goto SPILL_APPEND;
case CEE_INITBLK:
op3 = impPopStack().val; // Size
op2 = impPopStack().val; // Value
op1 = impPopStack().val; // Dst addr
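// If the size is a compile-time constant we can build a fixed-size block store;
// otherwise fall back to a dynamic block store (GT_STORE_DYN_BLK).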
if (op3->IsCnsIntOrI())
{
size = (unsigned)op3->AsIntConCommon()->IconValue();
op1 = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, typGetBlkLayout(size));
op1 = gtNewBlkOpNode(op1, op2, (prefixFlags & PREFIX_VOLATILE) != 0, false);
}
else
{
if (!op2->IsIntegralConst(0))
{
op2 = gtNewOperNode(GT_INIT_VAL, TYP_INT, op2);
}
op1 = new (this, GT_STORE_DYN_BLK) GenTreeStoreDynBlk(op1, op2, op3);
size = 0;
if ((prefixFlags & PREFIX_VOLATILE) != 0)
{
op1->gtFlags |= GTF_BLK_VOLATILE;
}
}
goto SPILL_APPEND;
case CEE_CPBLK:
op3 = impPopStack().val; // Size
op2 = impPopStack().val; // Src addr
op1 = impPopStack().val; // Dst addr
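// For the source, use the underlying location directly when we have its address-of node;
// otherwise read the source through an indirection.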
if (op2->OperGet() == GT_ADDR)
{
op2 = op2->AsOp()->gtOp1;
}
else
{
op2 = gtNewOperNode(GT_IND, TYP_STRUCT, op2);
}
if (op3->IsCnsIntOrI())
{
size = (unsigned)op3->AsIntConCommon()->IconValue();
op1 = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, typGetBlkLayout(size));
op1 = gtNewBlkOpNode(op1, op2, (prefixFlags & PREFIX_VOLATILE) != 0, true);
}
else
{
op1 = new (this, GT_STORE_DYN_BLK) GenTreeStoreDynBlk(op1, op2, op3);
size = 0;
if ((prefixFlags & PREFIX_VOLATILE) != 0)
{
op1->gtFlags |= GTF_BLK_VOLATILE;
}
}
goto SPILL_APPEND;
case CEE_CPOBJ:
assertImp(sz == sizeof(unsigned));
_impResolveToken(CORINFO_TOKENKIND_Class);
JITDUMP(" %08X", resolvedToken.token);
if (!eeIsValueClass(resolvedToken.hClass))
{
op1 = impPopStack().val; // address to load from
impBashVarAddrsToI(op1);
assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
impPushOnStack(op1, typeInfo());
opcode = CEE_STIND_REF;
lclTyp = TYP_REF;
goto STIND;
}
op2 = impPopStack().val; // Src
op1 = impPopStack().val; // Dest
op1 = gtNewCpObjNode(op1, op2, resolvedToken.hClass, ((prefixFlags & PREFIX_VOLATILE) != 0));
goto SPILL_APPEND;
case CEE_STOBJ:
{
assertImp(sz == sizeof(unsigned));
_impResolveToken(CORINFO_TOKENKIND_Class);
JITDUMP(" %08X", resolvedToken.token);
if (eeIsValueClass(resolvedToken.hClass))
{
lclTyp = TYP_STRUCT;
}
else
{
lclTyp = TYP_REF;
}
if (lclTyp == TYP_REF)
{
opcode = CEE_STIND_REF;
goto STIND;
}
CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
if (impIsPrimitive(jitTyp))
{
lclTyp = JITtype2varType(jitTyp);
goto STIND;
}
op2 = impPopStack().val; // Value
op1 = impPopStack().val; // Ptr
assertImp(varTypeIsStruct(op2));
op1 = impAssignStructPtr(op1, op2, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
if (op1->OperIsBlkOp() && (prefixFlags & PREFIX_UNALIGNED))
{
op1->gtFlags |= GTF_BLK_UNALIGNED;
}
goto SPILL_APPEND;
}
case CEE_MKREFANY:
assert(!compIsForInlining());
// Being lazy here. Refanys are tricky in terms of gc tracking.
// Since it is uncommon, just don't perform struct promotion in any method that contains mkrefany.
JITDUMP("disabling struct promotion because of mkrefany\n");
fgNoStructPromotion = true;
oper = GT_MKREFANY;
assertImp(sz == sizeof(unsigned));
_impResolveToken(CORINFO_TOKENKIND_Class);
JITDUMP(" %08X", resolvedToken.token);
op2 = impTokenToHandle(&resolvedToken, nullptr, true);
if (op2 == nullptr)
{ // compDonotInline()
return;
}
accessAllowedResult =
info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
op1 = impPopStack().val;
// @SPECVIOLATION: TYP_INT should not be allowed here by a strict reading of the spec.
// But JIT32 allowed it, so we continue to allow it.
assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL || op1->TypeGet() == TYP_INT);
// MKREFANY returns a struct. op2 is the class token.
op1 = gtNewOperNode(oper, TYP_STRUCT, op1, op2);
impPushOnStack(op1, verMakeTypeInfo(impGetRefAnyClass()));
break;
case CEE_LDOBJ:
{
oper = GT_OBJ;
assertImp(sz == sizeof(unsigned));
_impResolveToken(CORINFO_TOKENKIND_Class);
JITDUMP(" %08X", resolvedToken.token);
OBJ:
tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
if (eeIsValueClass(resolvedToken.hClass))
{
lclTyp = TYP_STRUCT;
}
else
{
lclTyp = TYP_REF;
opcode = CEE_LDIND_REF;
goto LDIND;
}
op1 = impPopStack().val;
assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL);
CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
if (impIsPrimitive(jitTyp))
{
op1 = gtNewOperNode(GT_IND, JITtype2varType(jitTyp), op1);
                        // Could point anywhere, for example a boxed class static int
op1->gtFlags |= GTF_IND_TGTANYWHERE | GTF_GLOB_REF;
assertImp(varTypeIsArithmetic(op1->gtType));
}
else
{
// OBJ returns a struct
// and an inline argument which is the class token of the loaded obj
op1 = gtNewObjNode(resolvedToken.hClass, op1);
}
op1->gtFlags |= GTF_EXCEPT;
if (prefixFlags & PREFIX_UNALIGNED)
{
op1->gtFlags |= GTF_IND_UNALIGNED;
}
impPushOnStack(op1, tiRetVal);
break;
}
case CEE_LDLEN:
op1 = impPopStack().val;
if (opts.OptimizationEnabled())
{
/* Use GT_ARR_LENGTH operator so rng check opts see this */
GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, op1, OFFSETOF__CORINFO_Array__length, block);
op1 = arrLen;
}
else
{
/* Create the expression "*(array_addr + ArrLenOffs)" */
op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
gtNewIconNode(OFFSETOF__CORINFO_Array__length, TYP_I_IMPL));
op1 = gtNewIndir(TYP_INT, op1);
}
/* Push the result back on the stack */
impPushOnStack(op1, tiRetVal);
break;
case CEE_BREAK:
op1 = gtNewHelperCallNode(CORINFO_HELP_USER_BREAKPOINT, TYP_VOID);
goto SPILL_APPEND;
case CEE_NOP:
if (opts.compDbgCode)
{
op1 = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
goto SPILL_APPEND;
}
break;
/******************************** NYI *******************************/
case 0xCC:
OutputDebugStringA("CLR: Invalid x86 breakpoint in IL stream\n");
FALLTHROUGH;
case CEE_ILLEGAL:
case CEE_MACRO_END:
default:
if (compIsForInlining())
{
compInlineResult->NoteFatal(InlineObservation::CALLEE_COMPILATION_ERROR);
return;
}
BADCODE3("unknown opcode", ": %02X", (int)opcode);
}
codeAddr += sz;
prevOpcode = opcode;
prefixFlags = 0;
}
return;
#undef _impResolveToken
}
#ifdef _PREFAST_
#pragma warning(pop)
#endif
// Push a local/argument tree on the operand stack
void Compiler::impPushVar(GenTree* op, typeInfo tiRetVal)
{
tiRetVal.NormaliseForStack();
if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init) && tiRetVal.IsThisPtr())
{
tiRetVal.SetUninitialisedObjRef();
}
impPushOnStack(op, tiRetVal);
}
//------------------------------------------------------------------------
// impCreateLocal: create a GT_LCL_VAR node to access a local that might need to be normalized on load
//
// Arguments:
// lclNum -- The index into lvaTable
// offset -- The offset to associate with the node
//
// Returns:
// The node
//
GenTreeLclVar* Compiler::impCreateLocalNode(unsigned lclNum DEBUGARG(IL_OFFSET offset))
{
var_types lclTyp;
if (lvaTable[lclNum].lvNormalizeOnLoad())
{
lclTyp = lvaGetRealType(lclNum);
}
else
{
lclTyp = lvaGetActualType(lclNum);
}
return gtNewLclvNode(lclNum, lclTyp DEBUGARG(offset));
}
// Load a local/argument on the operand stack
// lclNum is an index into lvaTable *NOT* the arg/lcl index in the IL
void Compiler::impLoadVar(unsigned lclNum, IL_OFFSET offset, const typeInfo& tiRetVal)
{
impPushVar(impCreateLocalNode(lclNum DEBUGARG(offset)), tiRetVal);
}
// Load an argument on the operand stack
// Shared by the various CEE_LDARG opcodes
// ilArgNum is the argument index as specified in IL.
// It will be mapped to the correct lvaTable index
void Compiler::impLoadArg(unsigned ilArgNum, IL_OFFSET offset)
{
Verify(ilArgNum < info.compILargsCount, "bad arg num");
if (compIsForInlining())
{
if (ilArgNum >= info.compArgsCount)
{
compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_ARGUMENT_NUMBER);
return;
}
impPushVar(impInlineFetchArg(ilArgNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo),
impInlineInfo->lclVarInfo[ilArgNum].lclVerTypeInfo);
}
else
{
if (ilArgNum >= info.compArgsCount)
{
BADCODE("Bad IL");
}
unsigned lclNum = compMapILargNum(ilArgNum); // account for possible hidden param
if (lclNum == info.compThisArg)
{
lclNum = lvaArg0Var;
}
impLoadVar(lclNum, offset);
}
}
// Load a local on the operand stack
// Shared by the various CEE_LDLOC opcodes
// ilLclNum is the local index as specified in IL.
// It will be mapped to the correct lvaTable index
void Compiler::impLoadLoc(unsigned ilLclNum, IL_OFFSET offset)
{
if (compIsForInlining())
{
if (ilLclNum >= info.compMethodInfo->locals.numArgs)
{
compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_LOCAL_NUMBER);
return;
}
// Get the local type
var_types lclTyp = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclTypeInfo;
typeInfo tiRetVal = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclVerTypeInfo;
/* Have we allocated a temp for this local? */
unsigned lclNum = impInlineFetchLocal(ilLclNum DEBUGARG("Inline ldloc first use temp"));
// All vars of inlined methods should be !lvNormalizeOnLoad()
assert(!lvaTable[lclNum].lvNormalizeOnLoad());
lclTyp = genActualType(lclTyp);
impPushVar(gtNewLclvNode(lclNum, lclTyp), tiRetVal);
}
else
{
if (ilLclNum >= info.compMethodInfo->locals.numArgs)
{
BADCODE("Bad IL");
}
unsigned lclNum = info.compArgsCount + ilLclNum;
impLoadVar(lclNum, offset);
}
}
#ifdef TARGET_ARM
/**************************************************************************************
*
* When assigning a vararg call src to a HFA lcl dest, mark that we cannot promote the
* dst struct, because struct promotion will turn it into a float/double variable while
* the rhs will be an int/long variable. We don't code generate assignment of int into
* a float, but there is nothing that might prevent us from doing so. The tree however
 * would look like: (=, (typ_float, typ_int)) or (GT_TRANSFER, (typ_float, typ_int))
*
* tmpNum - the lcl dst variable num that is a struct.
* src - the src tree assigned to the dest that is a struct/int (when varargs call.)
* hClass - the type handle for the struct variable.
*
* TODO-ARM-CQ: [301608] This is a rare scenario with varargs and struct promotion coming into play,
* however, we could do a codegen of transferring from int to float registers
* (transfer, not a cast.)
*
*/
void Compiler::impMarkLclDstNotPromotable(unsigned tmpNum, GenTree* src, CORINFO_CLASS_HANDLE hClass)
{
if (src->gtOper == GT_CALL && src->AsCall()->IsVarargs() && IsHfa(hClass))
{
int hfaSlots = GetHfaCount(hClass);
var_types hfaType = GetHfaType(hClass);
        // If we have varargs, the importer morphs the method's return type to be "int" irrespective of its
        // original struct/float type, because the ABI calls for the return to be in integer registers.
// We don't want struct promotion to replace an expression like this:
// lclFld_int = callvar_int() into lclFld_float = callvar_int();
// This means an int is getting assigned to a float without a cast. Prevent the promotion.
if ((hfaType == TYP_DOUBLE && hfaSlots == sizeof(double) / REGSIZE_BYTES) ||
(hfaType == TYP_FLOAT && hfaSlots == sizeof(float) / REGSIZE_BYTES))
{
// Make sure this struct type stays as struct so we can receive the call in a struct.
lvaTable[tmpNum].lvIsMultiRegRet = true;
}
}
}
#endif // TARGET_ARM
#if FEATURE_MULTIREG_RET
//------------------------------------------------------------------------
// impAssignMultiRegTypeToVar: ensure calls that return structs in multiple
// registers return values to suitable temps.
//
// Arguments:
// op -- call returning a struct in registers
// hClass -- class handle for struct
//
// Returns:
// Tree with reference to struct local to use as call return value.
GenTree* Compiler::impAssignMultiRegTypeToVar(GenTree* op,
CORINFO_CLASS_HANDLE hClass DEBUGARG(CorInfoCallConvExtension callConv))
{
unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Return value temp for multireg return"));
impAssignTempGen(tmpNum, op, hClass, (unsigned)CHECK_SPILL_ALL);
GenTree* ret = gtNewLclvNode(tmpNum, lvaTable[tmpNum].lvType);
// TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
ret->gtFlags |= GTF_DONT_CSE;
assert(IsMultiRegReturnedType(hClass, callConv));
// Mark the var so that fields are not promoted and stay together.
lvaTable[tmpNum].lvIsMultiRegRet = true;
return ret;
}
#endif // FEATURE_MULTIREG_RET
//------------------------------------------------------------------------
// impReturnInstruction: import a return or an explicit tail call
//
// Arguments:
// prefixFlags -- active IL prefixes
// opcode -- [in, out] IL opcode
//
// Returns:
// True if import was successful (may fail for some inlinees)
//
bool Compiler::impReturnInstruction(int prefixFlags, OPCODE& opcode)
{
const bool isTailCall = (prefixFlags & PREFIX_TAILCALL) != 0;
#ifdef DEBUG
// If we are importing an inlinee and have GC ref locals we always
// need to have a spill temp for the return value. This temp
// should have been set up in advance, over in fgFindBasicBlocks.
if (compIsForInlining() && impInlineInfo->HasGcRefLocals() && (info.compRetType != TYP_VOID))
{
assert(lvaInlineeReturnSpillTemp != BAD_VAR_NUM);
}
#endif // DEBUG
GenTree* op2 = nullptr;
GenTree* op1 = nullptr;
CORINFO_CLASS_HANDLE retClsHnd = nullptr;
if (info.compRetType != TYP_VOID)
{
StackEntry se = impPopStack();
retClsHnd = se.seTypeInfo.GetClassHandle();
op2 = se.val;
if (!compIsForInlining())
{
impBashVarAddrsToI(op2);
op2 = impImplicitIorI4Cast(op2, info.compRetType);
op2 = impImplicitR4orR8Cast(op2, info.compRetType);
// Note that we allow TYP_I_IMPL<->TYP_BYREF transformation, but only TYP_I_IMPL<-TYP_REF.
assertImp((genActualType(op2->TypeGet()) == genActualType(info.compRetType)) ||
((op2->TypeGet() == TYP_I_IMPL) && TypeIs(info.compRetType, TYP_BYREF)) ||
(op2->TypeIs(TYP_BYREF, TYP_REF) && (info.compRetType == TYP_I_IMPL)) ||
(varTypeIsFloating(op2->gtType) && varTypeIsFloating(info.compRetType)) ||
(varTypeIsStruct(op2) && varTypeIsStruct(info.compRetType)));
#ifdef DEBUG
if (!isTailCall && opts.compGcChecks && (info.compRetType == TYP_REF))
{
// DDB 3483 : JIT Stress: early termination of GC ref's life time in exception code path
// VSW 440513: Incorrect gcinfo on the return value under COMPlus_JitGCChecks=1 for methods with
// one-return BB.
assert(op2->gtType == TYP_REF);
// confirm that the argument is a GC pointer (for debugging (GC stress))
GenTreeCall::Use* args = gtNewCallArgs(op2);
op2 = gtNewHelperCallNode(CORINFO_HELP_CHECK_OBJ, TYP_REF, args);
if (verbose)
{
printf("\ncompGcChecks tree:\n");
gtDispTree(op2);
}
}
#endif
}
else
{
if (verCurrentState.esStackDepth != 0)
{
assert(compIsForInlining());
JITDUMP("CALLSITE_COMPILATION_ERROR: inlinee's stack is not empty.");
compInlineResult->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR);
return false;
}
#ifdef DEBUG
if (verbose)
{
printf("\n\n Inlinee Return expression (before normalization) =>\n");
gtDispTree(op2);
}
#endif
// Make sure the type matches the original call.
var_types returnType = genActualType(op2->gtType);
var_types originalCallType = impInlineInfo->inlineCandidateInfo->fncRetType;
if ((returnType != originalCallType) && (originalCallType == TYP_STRUCT))
{
originalCallType = impNormStructType(impInlineInfo->inlineCandidateInfo->methInfo.args.retTypeClass);
}
if (returnType != originalCallType)
{
// Allow TYP_BYREF to be returned as TYP_I_IMPL and vice versa.
                // Allow TYP_REF to be returned as TYP_I_IMPL and NOT vice versa.
if ((TypeIs(returnType, TYP_BYREF, TYP_REF) && (originalCallType == TYP_I_IMPL)) ||
((returnType == TYP_I_IMPL) && TypeIs(originalCallType, TYP_BYREF)))
{
JITDUMP("Allowing return type mismatch: have %s, needed %s\n", varTypeName(returnType),
varTypeName(originalCallType));
}
else
{
JITDUMP("Return type mismatch: have %s, needed %s\n", varTypeName(returnType),
varTypeName(originalCallType));
compInlineResult->NoteFatal(InlineObservation::CALLSITE_RETURN_TYPE_MISMATCH);
return false;
}
}
// Below, we are going to set impInlineInfo->retExpr to the tree with the return
// expression. At this point, retExpr could already be set if there are multiple
// return blocks (meaning fgNeedReturnSpillTemp() == true) and one of
// the other blocks already set it. If there is only a single return block,
// retExpr shouldn't be set. However, this is not true if we reimport a block
// with a return. In that case, retExpr will be set, then the block will be
// reimported, but retExpr won't get cleared as part of setting the block to
// be reimported. The reimported retExpr value should be the same, so even if
// we don't unconditionally overwrite it, it shouldn't matter.
if (info.compRetNativeType != TYP_STRUCT)
{
// compRetNativeType is not TYP_STRUCT.
// This implies it could be either a scalar type or SIMD vector type or
// a struct type that can be normalized to a scalar type.
if (varTypeIsStruct(info.compRetType))
{
noway_assert(info.compRetBuffArg == BAD_VAR_NUM);
// adjust the type away from struct to integral
// and no normalizing
op2 = impFixupStructReturnType(op2, retClsHnd, info.compCallConv);
}
else
{
// Do we have to normalize?
var_types fncRealRetType = JITtype2varType(info.compMethodInfo->args.retType);
if ((varTypeIsSmall(op2->TypeGet()) || varTypeIsSmall(fncRealRetType)) &&
fgCastNeeded(op2, fncRealRetType))
{
// Small-typed return values are normalized by the callee
op2 = gtNewCastNode(TYP_INT, op2, false, fncRealRetType);
}
}
if (fgNeedReturnSpillTemp())
{
assert(info.compRetNativeType != TYP_VOID &&
(fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals()));
// If this method returns a ref type, track the actual types seen
// in the returns.
if (info.compRetType == TYP_REF)
{
bool isExact = false;
bool isNonNull = false;
CORINFO_CLASS_HANDLE returnClsHnd = gtGetClassHandle(op2, &isExact, &isNonNull);
if (impInlineInfo->retExpr == nullptr)
{
// This is the first return, so best known type is the type
// of this return value.
impInlineInfo->retExprClassHnd = returnClsHnd;
impInlineInfo->retExprClassHndIsExact = isExact;
}
else if (impInlineInfo->retExprClassHnd != returnClsHnd)
{
// This return site type differs from earlier seen sites,
// so reset the info and we'll fall back to using the method's
// declared return type for the return spill temp.
impInlineInfo->retExprClassHnd = nullptr;
impInlineInfo->retExprClassHndIsExact = false;
}
}
impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
(unsigned)CHECK_SPILL_ALL);
var_types lclRetType = lvaGetDesc(lvaInlineeReturnSpillTemp)->lvType;
GenTree* tmpOp2 = gtNewLclvNode(lvaInlineeReturnSpillTemp, lclRetType);
op2 = tmpOp2;
#ifdef DEBUG
if (impInlineInfo->retExpr)
{
// Some other block(s) have seen the CEE_RET first.
// Better they spilled to the same temp.
assert(impInlineInfo->retExpr->gtOper == GT_LCL_VAR);
assert(impInlineInfo->retExpr->AsLclVarCommon()->GetLclNum() ==
op2->AsLclVarCommon()->GetLclNum());
}
#endif
}
#ifdef DEBUG
if (verbose)
{
printf("\n\n Inlinee Return expression (after normalization) =>\n");
gtDispTree(op2);
}
#endif
// Report the return expression
impInlineInfo->retExpr = op2;
}
else
{
// compRetNativeType is TYP_STRUCT.
// This implies that struct return via RetBuf arg or multi-reg struct return
GenTreeCall* iciCall = impInlineInfo->iciCall->AsCall();
// Assign the inlinee return into a spill temp.
// spill temp only exists if there are multiple return points
if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
{
// in this case we have to insert multiple struct copies to the temp
// and the retexpr is just the temp.
assert(info.compRetNativeType != TYP_VOID);
assert(fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals());
impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
(unsigned)CHECK_SPILL_ALL);
}
#if defined(TARGET_ARM) || defined(UNIX_AMD64_ABI)
#if defined(TARGET_ARM)
// TODO-ARM64-NYI: HFA
            // TODO-AMD64-Unix and TODO-ARM: once the ARM64 functionality is implemented, the
            // next ifdefs could be refactored into a single method with the ifdef inside.
if (IsHfa(retClsHnd))
{
// Same as !IsHfa but just don't bother with impAssignStructPtr.
#else // defined(UNIX_AMD64_ABI)
ReturnTypeDesc retTypeDesc;
retTypeDesc.InitializeStructReturnType(this, retClsHnd, info.compCallConv);
unsigned retRegCount = retTypeDesc.GetReturnRegCount();
if (retRegCount != 0)
{
// If single eightbyte, the return type would have been normalized and there won't be a temp var.
// This code will be called only if the struct return has not been normalized (i.e. 2 eightbytes -
// max allowed.)
assert(retRegCount == MAX_RET_REG_COUNT);
// Same as !structDesc.passedInRegisters but just don't bother with impAssignStructPtr.
CLANG_FORMAT_COMMENT_ANCHOR;
#endif // defined(UNIX_AMD64_ABI)
if (fgNeedReturnSpillTemp())
{
if (!impInlineInfo->retExpr)
{
#if defined(TARGET_ARM)
impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType);
#else // defined(UNIX_AMD64_ABI)
// The inlinee compiler has figured out the type of the temp already. Use it here.
impInlineInfo->retExpr =
gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
#endif // defined(UNIX_AMD64_ABI)
}
}
else
{
impInlineInfo->retExpr = op2;
}
}
else
#elif defined(TARGET_ARM64)
ReturnTypeDesc retTypeDesc;
retTypeDesc.InitializeStructReturnType(this, retClsHnd, info.compCallConv);
unsigned retRegCount = retTypeDesc.GetReturnRegCount();
if (retRegCount != 0)
{
assert(!iciCall->HasRetBufArg());
assert(retRegCount >= 2);
if (fgNeedReturnSpillTemp())
{
if (!impInlineInfo->retExpr)
{
// The inlinee compiler has figured out the type of the temp already. Use it here.
impInlineInfo->retExpr =
gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
}
}
else
{
impInlineInfo->retExpr = op2;
}
}
else
#elif defined(TARGET_X86)
ReturnTypeDesc retTypeDesc;
retTypeDesc.InitializeStructReturnType(this, retClsHnd, info.compCallConv);
unsigned retRegCount = retTypeDesc.GetReturnRegCount();
if (retRegCount != 0)
{
assert(!iciCall->HasRetBufArg());
assert(retRegCount == MAX_RET_REG_COUNT);
if (fgNeedReturnSpillTemp())
{
if (!impInlineInfo->retExpr)
{
// The inlinee compiler has figured out the type of the temp already. Use it here.
impInlineInfo->retExpr =
gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
}
}
else
{
impInlineInfo->retExpr = op2;
}
}
else
#endif // defined(TARGET_ARM64)
{
assert(iciCall->HasRetBufArg());
GenTree* dest = gtCloneExpr(iciCall->gtCallArgs->GetNode());
// spill temp only exists if there are multiple return points
if (fgNeedReturnSpillTemp())
{
// if this is the first return we have seen set the retExpr
if (!impInlineInfo->retExpr)
{
impInlineInfo->retExpr =
impAssignStructPtr(dest, gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType),
retClsHnd, (unsigned)CHECK_SPILL_ALL);
}
}
else
{
impInlineInfo->retExpr = impAssignStructPtr(dest, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
}
}
}
if (impInlineInfo->retExpr != nullptr)
{
impInlineInfo->retBB = compCurBB;
}
}
}
if (compIsForInlining())
{
return true;
}
if (info.compRetType == TYP_VOID)
{
// return void
op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
}
else if (info.compRetBuffArg != BAD_VAR_NUM)
{
// Assign value to return buff (first param)
GenTree* retBuffAddr =
gtNewLclvNode(info.compRetBuffArg, TYP_BYREF DEBUGARG(impCurStmtDI.GetLocation().GetOffset()));
op2 = impAssignStructPtr(retBuffAddr, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
impAppendTree(op2, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);
// There are cases where the address of the implicit RetBuf should be returned explicitly (in RAX).
CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_AMD64)
    // The x64 (System V and Win64) calling conventions require the implicit
    // return buffer to be returned explicitly (in RAX).
    // Change the return type to be BYREF.
op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
#else // !defined(TARGET_AMD64)
    // On non-AMD64 targets the profiler hook requires the implicit RetBuf to be returned explicitly.
    // In that case the return type of the function is changed to BYREF.
    // If the profiler hook is not needed, the return type of the function is TYP_VOID.
if (compIsProfilerHookNeeded())
{
op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
}
#if defined(TARGET_ARM64)
// On ARM64, the native instance calling convention variant
// requires the implicit ByRef to be explicitly returned.
else if (TargetOS::IsWindows && callConvIsInstanceMethodCallConv(info.compCallConv))
{
op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
}
#endif
#if defined(TARGET_X86)
else if (info.compCallConv != CorInfoCallConvExtension::Managed)
{
op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
}
#endif
else
{
// return void
op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
}
#endif // !defined(TARGET_AMD64)
}
else if (varTypeIsStruct(info.compRetType))
{
#if !FEATURE_MULTIREG_RET
// For both ARM architectures the HFA native types are maintained as structs.
    // On System V AMD64 the multireg struct returns are also left as structs.
noway_assert(info.compRetNativeType != TYP_STRUCT);
#endif
op2 = impFixupStructReturnType(op2, retClsHnd, info.compCallConv);
// return op2
var_types returnType = info.compRetType;
op1 = gtNewOperNode(GT_RETURN, genActualType(returnType), op2);
}
else
{
// return op2
op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetType), op2);
}
// We must have imported a tailcall and jumped to RET
if (isTailCall)
{
assert(verCurrentState.esStackDepth == 0 && impOpcodeIsCallOpcode(opcode));
opcode = CEE_RET; // To prevent trying to spill if CALL_SITE_BOUNDARIES
// impImportCall() would have already appended TYP_VOID calls
if (info.compRetType == TYP_VOID)
{
return true;
}
}
impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);
#ifdef DEBUG
// Remember at which BC offset the tree was finished
impNoteLastILoffs();
#endif
return true;
}
/*****************************************************************************
* Mark the block as unimported.
* Note that the caller is responsible for calling impImportBlockPending(),
* with the appropriate stack-state
*/
inline void Compiler::impReimportMarkBlock(BasicBlock* block)
{
#ifdef DEBUG
if (verbose && (block->bbFlags & BBF_IMPORTED))
{
printf("\n" FMT_BB " will be reimported\n", block->bbNum);
}
#endif
block->bbFlags &= ~BBF_IMPORTED;
}
/*****************************************************************************
* Mark the successors of the given block as unimported.
* Note that the caller is responsible for calling impImportBlockPending()
* for all the successors, with the appropriate stack-state.
*/
void Compiler::impReimportMarkSuccessors(BasicBlock* block)
{
for (BasicBlock* const succBlock : block->Succs())
{
impReimportMarkBlock(succBlock);
}
}
/*****************************************************************************
*
 * Filter wrapper to handle only the passed-in exception code.
*/
LONG FilterVerificationExceptions(PEXCEPTION_POINTERS pExceptionPointers, LPVOID lpvParam)
{
if (pExceptionPointers->ExceptionRecord->ExceptionCode == SEH_VERIFICATION_EXCEPTION)
{
return EXCEPTION_EXECUTE_HANDLER;
}
return EXCEPTION_CONTINUE_SEARCH;
}
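// impVerifyEHBlock: propagate the verification state of "block" into the handlers (and filters)
// of every try region that encloses it, queueing those handlers for import if they have not been
// imported yet. When "isTryStart" is true, also enforce the constraints that hold on entry to a
// try region: an empty evaluation stack and, for most handler kinds, an initialized 'this'
// pointer when tracking constructor initialization.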
void Compiler::impVerifyEHBlock(BasicBlock* block, bool isTryStart)
{
assert(block->hasTryIndex());
assert(!compIsForInlining());
unsigned tryIndex = block->getTryIndex();
EHblkDsc* HBtab = ehGetDsc(tryIndex);
if (isTryStart)
{
assert(block->bbFlags & BBF_TRY_BEG);
// The Stack must be empty
//
if (block->bbStkDepth != 0)
{
BADCODE("Evaluation stack must be empty on entry into a try block");
}
}
// Save the stack contents, we'll need to restore it later
//
SavedStack blockState;
impSaveStackState(&blockState, false);
while (HBtab != nullptr)
{
if (isTryStart)
{
            // Are we verifying that an instance constructor properly initializes its 'this' pointer once?
            // We do not allow the 'this' pointer to be uninitialized when entering most kinds of try regions
//
if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
{
// We trigger an invalid program exception here unless we have a try/fault region.
//
if (HBtab->HasCatchHandler() || HBtab->HasFinallyHandler() || HBtab->HasFilter())
{
BADCODE(
"The 'this' pointer of an instance constructor is not intialized upon entry to a try region");
}
else
{
// Allow a try/fault region to proceed.
assert(HBtab->HasFaultHandler());
}
}
}
// Recursively process the handler block, if we haven't already done so.
BasicBlock* hndBegBB = HBtab->ebdHndBeg;
if (((hndBegBB->bbFlags & BBF_IMPORTED) == 0) && (impGetPendingBlockMember(hndBegBB) == 0))
{
// Construct the proper verification stack state
// either empty or one that contains just
// the Exception Object that we are dealing with
//
verCurrentState.esStackDepth = 0;
if (handlerGetsXcptnObj(hndBegBB->bbCatchTyp))
{
CORINFO_CLASS_HANDLE clsHnd;
if (HBtab->HasFilter())
{
clsHnd = impGetObjectClass();
}
else
{
CORINFO_RESOLVED_TOKEN resolvedToken;
resolvedToken.tokenContext = impTokenLookupContextHandle;
resolvedToken.tokenScope = info.compScopeHnd;
resolvedToken.token = HBtab->ebdTyp;
resolvedToken.tokenType = CORINFO_TOKENKIND_Class;
info.compCompHnd->resolveToken(&resolvedToken);
clsHnd = resolvedToken.hClass;
}
                // push the catch arg on the stack, spilling to a temp if necessary
// Note: can update HBtab->ebdHndBeg!
hndBegBB = impPushCatchArgOnStack(hndBegBB, clsHnd, false);
}
// Queue up the handler for importing
//
impImportBlockPending(hndBegBB);
}
// Process the filter block, if we haven't already done so.
if (HBtab->HasFilter())
{
            /* @VERIFICATION : Ideally the end-of-filter state should get
               propagated to the catch handler; this is an incompleteness,
               but not a security/compliance issue, since the only
               interesting state is the 'thisInit' state.
*/
BasicBlock* filterBB = HBtab->ebdFilter;
if (((filterBB->bbFlags & BBF_IMPORTED) == 0) && (impGetPendingBlockMember(filterBB) == 0))
{
verCurrentState.esStackDepth = 0;
                // push the catch arg on the stack, spilling to a temp if necessary
// Note: can update HBtab->ebdFilter!
const bool isSingleBlockFilter = (filterBB->bbNext == hndBegBB);
filterBB = impPushCatchArgOnStack(filterBB, impGetObjectClass(), isSingleBlockFilter);
impImportBlockPending(filterBB);
}
}
// This seems redundant ....??
if (verTrackObjCtorInitState && HBtab->HasFaultHandler())
{
/* Recursively process the handler block */
verCurrentState.esStackDepth = 0;
// Queue up the fault handler for importing
//
impImportBlockPending(HBtab->ebdHndBeg);
}
// Now process our enclosing try index (if any)
//
tryIndex = HBtab->ebdEnclosingTryIndex;
if (tryIndex == EHblkDsc::NO_ENCLOSING_INDEX)
{
HBtab = nullptr;
}
else
{
HBtab = ehGetDsc(tryIndex);
}
}
// Restore the stack contents
impRestoreStackState(&blockState);
}
//***************************************************************
// Import the instructions for the given basic block. Perform
// verification, throwing an exception on failure. Push any successor blocks that are enabled for the first
// time, or whose verification pre-state is changed.
#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
void Compiler::impImportBlock(BasicBlock* block)
{
// BBF_INTERNAL blocks only exist during importation due to EH canonicalization. We need to
// handle them specially. In particular, there is no IL to import for them, but we do need
// to mark them as imported and put their successors on the pending import list.
if (block->bbFlags & BBF_INTERNAL)
{
JITDUMP("Marking BBF_INTERNAL block " FMT_BB " as BBF_IMPORTED\n", block->bbNum);
block->bbFlags |= BBF_IMPORTED;
for (BasicBlock* const succBlock : block->Succs())
{
impImportBlockPending(succBlock);
}
return;
}
bool markImport;
assert(block);
    /* Make the block globally available */
compCurBB = block;
#ifdef DEBUG
/* Initialize the debug variables */
impCurOpcName = "unknown";
impCurOpcOffs = block->bbCodeOffs;
#endif
/* Set the current stack state to the merged result */
verResetCurrentState(block, &verCurrentState);
/* Now walk the code and import the IL into GenTrees */
struct FilterVerificationExceptionsParam
{
Compiler* pThis;
BasicBlock* block;
};
FilterVerificationExceptionsParam param;
param.pThis = this;
param.block = block;
PAL_TRY(FilterVerificationExceptionsParam*, pParam, ¶m)
{
/* @VERIFICATION : For now, the only state propagation from try
           to its handler is "thisInit" state (stack is empty at start of try).
In general, for state that we track in verification, we need to
model the possibility that an exception might happen at any IL
instruction, so we really need to merge all states that obtain
between IL instructions in a try block into the start states of
all handlers.
However we do not allow the 'this' pointer to be uninitialized when
           entering most kinds of try regions (only try/fault are allowed to have
an uninitialized this pointer on entry to the try)
Fortunately, the stack is thrown away when an exception
leads to a handler, so we don't have to worry about that.
We DO, however, have to worry about the "thisInit" state.
But only for the try/fault case.
The only allowed transition is from TIS_Uninit to TIS_Init.
So for a try/fault region for the fault handler block
we will merge the start state of the try begin
and the post-state of each block that is part of this try region
*/
// merge the start state of the try begin
//
if (pParam->block->bbFlags & BBF_TRY_BEG)
{
pParam->pThis->impVerifyEHBlock(pParam->block, true);
}
pParam->pThis->impImportBlockCode(pParam->block);
// As discussed above:
// merge the post-state of each block that is part of this try region
//
if (pParam->block->hasTryIndex())
{
pParam->pThis->impVerifyEHBlock(pParam->block, false);
}
}
PAL_EXCEPT_FILTER(FilterVerificationExceptions)
{
verHandleVerificationFailure(block DEBUGARG(false));
}
PAL_ENDTRY
if (compDonotInline())
{
return;
}
assert(!compDonotInline());
markImport = false;
SPILLSTACK:
unsigned baseTmp = NO_BASE_TMP; // input temps assigned to successor blocks
bool reimportSpillClique = false;
BasicBlock* tgtBlock = nullptr;
/* If the stack is non-empty, we might have to spill its contents */
if (verCurrentState.esStackDepth != 0)
{
impBoxTemp = BAD_VAR_NUM; // if a box temp is used in a block that leaves something
// on the stack, its lifetime is hard to determine, simply
// don't reuse such temps.
Statement* addStmt = nullptr;
/* Do the successors of 'block' have any other predecessors ?
We do not want to do some of the optimizations related to multiRef
if we can reimport blocks */
unsigned multRef = impCanReimport ? unsigned(~0) : 0;
switch (block->bbJumpKind)
{
case BBJ_COND:
addStmt = impExtractLastStmt();
assert(addStmt->GetRootNode()->gtOper == GT_JTRUE);
/* Note if the next block has more than one ancestor */
multRef |= block->bbNext->bbRefs;
/* Does the next block have temps assigned? */
baseTmp = block->bbNext->bbStkTempsIn;
tgtBlock = block->bbNext;
if (baseTmp != NO_BASE_TMP)
{
break;
}
/* Try the target of the jump then */
multRef |= block->bbJumpDest->bbRefs;
baseTmp = block->bbJumpDest->bbStkTempsIn;
tgtBlock = block->bbJumpDest;
break;
case BBJ_ALWAYS:
multRef |= block->bbJumpDest->bbRefs;
baseTmp = block->bbJumpDest->bbStkTempsIn;
tgtBlock = block->bbJumpDest;
break;
case BBJ_NONE:
multRef |= block->bbNext->bbRefs;
baseTmp = block->bbNext->bbStkTempsIn;
tgtBlock = block->bbNext;
break;
case BBJ_SWITCH:
addStmt = impExtractLastStmt();
assert(addStmt->GetRootNode()->gtOper == GT_SWITCH);
for (BasicBlock* const tgtBlock : block->SwitchTargets())
{
multRef |= tgtBlock->bbRefs;
// Thanks to spill cliques, we should have assigned all or none
assert((baseTmp == NO_BASE_TMP) || (baseTmp == tgtBlock->bbStkTempsIn));
baseTmp = tgtBlock->bbStkTempsIn;
if (multRef > 1)
{
break;
}
}
break;
case BBJ_CALLFINALLY:
case BBJ_EHCATCHRET:
case BBJ_RETURN:
case BBJ_EHFINALLYRET:
case BBJ_EHFILTERRET:
case BBJ_THROW:
NO_WAY("can't have 'unreached' end of BB with non-empty stack");
break;
default:
noway_assert(!"Unexpected bbJumpKind");
break;
}
assert(multRef >= 1);
/* Do we have a base temp number? */
bool newTemps = (baseTmp == NO_BASE_TMP);
if (newTemps)
{
/* Grab enough temps for the whole stack */
baseTmp = impGetSpillTmpBase(block);
}
/* Spill all stack entries into temps */
unsigned level, tempNum;
JITDUMP("\nSpilling stack entries into temps\n");
for (level = 0, tempNum = baseTmp; level < verCurrentState.esStackDepth; level++, tempNum++)
{
GenTree* tree = verCurrentState.esStack[level].val;
/* VC generates code where it pushes a byref from one branch, and an int (ldc.i4 0) from
the other. This should merge to a byref in unverifiable code.
However, if the branch which leaves the TYP_I_IMPL on the stack is imported first, the
successor would be imported assuming there was a TYP_I_IMPL on
the stack. Thus the value would not get GC-tracked. Hence,
change the temp to TYP_BYREF and reimport the successors.
Note: We should only allow this in unverifiable code.
*/
if (tree->gtType == TYP_BYREF && lvaTable[tempNum].lvType == TYP_I_IMPL)
{
lvaTable[tempNum].lvType = TYP_BYREF;
impReimportMarkSuccessors(block);
markImport = true;
}
#ifdef TARGET_64BIT
if (genActualType(tree->gtType) == TYP_I_IMPL && lvaTable[tempNum].lvType == TYP_INT)
{
// Some other block in the spill clique set this to "int", but now we have "native int".
// Change the type and go back to re-import any blocks that used the wrong type.
lvaTable[tempNum].lvType = TYP_I_IMPL;
reimportSpillClique = true;
}
else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_I_IMPL)
{
// Spill clique has decided this should be "native int", but this block only pushes an "int".
// Insert a sign-extension to "native int" so we match the clique.
verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL);
}
// Consider the case where one branch left a 'byref' on the stack and the other leaves
// an 'int'. On 32-bit, this is allowed (in non-verifiable code) since they are the same
// size. JIT64 managed to make this work on 64-bit. For compatibility, we support JIT64
// behavior instead of asserting and then generating bad code (where we save/restore the
// low 32 bits of a byref pointer to an 'int' sized local). If the 'int' side has been
// imported already, we need to change the type of the local and reimport the spill clique.
// If the 'byref' side has imported, we insert a cast from int to 'native int' to match
// the 'byref' size.
if (genActualType(tree->gtType) == TYP_BYREF && lvaTable[tempNum].lvType == TYP_INT)
{
// Some other block in the spill clique set this to "int", but now we have "byref".
// Change the type and go back to re-import any blocks that used the wrong type.
lvaTable[tempNum].lvType = TYP_BYREF;
reimportSpillClique = true;
}
else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_BYREF)
{
// Spill clique has decided this should be "byref", but this block only pushes an "int".
// Insert a sign-extension to "native int" so we match the clique size.
verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL);
}
#endif // TARGET_64BIT
if (tree->gtType == TYP_DOUBLE && lvaTable[tempNum].lvType == TYP_FLOAT)
{
// Some other block in the spill clique set this to "float", but now we have "double".
// Change the type and go back to re-import any blocks that used the wrong type.
lvaTable[tempNum].lvType = TYP_DOUBLE;
reimportSpillClique = true;
}
else if (tree->gtType == TYP_FLOAT && lvaTable[tempNum].lvType == TYP_DOUBLE)
{
// Spill clique has decided this should be "double", but this block only pushes a "float".
// Insert a cast to "double" so we match the clique.
verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, false, TYP_DOUBLE);
}
/* If addStmt has a reference to tempNum (can only happen if we
are spilling to the temps already used by a previous block),
we need to spill addStmt */
if (addStmt != nullptr && !newTemps && gtHasRef(addStmt->GetRootNode(), tempNum))
{
GenTree* addTree = addStmt->GetRootNode();
if (addTree->gtOper == GT_JTRUE)
{
GenTree* relOp = addTree->AsOp()->gtOp1;
assert(relOp->OperIsCompare());
var_types type = genActualType(relOp->AsOp()->gtOp1->TypeGet());
if (gtHasRef(relOp->AsOp()->gtOp1, tempNum))
{
unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op1"));
impAssignTempGen(temp, relOp->AsOp()->gtOp1, level);
type = genActualType(lvaTable[temp].TypeGet());
relOp->AsOp()->gtOp1 = gtNewLclvNode(temp, type);
}
if (gtHasRef(relOp->AsOp()->gtOp2, tempNum))
{
unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op2"));
impAssignTempGen(temp, relOp->AsOp()->gtOp2, level);
type = genActualType(lvaTable[temp].TypeGet());
relOp->AsOp()->gtOp2 = gtNewLclvNode(temp, type);
}
}
else
{
assert(addTree->gtOper == GT_SWITCH && genActualTypeIsIntOrI(addTree->AsOp()->gtOp1->TypeGet()));
unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt SWITCH"));
impAssignTempGen(temp, addTree->AsOp()->gtOp1, level);
addTree->AsOp()->gtOp1 = gtNewLclvNode(temp, genActualType(addTree->AsOp()->gtOp1->TypeGet()));
}
}
/* Spill the stack entry, and replace with the temp */
if (!impSpillStackEntry(level, tempNum
#ifdef DEBUG
,
true, "Spill Stack Entry"
#endif
))
{
if (markImport)
{
BADCODE("bad stack state");
}
// Oops. Something went wrong when spilling. Bad code.
verHandleVerificationFailure(block DEBUGARG(true));
goto SPILLSTACK;
}
}
/* Put back the 'jtrue'/'switch' if we removed it earlier */
if (addStmt != nullptr)
{
impAppendStmt(addStmt, (unsigned)CHECK_SPILL_NONE);
}
}
// Some of the append/spill logic works on compCurBB
assert(compCurBB == block);
/* Save the tree list in the block */
impEndTreeList(block);
// impEndTreeList sets BBF_IMPORTED on the block
// We do *NOT* want to set it later than this because
// impReimportSpillClique might clear it if this block is both a
// predecessor and successor in the current spill clique
assert(block->bbFlags & BBF_IMPORTED);
// If we had a int/native int, or float/double collision, we need to re-import
if (reimportSpillClique)
{
// This will re-import all the successors of block (as well as each of their predecessors)
impReimportSpillClique(block);
// For blocks that haven't been imported yet, we still need to mark them as pending import.
for (BasicBlock* const succ : block->Succs())
{
if ((succ->bbFlags & BBF_IMPORTED) == 0)
{
impImportBlockPending(succ);
}
}
}
else // the normal case
{
// otherwise just import the successors of block
/* Does this block jump to any other blocks? */
for (BasicBlock* const succ : block->Succs())
{
impImportBlockPending(succ);
}
}
}
#ifdef _PREFAST_
#pragma warning(pop)
#endif
/*****************************************************************************/
//
// Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
// necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in
// impPendingBlockMembers). Merges the current verification state into the verification state of "block"
// (its "pre-state").
void Compiler::impImportBlockPending(BasicBlock* block)
{
#ifdef DEBUG
if (verbose)
{
printf("\nimpImportBlockPending for " FMT_BB "\n", block->bbNum);
}
#endif
// We will add a block to the pending set if it has not already been imported (or needs to be re-imported),
// or if it has, but merging in a predecessor's post-state changes the block's pre-state.
// (When we're doing verification, we always attempt the merge to detect verification errors.)
// If the block has not been imported, add to pending set.
bool addToPending = ((block->bbFlags & BBF_IMPORTED) == 0);
// Initialize bbEntryState just the first time we try to add this block to the pending list
// Just because bbEntryState is NULL, doesn't mean the pre-state wasn't previously set
// We use NULL to indicate the 'common' state to avoid memory allocation
if ((block->bbEntryState == nullptr) && ((block->bbFlags & (BBF_IMPORTED | BBF_FAILED_VERIFICATION)) == 0) &&
(impGetPendingBlockMember(block) == 0))
{
verInitBBEntryState(block, &verCurrentState);
assert(block->bbStkDepth == 0);
block->bbStkDepth = static_cast<unsigned short>(verCurrentState.esStackDepth);
assert(addToPending);
assert(impGetPendingBlockMember(block) == 0);
}
else
{
// The stack should have the same height on entry to the block from all its predecessors.
if (block->bbStkDepth != verCurrentState.esStackDepth)
{
#ifdef DEBUG
char buffer[400];
sprintf_s(buffer, sizeof(buffer),
"Block at offset %4.4x to %4.4x in %0.200s entered with different stack depths.\n"
"Previous depth was %d, current depth is %d",
block->bbCodeOffs, block->bbCodeOffsEnd, info.compFullName, block->bbStkDepth,
verCurrentState.esStackDepth);
buffer[400 - 1] = 0;
NO_WAY(buffer);
#else
NO_WAY("Block entered with different stack depths");
#endif
}
if (!addToPending)
{
return;
}
if (block->bbStkDepth > 0)
{
// We need to fix the types of any spill temps that might have changed:
// int->native int, float->double, int->byref, etc.
impRetypeEntryStateTemps(block);
}
// OK, we must add to the pending list, if it's not already in it.
if (impGetPendingBlockMember(block) != 0)
{
return;
}
}
// Get an entry to add to the pending list
PendingDsc* dsc;
if (impPendingFree)
{
// We can reuse one of the freed up dscs.
dsc = impPendingFree;
impPendingFree = dsc->pdNext;
}
else
{
// We have to create a new dsc
dsc = new (this, CMK_Unknown) PendingDsc;
}
dsc->pdBB = block;
dsc->pdSavedStack.ssDepth = verCurrentState.esStackDepth;
dsc->pdThisPtrInit = verCurrentState.thisInitialized;
// Save the stack trees for later
if (verCurrentState.esStackDepth)
{
impSaveStackState(&dsc->pdSavedStack, false);
}
// Add the entry to the pending list
dsc->pdNext = impPendingList;
impPendingList = dsc;
impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
    // Various assertions require us to now consider the block as not imported (at least for
// the final time...)
block->bbFlags &= ~BBF_IMPORTED;
#ifdef DEBUG
if (verbose && 0)
{
printf("Added PendingDsc - %08p for " FMT_BB "\n", dspPtr(dsc), block->bbNum);
}
#endif
}
/*****************************************************************************/
//
// Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
// necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in
// impPendingBlockMembers). Does *NOT* change the existing "pre-state" of the block.
void Compiler::impReimportBlockPending(BasicBlock* block)
{
JITDUMP("\nimpReimportBlockPending for " FMT_BB, block->bbNum);
assert(block->bbFlags & BBF_IMPORTED);
// OK, we must add to the pending list, if it's not already in it.
if (impGetPendingBlockMember(block) != 0)
{
return;
}
// Get an entry to add to the pending list
PendingDsc* dsc;
if (impPendingFree)
{
// We can reuse one of the freed up dscs.
dsc = impPendingFree;
impPendingFree = dsc->pdNext;
}
else
{
// We have to create a new dsc
dsc = new (this, CMK_ImpStack) PendingDsc;
}
dsc->pdBB = block;
if (block->bbEntryState)
{
dsc->pdThisPtrInit = block->bbEntryState->thisInitialized;
dsc->pdSavedStack.ssDepth = block->bbEntryState->esStackDepth;
dsc->pdSavedStack.ssTrees = block->bbEntryState->esStack;
}
else
{
dsc->pdThisPtrInit = TIS_Bottom;
dsc->pdSavedStack.ssDepth = 0;
dsc->pdSavedStack.ssTrees = nullptr;
}
// Add the entry to the pending list
dsc->pdNext = impPendingList;
impPendingList = dsc;
impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
    // Various assertions require us to now consider the block as not imported (at least for
// the final time...)
block->bbFlags &= ~BBF_IMPORTED;
#ifdef DEBUG
if (verbose && 0)
{
printf("Added PendingDsc - %08p for " FMT_BB "\n", dspPtr(dsc), block->bbNum);
}
#endif
}
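// BlockListNode's operator new reuses a node from the compiler's free list when one is
// available, and otherwise allocates a new node from the basic-block allocator.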
void* Compiler::BlockListNode::operator new(size_t sz, Compiler* comp)
{
if (comp->impBlockListNodeFreeList == nullptr)
{
return comp->getAllocator(CMK_BasicBlock).allocate<BlockListNode>(1);
}
else
{
BlockListNode* res = comp->impBlockListNodeFreeList;
comp->impBlockListNodeFreeList = res->m_next;
return res;
}
}
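// Return a BlockListNode to the free list so a later allocation can reuse it.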
void Compiler::FreeBlockListNode(Compiler::BlockListNode* node)
{
node->m_next = impBlockListNodeFreeList;
impBlockListNodeFreeList = node;
}
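// Walk the spill clique that "block" belongs to, treating "block" as a predecessor, and invoke
// "callback" for each member found. The clique is the closure of blocks related by sharing spill
// temps: we alternately add the successors of known predecessors and the (cheap) predecessors of
// known successors until no new members are discovered.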
void Compiler::impWalkSpillCliqueFromPred(BasicBlock* block, SpillCliqueWalker* callback)
{
bool toDo = true;
noway_assert(!fgComputePredsDone);
if (!fgCheapPredsValid)
{
fgComputeCheapPreds();
}
BlockListNode* succCliqueToDo = nullptr;
BlockListNode* predCliqueToDo = new (this) BlockListNode(block);
while (toDo)
{
toDo = false;
// Look at the successors of every member of the predecessor to-do list.
while (predCliqueToDo != nullptr)
{
BlockListNode* node = predCliqueToDo;
predCliqueToDo = node->m_next;
BasicBlock* blk = node->m_blk;
FreeBlockListNode(node);
for (BasicBlock* const succ : blk->Succs())
{
// If it's not already in the clique, add it, and also add it
// as a member of the successor "toDo" set.
if (impSpillCliqueGetMember(SpillCliqueSucc, succ) == 0)
{
callback->Visit(SpillCliqueSucc, succ);
impSpillCliqueSetMember(SpillCliqueSucc, succ, 1);
succCliqueToDo = new (this) BlockListNode(succ, succCliqueToDo);
toDo = true;
}
}
}
// Look at the predecessors of every member of the successor to-do list.
while (succCliqueToDo != nullptr)
{
BlockListNode* node = succCliqueToDo;
succCliqueToDo = node->m_next;
BasicBlock* blk = node->m_blk;
FreeBlockListNode(node);
for (BasicBlockList* pred = blk->bbCheapPreds; pred != nullptr; pred = pred->next)
{
BasicBlock* predBlock = pred->block;
// If it's not already in the clique, add it, and also add it
// as a member of the predecessor "toDo" set.
if (impSpillCliqueGetMember(SpillCliquePred, predBlock) == 0)
{
callback->Visit(SpillCliquePred, predBlock);
impSpillCliqueSetMember(SpillCliquePred, predBlock, 1);
predCliqueToDo = new (this) BlockListNode(predBlock, predCliqueToDo);
toDo = true;
}
}
}
}
    // If this fails, it means we didn't walk the spill clique properly and somehow managed
    // to miss walking back to include the predecessor we started from.
    // The most likely cause: missing or out-of-date bbPreds.
assert(impSpillCliqueGetMember(SpillCliquePred, block) != 0);
}
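// Record the chosen base temp on a spill clique member: successors read the spilled values on
// entry (bbStkTempsIn), predecessors write them on exit (bbStkTempsOut).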
void Compiler::SetSpillTempsBase::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
{
if (predOrSucc == SpillCliqueSucc)
{
assert(blk->bbStkTempsIn == NO_BASE_TMP); // Should not already be a member of a clique as a successor.
blk->bbStkTempsIn = m_baseTmp;
}
else
{
assert(predOrSucc == SpillCliquePred);
assert(blk->bbStkTempsOut == NO_BASE_TMP); // Should not already be a member of a clique as a predecessor.
blk->bbStkTempsOut = m_baseTmp;
}
}
void Compiler::ReimportSpillClique::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
{
// For Preds we could be a little smarter and just find the existing store
// and re-type it/add a cast, but that is complicated and hopefully very rare, so
// just re-import the whole block (just like we do for successors)
if (((blk->bbFlags & BBF_IMPORTED) == 0) && (m_pComp->impGetPendingBlockMember(blk) == 0))
{
// If we haven't imported this block and we're not going to (because it isn't on
// the pending list) then just ignore it for now.
// This block has either never been imported (EntryState == NULL) or it failed
// verification. Neither state requires us to force it to be imported now.
assert((blk->bbEntryState == nullptr) || (blk->bbFlags & BBF_FAILED_VERIFICATION));
return;
}
// For successors we have a valid verCurrentState, so just mark them for reimport
// the 'normal' way
// Unlike predecessors, we *DO* need to reimport the current block because the
// initial import had the wrong entry state types.
// Similarly, blocks that are currently on the pending list, still need to call
// impImportBlockPending to fixup their entry state.
if (predOrSucc == SpillCliqueSucc)
{
m_pComp->impReimportMarkBlock(blk);
// Set the current stack state to that of the blk->bbEntryState
m_pComp->verResetCurrentState(blk, &m_pComp->verCurrentState);
assert(m_pComp->verCurrentState.thisInitialized == blk->bbThisOnEntry());
m_pComp->impImportBlockPending(blk);
}
else if ((blk != m_pComp->compCurBB) && ((blk->bbFlags & BBF_IMPORTED) != 0))
{
// As described above, we are only visiting predecessors so they can
// add the appropriate casts, since we have already done that for the current
// block, it does not need to be reimported.
// Nor do we need to reimport blocks that are still pending, but not yet
// imported.
//
// For predecessors, we have no state to seed the EntryState, so we just have
// to assume the existing one is correct.
// If the block is also a successor, it will get the EntryState properly
// updated when it is visited as a successor in the above "if" block.
assert(predOrSucc == SpillCliquePred);
m_pComp->impReimportBlockPending(blk);
}
}
// Re-type the incoming lclVar nodes to match the varDsc.
void Compiler::impRetypeEntryStateTemps(BasicBlock* blk)
{
if (blk->bbEntryState != nullptr)
{
EntryState* es = blk->bbEntryState;
for (unsigned level = 0; level < es->esStackDepth; level++)
{
GenTree* tree = es->esStack[level].val;
if ((tree->gtOper == GT_LCL_VAR) || (tree->gtOper == GT_LCL_FLD))
{
es->esStack[level].val->gtType = lvaGetDesc(tree->AsLclVarCommon())->TypeGet();
}
}
}
}
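// Return the base temp number used to spill the stack on exit from "block". If the block's spill
// clique has not been assigned temps yet, grab enough temps for the whole stack and propagate the
// base to every member of the clique.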
unsigned Compiler::impGetSpillTmpBase(BasicBlock* block)
{
if (block->bbStkTempsOut != NO_BASE_TMP)
{
return block->bbStkTempsOut;
}
#ifdef DEBUG
if (verbose)
{
printf("\n*************** In impGetSpillTmpBase(" FMT_BB ")\n", block->bbNum);
}
#endif // DEBUG
// Otherwise, choose one, and propagate to all members of the spill clique.
// Grab enough temps for the whole stack.
unsigned baseTmp = lvaGrabTemps(verCurrentState.esStackDepth DEBUGARG("IL Stack Entries"));
SetSpillTempsBase callback(baseTmp);
// We do *NOT* need to reset the SpillClique*Members because a block can only be the predecessor
// to one spill clique, and similarly can only be the successor to one spill clique
impWalkSpillCliqueFromPred(block, &callback);
return baseTmp;
}
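// Mark every member of the spill clique containing "block" for re-import, because the types
// chosen for the clique's spill temps have changed (e.g. int widened to native int, float to
// double, or int retyped to byref).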
void Compiler::impReimportSpillClique(BasicBlock* block)
{
#ifdef DEBUG
if (verbose)
{
printf("\n*************** In impReimportSpillClique(" FMT_BB ")\n", block->bbNum);
}
#endif // DEBUG
// If we get here, it is because this block is already part of a spill clique
// and one predecessor had an outgoing live stack slot of type int, and this
// block has an outgoing live stack slot of type native int.
// We need to reset these before traversal because they have already been set
// by the previous walk to determine all the members of the spill clique.
impInlineRoot()->impSpillCliquePredMembers.Reset();
impInlineRoot()->impSpillCliqueSuccMembers.Reset();
ReimportSpillClique callback(this);
impWalkSpillCliqueFromPred(block, &callback);
}
// Set the pre-state of "block" (which should not have a pre-state allocated) to
// a copy of "srcState", cloning tree pointers as required.
void Compiler::verInitBBEntryState(BasicBlock* block, EntryState* srcState)
{
if (srcState->esStackDepth == 0 && srcState->thisInitialized == TIS_Bottom)
{
block->bbEntryState = nullptr;
return;
}
block->bbEntryState = getAllocator(CMK_Unknown).allocate<EntryState>(1);
// block->bbEntryState.esRefcount = 1;
block->bbEntryState->esStackDepth = srcState->esStackDepth;
block->bbEntryState->thisInitialized = TIS_Bottom;
if (srcState->esStackDepth > 0)
{
block->bbSetStack(new (this, CMK_Unknown) StackEntry[srcState->esStackDepth]);
unsigned stackSize = srcState->esStackDepth * sizeof(StackEntry);
memcpy(block->bbEntryState->esStack, srcState->esStack, stackSize);
for (unsigned level = 0; level < srcState->esStackDepth; level++)
{
GenTree* tree = srcState->esStack[level].val;
block->bbEntryState->esStack[level].val = gtCloneExpr(tree);
}
}
if (verTrackObjCtorInitState)
{
verSetThisInit(block, srcState->thisInitialized);
}
return;
}
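// Record the 'this' initialization state in the block's entry state, allocating the entry state
// if it does not exist yet.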
void Compiler::verSetThisInit(BasicBlock* block, ThisInitState tis)
{
assert(tis != TIS_Bottom); // Precondition.
if (block->bbEntryState == nullptr)
{
block->bbEntryState = new (this, CMK_Unknown) EntryState();
}
block->bbEntryState->thisInitialized = tis;
}
/*
* Resets the current state to the state at the start of the basic block
*/
void Compiler::verResetCurrentState(BasicBlock* block, EntryState* destState)
{
if (block->bbEntryState == nullptr)
{
destState->esStackDepth = 0;
destState->thisInitialized = TIS_Bottom;
return;
}
destState->esStackDepth = block->bbEntryState->esStackDepth;
if (destState->esStackDepth > 0)
{
unsigned stackSize = destState->esStackDepth * sizeof(StackEntry);
memcpy(destState->esStack, block->bbStackOnEntry(), stackSize);
}
destState->thisInitialized = block->bbThisOnEntry();
return;
}
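// Accessors for the verification entry state cached on a BasicBlock. A null bbEntryState stands
// for the common state: an empty stack and a 'this' init state of TIS_Bottom.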
ThisInitState BasicBlock::bbThisOnEntry() const
{
return bbEntryState ? bbEntryState->thisInitialized : TIS_Bottom;
}
unsigned BasicBlock::bbStackDepthOnEntry() const
{
return (bbEntryState ? bbEntryState->esStackDepth : 0);
}
void BasicBlock::bbSetStack(void* stackBuffer)
{
assert(bbEntryState);
assert(stackBuffer);
bbEntryState->esStack = (StackEntry*)stackBuffer;
}
StackEntry* BasicBlock::bbStackOnEntry() const
{
assert(bbEntryState);
return bbEntryState->esStack;
}
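// Initialize the current verification state for the start of the method: empty stack and unknown
// 'this' init state, then seed the first basic block's entry state from it.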
void Compiler::verInitCurrentState()
{
verTrackObjCtorInitState = false;
verCurrentState.thisInitialized = TIS_Bottom;
// initialize stack info
verCurrentState.esStackDepth = 0;
assert(verCurrentState.esStack != nullptr);
// copy current state to entry state of first BB
verInitBBEntryState(fgFirstBB, &verCurrentState);
}
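// Return the root compiler instance of the current inlining tree: "this" for the root method's
// compiler, otherwise the root recorded in impInlineInfo.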
Compiler* Compiler::impInlineRoot()
{
if (impInlineInfo == nullptr)
{
return this;
}
else
{
return impInlineInfo->InlineRoot;
}
}
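// Query whether "blk" has been recorded as a member of the predecessor or successor side of the
// spill clique currently being walked. Membership is tracked on the inline root compiler.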
BYTE Compiler::impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk)
{
if (predOrSucc == SpillCliquePred)
{
return impInlineRoot()->impSpillCliquePredMembers.Get(blk->bbInd());
}
else
{
assert(predOrSucc == SpillCliqueSucc);
return impInlineRoot()->impSpillCliqueSuccMembers.Get(blk->bbInd());
}
}
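// Record (or clear, when "val" is 0) "blk" as a member of the given side of the spill clique.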
void Compiler::impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val)
{
if (predOrSucc == SpillCliquePred)
{
impInlineRoot()->impSpillCliquePredMembers.Set(blk->bbInd(), val);
}
else
{
assert(predOrSucc == SpillCliqueSucc);
impInlineRoot()->impSpillCliqueSuccMembers.Set(blk->bbInd(), val);
}
}
/*****************************************************************************
*
* Convert the instrs ("import") into our internal format (trees). The
* basic flowgraph has already been constructed and is passed in.
*/
void Compiler::impImport()
{
#ifdef DEBUG
if (verbose)
{
printf("*************** In impImport() for %s\n", info.compFullName);
}
#endif
Compiler* inlineRoot = impInlineRoot();
if (info.compMaxStack <= SMALL_STACK_SIZE)
{
impStkSize = SMALL_STACK_SIZE;
}
else
{
impStkSize = info.compMaxStack;
}
if (this == inlineRoot)
{
// Allocate the stack contents
verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize];
}
else
{
// This is the inlinee compiler, steal the stack from the inliner compiler
// (after ensuring that it is large enough).
if (inlineRoot->impStkSize < impStkSize)
{
inlineRoot->impStkSize = impStkSize;
inlineRoot->verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize];
}
verCurrentState.esStack = inlineRoot->verCurrentState.esStack;
}
// initialize the entry state at start of method
verInitCurrentState();
// Initialize stuff related to figuring "spill cliques" (see spec comment for impGetSpillTmpBase).
if (this == inlineRoot) // These are only used on the root of the inlining tree.
{
// We have initialized these previously, but to size 0. Make them larger.
impPendingBlockMembers.Init(getAllocator(), fgBBNumMax * 2);
impSpillCliquePredMembers.Init(getAllocator(), fgBBNumMax * 2);
impSpillCliqueSuccMembers.Init(getAllocator(), fgBBNumMax * 2);
}
inlineRoot->impPendingBlockMembers.Reset(fgBBNumMax * 2);
inlineRoot->impSpillCliquePredMembers.Reset(fgBBNumMax * 2);
inlineRoot->impSpillCliqueSuccMembers.Reset(fgBBNumMax * 2);
impBlockListNodeFreeList = nullptr;
#ifdef DEBUG
impLastILoffsStmt = nullptr;
impNestedStackSpill = false;
#endif
impBoxTemp = BAD_VAR_NUM;
impPendingList = impPendingFree = nullptr;
// Skip leading internal blocks.
// These can arise from needing a leading scratch BB, from EH normalization, and from OSR entry redirects.
//
BasicBlock* entryBlock = fgFirstBB;
while (entryBlock->bbFlags & BBF_INTERNAL)
{
JITDUMP("Marking leading BBF_INTERNAL block " FMT_BB " as BBF_IMPORTED\n", entryBlock->bbNum);
entryBlock->bbFlags |= BBF_IMPORTED;
if (entryBlock->bbJumpKind == BBJ_NONE)
{
entryBlock = entryBlock->bbNext;
}
else if (opts.IsOSR() && (entryBlock->bbJumpKind == BBJ_ALWAYS))
{
entryBlock = entryBlock->bbJumpDest;
}
else
{
assert(!"unexpected bbJumpKind in entry sequence");
}
}
// Note for OSR we'd like to be able to verify this block must be
// stack empty, but won't know that until we've imported...so instead
// we'll BADCODE out if we mess up.
//
// (the concern here is that the runtime asks us to OSR a
// different IL version than the one that matched the method that
// triggered OSR). This should not happen but I might have the
// IL versioning stuff wrong.
//
// TODO: we also currently expect this block to be a join point,
// which we should verify once we find the jump targets.
impImportBlockPending(entryBlock);
/* Import blocks in the worker-list until there are no more */
while (impPendingList)
{
/* Remove the entry at the front of the list */
PendingDsc* dsc = impPendingList;
impPendingList = impPendingList->pdNext;
impSetPendingBlockMember(dsc->pdBB, 0);
/* Restore the stack state */
verCurrentState.thisInitialized = dsc->pdThisPtrInit;
verCurrentState.esStackDepth = dsc->pdSavedStack.ssDepth;
if (verCurrentState.esStackDepth)
{
impRestoreStackState(&dsc->pdSavedStack);
}
/* Add the entry to the free list for reuse */
dsc->pdNext = impPendingFree;
impPendingFree = dsc;
/* Now import the block */
if (dsc->pdBB->bbFlags & BBF_FAILED_VERIFICATION)
{
verConvertBBToThrowVerificationException(dsc->pdBB DEBUGARG(true));
impEndTreeList(dsc->pdBB);
}
else
{
impImportBlock(dsc->pdBB);
if (compDonotInline())
{
return;
}
if (compIsForImportOnly())
{
return;
}
}
}
#ifdef DEBUG
if (verbose && info.compXcptnsCount)
{
printf("\nAfter impImport() added block for try,catch,finally");
fgDispBasicBlocks();
printf("\n");
}
// Used in impImportBlockPending() for STRESS_CHK_REIMPORT
for (BasicBlock* const block : Blocks())
{
block->bbFlags &= ~BBF_VISITED;
}
#endif
}
// Checks if a typeinfo (usually stored in the type stack) is a struct.
// The invariant here is that if it's not a ref or a method and has a class handle
// it's a valuetype
bool Compiler::impIsValueType(typeInfo* pTypeInfo)
{
if (pTypeInfo && pTypeInfo->IsValueClassWithClsHnd())
{
return true;
}
else
{
return false;
}
}
/*****************************************************************************
* Check to see if the tree is the address of a local or
the address of a field in a local.
*lclVarTreeOut will contain the GT_LCL_VAR tree when it returns true.
*/
bool Compiler::impIsAddressInLocal(const GenTree* tree, GenTree** lclVarTreeOut)
{
if (tree->gtOper != GT_ADDR)
{
return false;
}
GenTree* op = tree->AsOp()->gtOp1;
while (op->gtOper == GT_FIELD)
{
op = op->AsField()->GetFldObj();
if (op && op->gtOper == GT_ADDR) // Skip static fields where op will be NULL.
{
op = op->AsOp()->gtOp1;
}
else
{
return false;
}
}
if (op->gtOper == GT_LCL_VAR)
{
if (lclVarTreeOut != nullptr)
{
*lclVarTreeOut = op;
}
return true;
}
else
{
return false;
}
}
//------------------------------------------------------------------------
// impMakeDiscretionaryInlineObservations: make observations that help
// determine the profitability of a discretionary inline
//
// Arguments:
// pInlineInfo -- InlineInfo for the inline, or null for the prejit root
// inlineResult -- InlineResult accumulating information about this inline
//
// Notes:
// If inlining or prejitting the root, this method also makes
// various observations about the method that factor into inline
// decisions. It sets `compNativeSizeEstimate` as a side effect.
void Compiler::impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult)
{
assert((pInlineInfo != nullptr && compIsForInlining()) || // Perform the actual inlining.
(pInlineInfo == nullptr && !compIsForInlining()) // Calculate the static inlining hint for ngen.
);
// If we're really inlining, we should just have one result in play.
assert((pInlineInfo == nullptr) || (inlineResult == pInlineInfo->inlineResult));
// If this is a "forceinline" method, the JIT probably shouldn't have gone
// to the trouble of estimating the native code size. Even if it did, it
// shouldn't be relying on the result of this method.
assert(inlineResult->GetObservation() == InlineObservation::CALLEE_IS_DISCRETIONARY_INLINE);
// Note if the caller contains NEWOBJ or NEWARR.
Compiler* rootCompiler = impInlineRoot();
if ((rootCompiler->optMethodFlags & OMF_HAS_NEWARRAY) != 0)
{
inlineResult->Note(InlineObservation::CALLER_HAS_NEWARRAY);
}
if ((rootCompiler->optMethodFlags & OMF_HAS_NEWOBJ) != 0)
{
inlineResult->Note(InlineObservation::CALLER_HAS_NEWOBJ);
}
bool calleeIsStatic = (info.compFlags & CORINFO_FLG_STATIC) != 0;
bool isSpecialMethod = (info.compFlags & CORINFO_FLG_CONSTRUCTOR) != 0;
if (isSpecialMethod)
{
if (calleeIsStatic)
{
inlineResult->Note(InlineObservation::CALLEE_IS_CLASS_CTOR);
}
else
{
inlineResult->Note(InlineObservation::CALLEE_IS_INSTANCE_CTOR);
}
}
else if (!calleeIsStatic)
{
// Callee is an instance method.
//
// Check if the callee has the same 'this' as the root.
if (pInlineInfo != nullptr)
{
GenTree* thisArg = pInlineInfo->iciCall->AsCall()->gtCallThisArg->GetNode();
assert(thisArg);
bool isSameThis = impIsThis(thisArg);
inlineResult->NoteBool(InlineObservation::CALLSITE_IS_SAME_THIS, isSameThis);
}
}
bool callsiteIsGeneric = (rootCompiler->info.compMethodInfo->args.sigInst.methInstCount != 0) ||
(rootCompiler->info.compMethodInfo->args.sigInst.classInstCount != 0);
bool calleeIsGeneric = (info.compMethodInfo->args.sigInst.methInstCount != 0) ||
(info.compMethodInfo->args.sigInst.classInstCount != 0);
if (!callsiteIsGeneric && calleeIsGeneric)
{
inlineResult->Note(InlineObservation::CALLSITE_NONGENERIC_CALLS_GENERIC);
}
// Inspect callee's arguments (and the actual values at the callsite for them)
CORINFO_SIG_INFO sig = info.compMethodInfo->args;
CORINFO_ARG_LIST_HANDLE sigArg = sig.args;
GenTreeCall::Use* argUse = pInlineInfo == nullptr ? nullptr : pInlineInfo->iciCall->AsCall()->gtCallArgs;
for (unsigned i = 0; i < info.compMethodInfo->args.numArgs; i++)
{
CORINFO_CLASS_HANDLE sigClass;
CorInfoType corType = strip(info.compCompHnd->getArgType(&sig, sigArg, &sigClass));
GenTree* argNode = argUse == nullptr ? nullptr : argUse->GetNode()->gtSkipPutArgType();
if (corType == CORINFO_TYPE_CLASS)
{
sigClass = info.compCompHnd->getArgClass(&sig, sigArg);
}
else if (corType == CORINFO_TYPE_VALUECLASS)
{
inlineResult->Note(InlineObservation::CALLEE_ARG_STRUCT);
}
else if (corType == CORINFO_TYPE_BYREF)
{
sigClass = info.compCompHnd->getArgClass(&sig, sigArg);
corType = info.compCompHnd->getChildType(sigClass, &sigClass);
}
if (argNode != nullptr)
{
bool isExact = false;
bool isNonNull = false;
CORINFO_CLASS_HANDLE argCls = gtGetClassHandle(argNode, &isExact, &isNonNull);
if (argCls != nullptr)
{
const bool isArgValueType = eeIsValueClass(argCls);
// Exact class of the arg is known
if (isExact && !isArgValueType)
{
inlineResult->Note(InlineObservation::CALLSITE_ARG_EXACT_CLS);
if ((argCls != sigClass) && (sigClass != nullptr))
{
// .. but the signature accepts a less concrete type.
inlineResult->Note(InlineObservation::CALLSITE_ARG_EXACT_CLS_SIG_IS_NOT);
}
}
// Arg is a reference type in the signature and a boxed value type was passed.
else if (isArgValueType && (corType == CORINFO_TYPE_CLASS))
{
inlineResult->Note(InlineObservation::CALLSITE_ARG_BOXED);
}
}
if (argNode->OperIsConst())
{
inlineResult->Note(InlineObservation::CALLSITE_ARG_CONST);
}
argUse = argUse->GetNext();
}
sigArg = info.compCompHnd->getArgNext(sigArg);
}
// Note if the callee's return type is a value type
if (info.compMethodInfo->args.retType == CORINFO_TYPE_VALUECLASS)
{
inlineResult->Note(InlineObservation::CALLEE_RETURNS_STRUCT);
}
// Note if the callee's class is a promotable struct
if ((info.compClassAttr & CORINFO_FLG_VALUECLASS) != 0)
{
assert(structPromotionHelper != nullptr);
if (structPromotionHelper->CanPromoteStructType(info.compClassHnd))
{
inlineResult->Note(InlineObservation::CALLEE_CLASS_PROMOTABLE);
}
inlineResult->Note(InlineObservation::CALLEE_CLASS_VALUETYPE);
}
#ifdef FEATURE_SIMD
// Note if this method has SIMD args or a SIMD return value
if (pInlineInfo != nullptr && pInlineInfo->hasSIMDTypeArgLocalOrReturn)
{
inlineResult->Note(InlineObservation::CALLEE_HAS_SIMD);
}
#endif // FEATURE_SIMD
// Roughly classify callsite frequency.
InlineCallsiteFrequency frequency = InlineCallsiteFrequency::UNUSED;
// If this is a prejit root, or a maximally hot block...
if ((pInlineInfo == nullptr) || (pInlineInfo->iciBlock->isMaxBBWeight()))
{
frequency = InlineCallsiteFrequency::HOT;
}
// No training data. Look for loop-like things.
// We consider a recursive call loop-like. Do not give the inlining boost to the method itself.
// However, give it to things nearby.
else if ((pInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) &&
(pInlineInfo->fncHandle != pInlineInfo->inlineCandidateInfo->ilCallerHandle))
{
frequency = InlineCallsiteFrequency::LOOP;
}
else if (pInlineInfo->iciBlock->hasProfileWeight() && (pInlineInfo->iciBlock->bbWeight > BB_ZERO_WEIGHT))
{
frequency = InlineCallsiteFrequency::WARM;
}
// Now modify the multiplier based on where we're called from.
else if (pInlineInfo->iciBlock->isRunRarely() || ((info.compFlags & FLG_CCTOR) == FLG_CCTOR))
{
frequency = InlineCallsiteFrequency::RARE;
}
else
{
frequency = InlineCallsiteFrequency::BORING;
}
// Also capture the block weight of the call site.
//
// In the prejit root case, assume at runtime there might be a hot call site
// for this method, so we won't prematurely conclude this method should never
// be inlined.
//
weight_t weight = 0;
if (pInlineInfo != nullptr)
{
weight = pInlineInfo->iciBlock->bbWeight;
}
else
{
const weight_t prejitHotCallerWeight = 1000000.0;
weight = prejitHotCallerWeight;
}
inlineResult->NoteInt(InlineObservation::CALLSITE_FREQUENCY, static_cast<int>(frequency));
inlineResult->NoteInt(InlineObservation::CALLSITE_WEIGHT, (int)(weight));
bool hasProfile = false;
double profileFreq = 0.0;
// If the call site has profile data, report the relative frequency of the site.
//
if ((pInlineInfo != nullptr) && rootCompiler->fgHaveSufficientProfileData())
{
const weight_t callSiteWeight = pInlineInfo->iciBlock->bbWeight;
const weight_t entryWeight = rootCompiler->fgFirstBB->bbWeight;
profileFreq = fgProfileWeightsEqual(entryWeight, 0.0) ? 0.0 : callSiteWeight / entryWeight;
hasProfile = true;
assert(callSiteWeight >= 0);
assert(entryWeight >= 0);
}
else if (pInlineInfo == nullptr)
{
// Simulate a hot callsite for PrejitRoot mode.
hasProfile = true;
profileFreq = 1.0;
}
inlineResult->NoteBool(InlineObservation::CALLSITE_HAS_PROFILE, hasProfile);
inlineResult->NoteDouble(InlineObservation::CALLSITE_PROFILE_FREQUENCY, profileFreq);
}
/*****************************************************************************
This method makes a STATIC inlining decision based on the IL code.
It should not make any inlining decision based on the context.
If forceInline is true, then the inlining decision should not depend on
performance heuristics (code size, etc.).
*/
void Compiler::impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle,
CORINFO_METHOD_INFO* methInfo,
bool forceInline,
InlineResult* inlineResult)
{
unsigned codeSize = methInfo->ILCodeSize;
// We shouldn't have made up our minds yet...
assert(!inlineResult->IsDecided());
if (methInfo->EHcount)
{
inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_EH);
return;
}
if ((methInfo->ILCode == nullptr) || (codeSize == 0))
{
inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NO_BODY);
return;
}
// For now we don't inline varargs (import code can't handle it)
if (methInfo->args.isVarArg())
{
inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
return;
}
// Reject if it has too many locals.
// This is currently an implementation limit due to fixed-size arrays in the
// inline info, rather than a performance heuristic.
inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_LOCALS, methInfo->locals.numArgs);
if (methInfo->locals.numArgs > MAX_INL_LCLS)
{
inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_LOCALS);
return;
}
// Make sure there aren't too many arguments.
// This is currently an implementation limit due to fixed-size arrays in the
// inline info, rather than a performance heuristic.
inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_ARGUMENTS, methInfo->args.numArgs);
if (methInfo->args.numArgs > MAX_INL_ARGS)
{
inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_ARGUMENTS);
return;
}
// Note force inline state
inlineResult->NoteBool(InlineObservation::CALLEE_IS_FORCE_INLINE, forceInline);
// Note IL code size
inlineResult->NoteInt(InlineObservation::CALLEE_IL_CODE_SIZE, codeSize);
if (inlineResult->IsFailure())
{
return;
}
// Make sure maxstack is not too big
inlineResult->NoteInt(InlineObservation::CALLEE_MAXSTACK, methInfo->maxStack);
if (inlineResult->IsFailure())
{
return;
}
}
/*****************************************************************************
 * Check (under an error trap) whether the given call to fncHandle may be
 * inlined; on success, allocate and fill in an InlineCandidateInfo and
 * return it via ppInlineCandidateInfo.
 */
void Compiler::impCheckCanInline(GenTreeCall* call,
CORINFO_METHOD_HANDLE fncHandle,
unsigned methAttr,
CORINFO_CONTEXT_HANDLE exactContextHnd,
InlineCandidateInfo** ppInlineCandidateInfo,
InlineResult* inlineResult)
{
// Either EE or JIT might throw exceptions below.
// If that happens, just don't inline the method.
struct Param
{
Compiler* pThis;
GenTreeCall* call;
CORINFO_METHOD_HANDLE fncHandle;
unsigned methAttr;
CORINFO_CONTEXT_HANDLE exactContextHnd;
InlineResult* result;
InlineCandidateInfo** ppInlineCandidateInfo;
} param;
memset(¶m, 0, sizeof(param));
param.pThis = this;
param.call = call;
param.fncHandle = fncHandle;
param.methAttr = methAttr;
param.exactContextHnd = (exactContextHnd != nullptr) ? exactContextHnd : MAKE_METHODCONTEXT(fncHandle);
param.result = inlineResult;
param.ppInlineCandidateInfo = ppInlineCandidateInfo;
bool success = eeRunWithErrorTrap<Param>(
[](Param* pParam) {
CorInfoInitClassResult initClassResult;
#ifdef DEBUG
const char* methodName;
const char* className;
methodName = pParam->pThis->eeGetMethodName(pParam->fncHandle, &className);
if (JitConfig.JitNoInline())
{
pParam->result->NoteFatal(InlineObservation::CALLEE_IS_JIT_NOINLINE);
goto _exit;
}
#endif
/* Try to get the code address/size for the method */
CORINFO_METHOD_INFO methInfo;
if (!pParam->pThis->info.compCompHnd->getMethodInfo(pParam->fncHandle, &methInfo))
{
pParam->result->NoteFatal(InlineObservation::CALLEE_NO_METHOD_INFO);
goto _exit;
}
// Profile data allows us to avoid early "too many IL bytes" outs.
pParam->result->NoteBool(InlineObservation::CALLSITE_HAS_PROFILE,
pParam->pThis->fgHaveSufficientProfileData());
bool forceInline;
forceInline = !!(pParam->methAttr & CORINFO_FLG_FORCEINLINE);
pParam->pThis->impCanInlineIL(pParam->fncHandle, &methInfo, forceInline, pParam->result);
if (pParam->result->IsFailure())
{
assert(pParam->result->IsNever());
goto _exit;
}
// Speculatively check if initClass() can be done.
// If it can be done, we will try to inline the method.
initClassResult =
pParam->pThis->info.compCompHnd->initClass(nullptr /* field */, pParam->fncHandle /* method */,
pParam->exactContextHnd /* context */);
if (initClassResult & CORINFO_INITCLASS_DONT_INLINE)
{
pParam->result->NoteFatal(InlineObservation::CALLSITE_CANT_CLASS_INIT);
goto _exit;
}
// Give the EE the final say in whether to inline or not.
// This should be last since for verifiable code, this can be expensive
/* VM Inline check also ensures that the method is verifiable if needed */
CorInfoInline vmResult;
vmResult = pParam->pThis->info.compCompHnd->canInline(pParam->pThis->info.compMethodHnd, pParam->fncHandle);
if (vmResult == INLINE_FAIL)
{
pParam->result->NoteFatal(InlineObservation::CALLSITE_IS_VM_NOINLINE);
}
else if (vmResult == INLINE_NEVER)
{
pParam->result->NoteFatal(InlineObservation::CALLEE_IS_VM_NOINLINE);
}
if (pParam->result->IsFailure())
{
// Make sure not to report this one. It was already reported by the VM.
pParam->result->SetReported();
goto _exit;
}
/* Get the method properties */
CORINFO_CLASS_HANDLE clsHandle;
clsHandle = pParam->pThis->info.compCompHnd->getMethodClass(pParam->fncHandle);
unsigned clsAttr;
clsAttr = pParam->pThis->info.compCompHnd->getClassAttribs(clsHandle);
/* Get the return type */
var_types fncRetType;
fncRetType = pParam->call->TypeGet();
#ifdef DEBUG
var_types fncRealRetType;
fncRealRetType = JITtype2varType(methInfo.args.retType);
assert((genActualType(fncRealRetType) == genActualType(fncRetType)) ||
// <BUGNUM> VSW 288602 </BUGNUM>
// In the case of IJW, we allow assigning a native pointer to a BYREF.
(fncRetType == TYP_BYREF && methInfo.args.retType == CORINFO_TYPE_PTR) ||
(varTypeIsStruct(fncRetType) && (fncRealRetType == TYP_STRUCT)));
#endif
// Allocate an InlineCandidateInfo structure,
//
// Or, reuse the existing GuardedDevirtualizationCandidateInfo,
// which was pre-allocated to have extra room.
//
InlineCandidateInfo* pInfo;
if (pParam->call->IsGuardedDevirtualizationCandidate())
{
pInfo = pParam->call->gtInlineCandidateInfo;
}
else
{
pInfo = new (pParam->pThis, CMK_Inlining) InlineCandidateInfo;
// Null out bits we don't use when we're just inlining
pInfo->guardedClassHandle = nullptr;
pInfo->guardedMethodHandle = nullptr;
pInfo->guardedMethodUnboxedEntryHandle = nullptr;
pInfo->likelihood = 0;
pInfo->requiresInstMethodTableArg = false;
}
pInfo->methInfo = methInfo;
pInfo->ilCallerHandle = pParam->pThis->info.compMethodHnd;
pInfo->clsHandle = clsHandle;
pInfo->exactContextHnd = pParam->exactContextHnd;
pInfo->retExpr = nullptr;
pInfo->preexistingSpillTemp = BAD_VAR_NUM;
pInfo->clsAttr = clsAttr;
pInfo->methAttr = pParam->methAttr;
pInfo->initClassResult = initClassResult;
pInfo->fncRetType = fncRetType;
pInfo->exactContextNeedsRuntimeLookup = false;
pInfo->inlinersContext = pParam->pThis->compInlineContext;
// Note exactContextNeedsRuntimeLookup is reset later on,
// over in impMarkInlineCandidate.
*(pParam->ppInlineCandidateInfo) = pInfo;
_exit:;
},
¶m);
if (!success)
{
param.result->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR);
}
}
//------------------------------------------------------------------------
// impInlineRecordArgInfo: record information about an inline candidate argument
//
// Arguments:
// pInlineInfo - inline info for the inline candidate
// curArgVal - tree for the caller actual argument value
// argNum - logical index of this argument
// inlineResult - result of ongoing inline evaluation
//
// Notes:
//
// Checks for various inline blocking conditions and makes notes in
// the inline info arg table about the properties of the actual. These
// properties are used later by impInlineFetchArg to determine how best to
// pass the argument into the inlinee.
void Compiler::impInlineRecordArgInfo(InlineInfo* pInlineInfo,
GenTree* curArgVal,
unsigned argNum,
InlineResult* inlineResult)
{
InlArgInfo* inlCurArgInfo = &pInlineInfo->inlArgInfo[argNum];
inlCurArgInfo->argNode = curArgVal; // Save the original tree, with PUT_ARG and RET_EXPR.
curArgVal = curArgVal->gtSkipPutArgType();
curArgVal = curArgVal->gtRetExprVal();
if (curArgVal->gtOper == GT_MKREFANY)
{
inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_IS_MKREFANY);
return;
}
GenTree* lclVarTree;
const bool isAddressInLocal = impIsAddressInLocal(curArgVal, &lclVarTree);
if (isAddressInLocal && varTypeIsStruct(lclVarTree))
{
inlCurArgInfo->argIsByRefToStructLocal = true;
#ifdef FEATURE_SIMD
if (lvaTable[lclVarTree->AsLclVarCommon()->GetLclNum()].lvSIMDType)
{
pInlineInfo->hasSIMDTypeArgLocalOrReturn = true;
}
#endif // FEATURE_SIMD
}
if (curArgVal->gtFlags & GTF_ALL_EFFECT)
{
inlCurArgInfo->argHasGlobRef = (curArgVal->gtFlags & GTF_GLOB_REF) != 0;
inlCurArgInfo->argHasSideEff = (curArgVal->gtFlags & (GTF_ALL_EFFECT & ~GTF_GLOB_REF)) != 0;
}
if (curArgVal->gtOper == GT_LCL_VAR)
{
inlCurArgInfo->argIsLclVar = true;
/* Remember the "original" argument number */
INDEBUG(curArgVal->AsLclVar()->gtLclILoffs = argNum;)
}
if (curArgVal->IsInvariant())
{
inlCurArgInfo->argIsInvariant = true;
if (inlCurArgInfo->argIsThis && (curArgVal->gtOper == GT_CNS_INT) && (curArgVal->AsIntCon()->gtIconVal == 0))
{
// Abort inlining at this call site
inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_HAS_NULL_THIS);
return;
}
}
bool isExact = false;
bool isNonNull = false;
inlCurArgInfo->argIsExact = (gtGetClassHandle(curArgVal, &isExact, &isNonNull) != NO_CLASS_HANDLE) && isExact;
// If the arg is a local that is address-taken, we can't safely
// directly substitute it into the inlinee.
//
// Previously we'd accomplish this by setting "argHasLdargaOp" but
// that has a stronger meaning: that the arg value can change in
// the method body. Using that flag prevents type propagation,
// which is safe in this case.
//
// Instead mark the arg as having a caller local ref.
if (!inlCurArgInfo->argIsInvariant && gtHasLocalsWithAddrOp(curArgVal))
{
inlCurArgInfo->argHasCallerLocalRef = true;
}
#ifdef DEBUG
if (verbose)
{
if (inlCurArgInfo->argIsThis)
{
printf("thisArg:");
}
else
{
printf("\nArgument #%u:", argNum);
}
if (inlCurArgInfo->argIsLclVar)
{
printf(" is a local var");
}
if (inlCurArgInfo->argIsInvariant)
{
printf(" is a constant");
}
if (inlCurArgInfo->argHasGlobRef)
{
printf(" has global refs");
}
if (inlCurArgInfo->argHasCallerLocalRef)
{
printf(" has caller local ref");
}
if (inlCurArgInfo->argHasSideEff)
{
printf(" has side effects");
}
if (inlCurArgInfo->argHasLdargaOp)
{
printf(" has ldarga effect");
}
if (inlCurArgInfo->argHasStargOp)
{
printf(" has starg effect");
}
if (inlCurArgInfo->argIsByRefToStructLocal)
{
printf(" is byref to a struct local");
}
printf("\n");
gtDispTree(curArgVal);
printf("\n");
}
#endif
}
//------------------------------------------------------------------------
// impInlineInitVars: setup inline information for inlinee args and locals
//
// Arguments:
// pInlineInfo - inline info for the inline candidate
//
// Notes:
// This method primarily adds caller-supplied info to the inlArgInfo
// and sets up the lclVarInfo table.
//
// For args, the inlArgInfo records properties of the actual argument
// including the tree node that produces the arg value. This node is
// usually the tree node present at the call, but may also differ in
// various ways:
// - when the call arg is a GT_RET_EXPR, we search back through the ret
// expr chain for the actual node. Note this will either be the original
// call (which will be a failed inline by this point), or the return
// expression from some set of inlines.
// - when argument type casting is needed the necessary casts are added
// around the argument node.
// - if an argument can be simplified by folding then the node here is the
// folded value.
//
// The method may make observations that lead to marking this candidate as
// a failed inline. If this happens the initialization is abandoned immediately
// to try and reduce the jit time cost for a failed inline.
void Compiler::impInlineInitVars(InlineInfo* pInlineInfo)
{
assert(!compIsForInlining());
GenTreeCall* call = pInlineInfo->iciCall;
CORINFO_METHOD_INFO* methInfo = &pInlineInfo->inlineCandidateInfo->methInfo;
unsigned clsAttr = pInlineInfo->inlineCandidateInfo->clsAttr;
InlArgInfo* inlArgInfo = pInlineInfo->inlArgInfo;
InlLclVarInfo* lclVarInfo = pInlineInfo->lclVarInfo;
InlineResult* inlineResult = pInlineInfo->inlineResult;
// Inlined methods always use the managed calling convention
const bool hasRetBuffArg = impMethodInfo_hasRetBuffArg(methInfo, CorInfoCallConvExtension::Managed);
/* init the argument struct */
memset(inlArgInfo, 0, (MAX_INL_ARGS + 1) * sizeof(inlArgInfo[0]));
GenTreeCall::Use* thisArg = call->gtCallThisArg;
unsigned argCnt = 0; // Count of the arguments
assert((methInfo->args.hasThis()) == (thisArg != nullptr));
if (thisArg != nullptr)
{
inlArgInfo[0].argIsThis = true;
impInlineRecordArgInfo(pInlineInfo, thisArg->GetNode(), argCnt, inlineResult);
if (inlineResult->IsFailure())
{
return;
}
/* Increment the argument count */
argCnt++;
}
/* Record some information about each of the arguments */
bool hasTypeCtxtArg = (methInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) != 0;
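// Index of the generic type context argument among the call's IL arguments: when user args
// come last it immediately follows 'this' (slot 1, or 0 if there is no 'this'); otherwise it
// is the last IL argument.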
#if USER_ARGS_COME_LAST
unsigned typeCtxtArg = (thisArg != nullptr) ? 1 : 0;
#else // USER_ARGS_COME_LAST
unsigned typeCtxtArg = methInfo->args.totalILArgs();
#endif // USER_ARGS_COME_LAST
for (GenTreeCall::Use& use : call->Args())
{
if (hasRetBuffArg && (&use == call->gtCallArgs))
{
continue;
}
// Ignore the type context argument
if (hasTypeCtxtArg && (argCnt == typeCtxtArg))
{
pInlineInfo->typeContextArg = typeCtxtArg;
typeCtxtArg = 0xFFFFFFFF;
continue;
}
GenTree* actualArg = gtFoldExpr(use.GetNode());
impInlineRecordArgInfo(pInlineInfo, actualArg, argCnt, inlineResult);
if (inlineResult->IsFailure())
{
return;
}
/* Increment the argument count */
argCnt++;
}
/* Make sure we got the arg number right */
assert(argCnt == methInfo->args.totalILArgs());
#ifdef FEATURE_SIMD
bool foundSIMDType = pInlineInfo->hasSIMDTypeArgLocalOrReturn;
#endif // FEATURE_SIMD
/* We have typeless opcodes, get type information from the signature */
if (thisArg != nullptr)
{
lclVarInfo[0].lclVerTypeInfo = verMakeTypeInfo(pInlineInfo->inlineCandidateInfo->clsHandle);
lclVarInfo[0].lclHasLdlocaOp = false;
#ifdef FEATURE_SIMD
// We always want to check isSIMDClass, since we want to set foundSIMDType (to increase
// the inlining multiplier) for anything in that assembly.
// But we only need to normalize it if it is a TYP_STRUCT
// (which we need to do even if we have already set foundSIMDType).
if (!foundSIMDType && isSIMDorHWSIMDClass(&(lclVarInfo[0].lclVerTypeInfo)))
{
foundSIMDType = true;
}
#endif // FEATURE_SIMD
var_types sigType = ((clsAttr & CORINFO_FLG_VALUECLASS) != 0) ? TYP_BYREF : TYP_REF;
lclVarInfo[0].lclTypeInfo = sigType;
GenTree* thisArgNode = thisArg->GetNode();
assert(varTypeIsGC(thisArgNode->TypeGet()) || // "this" is managed
((thisArgNode->TypeGet() == TYP_I_IMPL) && // "this" is unmgd but the method's class doesn't care
(clsAttr & CORINFO_FLG_VALUECLASS)));
if (genActualType(thisArgNode->TypeGet()) != genActualType(sigType))
{
if (sigType == TYP_REF)
{
/* The argument cannot be bashed into a ref (see bug 750871) */
inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_REF);
return;
}
/* This can only happen with byrefs <-> ints/shorts */
assert(sigType == TYP_BYREF);
assert((genActualType(thisArgNode->TypeGet()) == TYP_I_IMPL) || (thisArgNode->TypeGet() == TYP_BYREF));
lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
}
}
/* Init the types of the arguments and make sure the types
* from the trees match the types in the signature */
CORINFO_ARG_LIST_HANDLE argLst;
argLst = methInfo->args.args;
unsigned i;
for (i = (thisArg ? 1 : 0); i < argCnt; i++, argLst = info.compCompHnd->getArgNext(argLst))
{
var_types sigType = (var_types)eeGetArgType(argLst, &methInfo->args);
lclVarInfo[i].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->args, argLst);
#ifdef FEATURE_SIMD
if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[i].lclVerTypeInfo)))
{
// If this is a SIMD class (i.e. in the SIMD assembly), then we will consider that we've
// found a SIMD type, even if this may not be a type we recognize (the assumption is that
// it is likely to use a SIMD type, and therefore we want to increase the inlining multiplier).
foundSIMDType = true;
if (sigType == TYP_STRUCT)
{
var_types structType = impNormStructType(lclVarInfo[i].lclVerTypeInfo.GetClassHandle());
sigType = structType;
}
}
#endif // FEATURE_SIMD
lclVarInfo[i].lclTypeInfo = sigType;
lclVarInfo[i].lclHasLdlocaOp = false;
/* Does the tree type match the signature type? */
GenTree* inlArgNode = inlArgInfo[i].argNode;
if ((sigType != inlArgNode->gtType) || inlArgNode->OperIs(GT_PUTARG_TYPE))
{
assert(impCheckImplicitArgumentCoercion(sigType, inlArgNode->gtType));
assert(!varTypeIsStruct(inlArgNode->gtType) && !varTypeIsStruct(sigType));
/* In valid IL, this can only happen for short integer types or byrefs <-> [native] ints,
but in bad IL cases with caller-callee signature mismatches we can see other types.
Intentionally reject cases with mismatches so the jit is more flexible when
encountering bad IL. */
bool isPlausibleTypeMatch = (genActualType(sigType) == genActualType(inlArgNode->gtType)) ||
(genActualTypeIsIntOrI(sigType) && inlArgNode->gtType == TYP_BYREF) ||
(sigType == TYP_BYREF && genActualTypeIsIntOrI(inlArgNode->gtType));
if (!isPlausibleTypeMatch)
{
inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_TYPES_INCOMPATIBLE);
return;
}
GenTree** pInlArgNode;
if (inlArgNode->OperIs(GT_PUTARG_TYPE))
{
// There was a widening or narrowing cast.
GenTreeUnOp* putArgType = inlArgNode->AsUnOp();
pInlArgNode = &putArgType->gtOp1;
inlArgNode = putArgType->gtOp1;
}
else
{
// The argument has the same size but a different type.
pInlArgNode = &inlArgInfo[i].argNode;
}
/* Is it a narrowing or widening cast?
* Widening casts are ok since the value computed is already
* normalized to an int (on the IL stack) */
if (genTypeSize(inlArgNode->gtType) >= genTypeSize(sigType))
{
if (sigType == TYP_BYREF)
{
lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
}
else if (inlArgNode->gtType == TYP_BYREF)
{
assert(varTypeIsIntOrI(sigType));
/* If possible bash the BYREF to an int */
if (inlArgNode->IsLocalAddrExpr() != nullptr)
{
inlArgNode->gtType = TYP_I_IMPL;
lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
}
else
{
/* Arguments 'int <- byref' cannot be changed */
inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
return;
}
}
else if (genTypeSize(sigType) < TARGET_POINTER_SIZE)
{
// Narrowing cast.
if (inlArgNode->OperIs(GT_LCL_VAR))
{
const unsigned lclNum = inlArgNode->AsLclVarCommon()->GetLclNum();
if (!lvaTable[lclNum].lvNormalizeOnLoad() && sigType == lvaGetRealType(lclNum))
{
// We don't need to insert a cast here as the variable
// was assigned a normalized value of the right type.
continue;
}
}
inlArgNode = gtNewCastNode(TYP_INT, inlArgNode, false, sigType);
inlArgInfo[i].argIsLclVar = false;
// Try to fold the node in case we have constant arguments.
if (inlArgInfo[i].argIsInvariant)
{
inlArgNode = gtFoldExprConst(inlArgNode);
assert(inlArgNode->OperIsConst());
}
*pInlArgNode = inlArgNode;
}
#ifdef TARGET_64BIT
else if (genTypeSize(genActualType(inlArgNode->gtType)) < genTypeSize(sigType))
{
// This should only happen for int -> native int widening
inlArgNode = gtNewCastNode(genActualType(sigType), inlArgNode, false, sigType);
inlArgInfo[i].argIsLclVar = false;
/* Try to fold the node in case we have constant arguments */
if (inlArgInfo[i].argIsInvariant)
{
inlArgNode = gtFoldExprConst(inlArgNode);
assert(inlArgNode->OperIsConst());
}
*pInlArgNode = inlArgNode;
}
#endif // TARGET_64BIT
}
}
}
/* Init the types of the local variables */
CORINFO_ARG_LIST_HANDLE localsSig;
localsSig = methInfo->locals.args;
for (i = 0; i < methInfo->locals.numArgs; i++)
{
bool isPinned;
var_types type = (var_types)eeGetArgType(localsSig, &methInfo->locals, &isPinned);
lclVarInfo[i + argCnt].lclHasLdlocaOp = false;
lclVarInfo[i + argCnt].lclTypeInfo = type;
if (varTypeIsGC(type))
{
if (isPinned)
{
JITDUMP("Inlinee local #%02u is pinned\n", i);
lclVarInfo[i + argCnt].lclIsPinned = true;
// Pinned locals may cause inlines to fail.
inlineResult->Note(InlineObservation::CALLEE_HAS_PINNED_LOCALS);
if (inlineResult->IsFailure())
{
return;
}
}
pInlineInfo->numberOfGcRefLocals++;
}
else if (isPinned)
{
JITDUMP("Ignoring pin on inlinee local #%02u -- not a GC type\n", i);
}
lclVarInfo[i + argCnt].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->locals, localsSig);
// If this local is a struct type with GC fields, inform the inliner. It may choose to bail
// out on the inline.
if (type == TYP_STRUCT)
{
CORINFO_CLASS_HANDLE lclHandle = lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle();
DWORD typeFlags = info.compCompHnd->getClassAttribs(lclHandle);
if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
{
inlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
if (inlineResult->IsFailure())
{
return;
}
// Do further notification in the case where the call site is rare; some policies do
// not track the relative hotness of call sites for "always" inline cases.
if (pInlineInfo->iciBlock->isRunRarely())
{
inlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
if (inlineResult->IsFailure())
{
return;
}
}
}
}
localsSig = info.compCompHnd->getArgNext(localsSig);
#ifdef FEATURE_SIMD
if ((!foundSIMDType || (type == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[i + argCnt].lclVerTypeInfo)))
{
foundSIMDType = true;
if (type == TYP_STRUCT)
{
var_types structType = impNormStructType(lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle());
lclVarInfo[i + argCnt].lclTypeInfo = structType;
}
}
#endif // FEATURE_SIMD
}
#ifdef FEATURE_SIMD
if (!foundSIMDType && (call->AsCall()->gtRetClsHnd != nullptr) && isSIMDorHWSIMDClass(call->AsCall()->gtRetClsHnd))
{
foundSIMDType = true;
}
pInlineInfo->hasSIMDTypeArgLocalOrReturn = foundSIMDType;
#endif // FEATURE_SIMD
}
//------------------------------------------------------------------------
// impInlineFetchLocal: get a local var that represents an inlinee local
//
// Arguments:
// lclNum -- number of the inlinee local
// reason -- debug string describing purpose of the local var
//
// Returns:
// Number of the local to use
//
// Notes:
// This method is invoked only for locals actually used in the
// inlinee body.
//
// Allocates a new temp if necessary, and copies key properties
// over from the inlinee local var info.
unsigned Compiler::impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason))
{
assert(compIsForInlining());
unsigned tmpNum = impInlineInfo->lclTmpNum[lclNum];
if (tmpNum == BAD_VAR_NUM)
{
const InlLclVarInfo& inlineeLocal = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt];
const var_types lclTyp = inlineeLocal.lclTypeInfo;
// The lifetime of this local might span multiple BBs.
// So it is a long lifetime local.
impInlineInfo->lclTmpNum[lclNum] = tmpNum = lvaGrabTemp(false DEBUGARG(reason));
// Copy over key info
lvaTable[tmpNum].lvType = lclTyp;
lvaTable[tmpNum].lvHasLdAddrOp = inlineeLocal.lclHasLdlocaOp;
lvaTable[tmpNum].lvPinned = inlineeLocal.lclIsPinned;
lvaTable[tmpNum].lvHasILStoreOp = inlineeLocal.lclHasStlocOp;
lvaTable[tmpNum].lvHasMultipleILStoreOp = inlineeLocal.lclHasMultipleStlocOp;
// Copy over class handle for ref types. Note this may be a
// shared type -- someday perhaps we can get the exact
// signature and pass in a more precise type.
if (lclTyp == TYP_REF)
{
assert(lvaTable[tmpNum].lvSingleDef == 0);
lvaTable[tmpNum].lvSingleDef = !inlineeLocal.lclHasMultipleStlocOp && !inlineeLocal.lclHasLdlocaOp;
if (lvaTable[tmpNum].lvSingleDef)
{
JITDUMP("Marked V%02u as a single def temp\n", tmpNum);
}
lvaSetClass(tmpNum, inlineeLocal.lclVerTypeInfo.GetClassHandleForObjRef());
}
if (inlineeLocal.lclVerTypeInfo.IsStruct())
{
if (varTypeIsStruct(lclTyp))
{
lvaSetStruct(tmpNum, inlineeLocal.lclVerTypeInfo.GetClassHandle(), true /* unsafe value cls check */);
}
else
{
// This is a wrapped primitive. Make sure the verstate knows that
lvaTable[tmpNum].lvVerTypeInfo = inlineeLocal.lclVerTypeInfo;
}
}
#ifdef DEBUG
// Sanity check that we're properly prepared for gc ref locals.
if (varTypeIsGC(lclTyp))
{
// Since there are gc locals we should have seen them earlier
// and if there was a return value, set up the spill temp.
assert(impInlineInfo->HasGcRefLocals());
assert((info.compRetNativeType == TYP_VOID) || fgNeedReturnSpillTemp());
}
else
{
// Make sure all pinned locals count as gc refs.
assert(!inlineeLocal.lclIsPinned);
}
#endif // DEBUG
}
return tmpNum;
}
//------------------------------------------------------------------------
// impInlineFetchArg: return tree node for argument value in an inlinee
//
// Arguments:
// lclNum -- argument number in inlinee IL
// inlArgInfo -- argument info for inlinee
// lclVarInfo -- var info for inlinee
//
// Returns:
// Tree for the argument's value. Often an inlinee-scoped temp
// GT_LCL_VAR but can be other tree kinds, if the argument
// expression from the caller can be directly substituted into the
// inlinee body.
//
// Notes:
// Must be used only for arguments -- use impInlineFetchLocal for
// inlinee locals.
//
// Direct substitution is performed when the formal argument cannot
// change value in the inlinee body (no starg or ldarga), and the
// actual argument expression's value cannot be changed if it is
// substituted into the inlinee body.
//
// Even if an inlinee-scoped temp is returned here, it may later be
// "bashed" to a caller-supplied tree when arguments are actually
// passed (see fgInlinePrependStatements). Bashing can happen if
// the argument ends up being single use and other conditions are
// met. So the contents of the tree returned here may not end up
// being the ones ultimately used for the argument.
//
// This method will side effect inlArgInfo. It should only be called
// for actual uses of the argument in the inlinee.
GenTree* Compiler::impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclVarInfo)
{
// Cache the relevant arg and lcl info for this argument.
// We will modify argInfo but not lclVarInfo.
InlArgInfo& argInfo = inlArgInfo[lclNum];
const InlLclVarInfo& lclInfo = lclVarInfo[lclNum];
const bool argCanBeModified = argInfo.argHasLdargaOp || argInfo.argHasStargOp;
const var_types lclTyp = lclInfo.lclTypeInfo;
GenTree* op1 = nullptr;
GenTree* argNode = argInfo.argNode->gtSkipPutArgType()->gtRetExprVal();
if (argInfo.argIsInvariant && !argCanBeModified)
{
// Directly substitute constants or addresses of locals
//
// Clone the constant. Note that we cannot directly use
// argNode in the trees even if !argInfo.argIsUsed as this
// would introduce aliasing between inlArgInfo[].argNode and
// impInlineExpr. Then gtFoldExpr() could change it, causing
// further references to the argument working off of the
// bashed copy.
op1 = gtCloneExpr(argNode);
PREFIX_ASSUME(op1 != nullptr);
argInfo.argTmpNum = BAD_VAR_NUM;
// We may need to retype to ensure we match the callee's view of the type.
// Otherwise callee-pass throughs of arguments can create return type
// mismatches that block inlining.
//
// Note argument type mismatches that prevent inlining should
// have been caught in impInlineInitVars.
if (op1->TypeGet() != lclTyp)
{
op1->gtType = genActualType(lclTyp);
}
}
else if (argInfo.argIsLclVar && !argCanBeModified && !argInfo.argHasCallerLocalRef)
{
// Directly substitute unaliased caller locals for args that cannot be modified
//
// Use the caller-supplied node if this is the first use.
op1 = argNode;
unsigned argLclNum = op1->AsLclVarCommon()->GetLclNum();
argInfo.argTmpNum = argLclNum;
// Use an equivalent copy if this is the second or subsequent
// use.
//
// Note argument type mismatches that prevent inlining should
// have been caught in impInlineInitVars. If inlining is not prevented
// but a cast is necessary, we similarly expect it to have been inserted then.
// So here we may have argument type mismatches that are benign, for instance
// passing a TYP_SHORT local (eg. normalized-on-load) as a TYP_INT arg.
// The exception is when the inlining means we should start tracking the argument.
if (argInfo.argIsUsed || ((lclTyp == TYP_BYREF) && (op1->TypeGet() != TYP_BYREF)))
{
assert(op1->gtOper == GT_LCL_VAR);
assert(lclNum == op1->AsLclVar()->gtLclILoffs);
// Create a new lcl var node - remember the argument lclNum
op1 = impCreateLocalNode(argLclNum DEBUGARG(op1->AsLclVar()->gtLclILoffs));
// Start tracking things as a byref if the parameter is a byref.
if (lclTyp == TYP_BYREF)
{
op1->gtType = TYP_BYREF;
}
}
}
else if (argInfo.argIsByRefToStructLocal && !argInfo.argHasStargOp)
{
/* Argument is a by-ref address to a struct, a normed struct, or its field.
In these cases, don't spill the byref to a local, simply clone the tree and use it.
This way we will increase the chance for this byref to be optimized away by
a subsequent "dereference" operation.
From Dev11 bug #139955: Argument node can also be TYP_I_IMPL if we've bashed the tree
(in impInlineInitVars()), if the arg has argHasLdargaOp as well as argIsByRefToStructLocal.
For example, if the caller is:
ldloca.s V_1 // V_1 is a local struct
call void Test.ILPart::RunLdargaOnPointerArg(int32*)
and the callee being inlined has:
.method public static void RunLdargaOnPointerArg(int32* ptrToInts) cil managed
ldarga.s ptrToInts
call void Test.FourInts::NotInlined_SetExpectedValuesThroughPointerToPointer(int32**)
then we change the argument tree (of "ldloca.s V_1") to TYP_I_IMPL to match the callee signature. We'll
soon afterwards reject the inlining anyway, since the tree we return isn't a GT_LCL_VAR.
*/
assert(argNode->TypeGet() == TYP_BYREF || argNode->TypeGet() == TYP_I_IMPL);
op1 = gtCloneExpr(argNode);
}
else
{
/* Argument is a complex expression - it must be evaluated into a temp */
if (argInfo.argHasTmp)
{
assert(argInfo.argIsUsed);
assert(argInfo.argTmpNum < lvaCount);
/* Create a new lcl var node - remember the argument lclNum */
op1 = gtNewLclvNode(argInfo.argTmpNum, genActualType(lclTyp));
/* This is the second or later use of this argument,
so we have to use the temp (instead of the actual arg) */
argInfo.argBashTmpNode = nullptr;
}
else
{
/* First time use */
assert(!argInfo.argIsUsed);
/* Reserve a temp for the expression.
* Use a large size node as we may change it later */
const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Inlining Arg"));
lvaTable[tmpNum].lvType = lclTyp;
// For ref types, determine the type of the temp.
if (lclTyp == TYP_REF)
{
if (!argCanBeModified)
{
// If the arg can't be modified in the method
// body, use the type of the value, if
// known. Otherwise, use the declared type.
assert(lvaTable[tmpNum].lvSingleDef == 0);
lvaTable[tmpNum].lvSingleDef = 1;
JITDUMP("Marked V%02u as a single def temp\n", tmpNum);
lvaSetClass(tmpNum, argNode, lclInfo.lclVerTypeInfo.GetClassHandleForObjRef());
}
else
{
// Arg might be modified, use the declared type of
// the argument.
lvaSetClass(tmpNum, lclInfo.lclVerTypeInfo.GetClassHandleForObjRef());
}
}
assert(!lvaTable[tmpNum].IsAddressExposed());
if (argInfo.argHasLdargaOp)
{
lvaTable[tmpNum].lvHasLdAddrOp = 1;
}
if (lclInfo.lclVerTypeInfo.IsStruct())
{
if (varTypeIsStruct(lclTyp))
{
lvaSetStruct(tmpNum, lclInfo.lclVerTypeInfo.GetClassHandle(), true /* unsafe value cls check */);
if (info.compIsVarArgs)
{
lvaSetStructUsedAsVarArg(tmpNum);
}
}
else
{
// This is a wrapped primitive. Make sure the verstate knows that
lvaTable[tmpNum].lvVerTypeInfo = lclInfo.lclVerTypeInfo;
}
}
argInfo.argHasTmp = true;
argInfo.argTmpNum = tmpNum;
// If we require strict exception order, then arguments must
// be evaluated in sequence before the body of the inlined method.
// So we need to evaluate them to a temp.
// Also, if arguments have global or local references, we need to
// evaluate them to a temp before the inlined body as the
// inlined body may be modifying the global ref.
// TODO-1stClassStructs: We currently do not reuse an existing lclVar
// if it is a struct, because it requires some additional handling.
if ((!varTypeIsStruct(lclTyp) && !argInfo.argHasSideEff && !argInfo.argHasGlobRef &&
!argInfo.argHasCallerLocalRef))
{
/* Get a *LARGE* LCL_VAR node */
op1 = gtNewLclLNode(tmpNum, genActualType(lclTyp) DEBUGARG(lclNum));
/* Record op1 as the very first use of this argument.
If there are no further uses of the arg, we may be
able to use the actual arg node instead of the temp.
If we do see any further uses, we will clear this. */
argInfo.argBashTmpNode = op1;
}
else
{
/* Get a small LCL_VAR node */
op1 = gtNewLclvNode(tmpNum, genActualType(lclTyp));
/* No bashing of this argument */
argInfo.argBashTmpNode = nullptr;
}
}
}
// Mark this argument as used.
argInfo.argIsUsed = true;
return op1;
}
/******************************************************************************
Is this the original "this" argument to the call being inlined?
Note that we do not inline methods with "starg 0", and so we do not need to
worry about it.
*/
bool Compiler::impInlineIsThis(GenTree* tree, InlArgInfo* inlArgInfo)
{
assert(compIsForInlining());
return (tree->gtOper == GT_LCL_VAR && tree->AsLclVarCommon()->GetLclNum() == inlArgInfo[0].argTmpNum);
}
//-----------------------------------------------------------------------------
// impInlineIsGuaranteedThisDerefBeforeAnySideEffects: Check if a dereference in
// the inlinee can guarantee that the "this" pointer is non-NULL.
//
// Arguments:
// additionalTree - a tree to check for side effects
// additionalCallArgs - a list of call args to check for side effects
// dereferencedAddress - address expression being dereferenced
// inlArgInfo - inlinee argument information
//
// Notes:
// If we haven't hit a branch or a side effect, and we are dereferencing
// from 'this' to access a field or make GTF_CALL_NULLCHECK call,
// then we can avoid a separate null pointer check.
//
// The importer stack and current statement list are searched for side effects.
// Trees that have been popped of the stack but haven't been appended to the
// statement list and have to be checked for side effects may be provided via
// additionalTree and additionalCallArgs.
//
bool Compiler::impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTree* additionalTree,
GenTreeCall::Use* additionalCallArgs,
GenTree* dereferencedAddress,
InlArgInfo* inlArgInfo)
{
assert(compIsForInlining());
assert(opts.OptEnabled(CLFLG_INLINING));
BasicBlock* block = compCurBB;
if (block != fgFirstBB)
{
return false;
}
if (!impInlineIsThis(dereferencedAddress, inlArgInfo))
{
return false;
}
if ((additionalTree != nullptr) && GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(additionalTree->gtFlags))
{
return false;
}
for (GenTreeCall::Use& use : GenTreeCall::UseList(additionalCallArgs))
{
if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(use.GetNode()->gtFlags))
{
return false;
}
}
for (Statement* stmt : StatementList(impStmtList))
{
GenTree* expr = stmt->GetRootNode();
if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(expr->gtFlags))
{
return false;
}
}
for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
{
GenTreeFlags stackTreeFlags = verCurrentState.esStack[level].val->gtFlags;
if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(stackTreeFlags))
{
return false;
}
}
return true;
}
//------------------------------------------------------------------------
// impMarkInlineCandidate: determine if this call can be subsequently inlined
//
// Arguments:
// callNode -- call under scrutiny
// exactContextHnd -- context handle for inlining
// exactContextNeedsRuntimeLookup -- true if context required runtime lookup
// callInfo -- call info from VM
//
// Notes:
// Mostly a wrapper for impMarkInlineCandidateHelper that also undoes
// guarded devirtualization for virtual calls where the method we'd
// devirtualize to cannot be inlined.
void Compiler::impMarkInlineCandidate(GenTree* callNode,
CORINFO_CONTEXT_HANDLE exactContextHnd,
bool exactContextNeedsRuntimeLookup,
CORINFO_CALL_INFO* callInfo)
{
GenTreeCall* call = callNode->AsCall();
// Do the actual evaluation
impMarkInlineCandidateHelper(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo);
// If this call is an inline candidate or is not a guarded devirtualization
// candidate, we're done.
if (call->IsInlineCandidate() || !call->IsGuardedDevirtualizationCandidate())
{
return;
}
// If we can't inline the call we'd guardedly devirtualize to,
// we undo the guarded devirtualization, as the benefit from
// just guarded devirtualization alone is likely not worth the
// extra jit time and code size.
//
// TODO: it is possibly interesting to allow this, but requires
// fixes elsewhere too...
JITDUMP("Revoking guarded devirtualization candidacy for call [%06u]: target method can't be inlined\n",
dspTreeID(call));
call->ClearGuardedDevirtualizationCandidate();
}
//------------------------------------------------------------------------
// impMarkInlineCandidateHelper: determine if this call can be subsequently
// inlined
//
// Arguments:
// callNode -- call under scrutiny
// exactContextHnd -- context handle for inlining
// exactContextNeedsRuntimeLookup -- true if context required runtime lookup
// callInfo -- call info from VM
//
// Notes:
// If callNode is an inline candidate, this method sets the flag
// GTF_CALL_INLINE_CANDIDATE, and ensures that helper methods have
// filled in the associated InlineCandidateInfo.
//
// If callNode is not an inline candidate, and the reason is
// something that is inherent to the method being called, the
// method may be marked as "noinline" to short-circuit any
// future assessments of calls to this method.
void Compiler::impMarkInlineCandidateHelper(GenTreeCall* call,
CORINFO_CONTEXT_HANDLE exactContextHnd,
bool exactContextNeedsRuntimeLookup,
CORINFO_CALL_INFO* callInfo)
{
// Let the strategy know there's another call
impInlineRoot()->m_inlineStrategy->NoteCall();
if (!opts.OptEnabled(CLFLG_INLINING))
{
/* XXX Mon 8/18/2008
* This assert is misleading. The caller does not ensure that we have CLFLG_INLINING set before
* calling impMarkInlineCandidate. However, if this assert trips it means that we're an inlinee and
* CLFLG_MINOPT is set. That doesn't make a lot of sense. If you hit this assert, work back and
* figure out why we did not set MAXOPT for this compile.
*/
assert(!compIsForInlining());
return;
}
if (compIsForImportOnly())
{
// Don't bother creating the inline candidate during verification.
// Otherwise the call to info.compCompHnd->canInline will trigger a recursive verification
// that leads to the creation of multiple instances of Compiler.
return;
}
InlineResult inlineResult(this, call, nullptr, "impMarkInlineCandidate");
// Don't inline if not optimizing root method
if (opts.compDbgCode)
{
inlineResult.NoteFatal(InlineObservation::CALLER_DEBUG_CODEGEN);
return;
}
// Don't inline if inlining into this method is disabled.
if (impInlineRoot()->m_inlineStrategy->IsInliningDisabled())
{
inlineResult.NoteFatal(InlineObservation::CALLER_IS_JIT_NOINLINE);
return;
}
// Don't inline into callers that use the NextCallReturnAddress intrinsic.
if (info.compHasNextCallRetAddr)
{
inlineResult.NoteFatal(InlineObservation::CALLER_USES_NEXT_CALL_RET_ADDR);
return;
}
// Inlining candidate determination needs to honor only IL tail prefix.
// Inlining takes precedence over implicit tail call optimization (if the call is not directly recursive).
if (call->IsTailPrefixedCall())
{
inlineResult.NoteFatal(InlineObservation::CALLSITE_EXPLICIT_TAIL_PREFIX);
return;
}
// Delegate Invoke method doesn't have a body and gets special cased instead.
// Don't even bother trying to inline it.
if (call->IsDelegateInvoke())
{
inlineResult.NoteFatal(InlineObservation::CALLEE_HAS_NO_BODY);
return;
}
// Tail recursion elimination takes precedence over inlining.
// TODO: We may want to do some of the additional checks from fgMorphCall
// here to reduce the chance we don't inline a call that won't be optimized
// as a fast tail call or turned into a loop.
if (gtIsRecursiveCall(call) && call->IsImplicitTailCall())
{
inlineResult.NoteFatal(InlineObservation::CALLSITE_IMPLICIT_REC_TAIL_CALL);
return;
}
if (call->IsVirtual())
{
// Allow guarded devirt calls to be treated as inline candidates,
// but reject all other virtual calls.
if (!call->IsGuardedDevirtualizationCandidate())
{
inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT);
return;
}
}
/* Ignore helper calls */
if (call->gtCallType == CT_HELPER)
{
assert(!call->IsGuardedDevirtualizationCandidate());
inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_CALL_TO_HELPER);
return;
}
/* Ignore indirect calls */
if (call->gtCallType == CT_INDIRECT)
{
inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT_MANAGED);
return;
}
/* I removed the check for BBJ_THROW. BBJ_THROW is usually marked as rarely run. This more or less
* restricts the inliner to non-expanding inlines. I removed the check to allow for non-expanding
* inlining in throw blocks. I should consider the same thing for catch and filter regions. */
CORINFO_METHOD_HANDLE fncHandle;
unsigned methAttr;
if (call->IsGuardedDevirtualizationCandidate())
{
if (call->gtGuardedDevirtualizationCandidateInfo->guardedMethodUnboxedEntryHandle != nullptr)
{
fncHandle = call->gtGuardedDevirtualizationCandidateInfo->guardedMethodUnboxedEntryHandle;
}
else
{
fncHandle = call->gtGuardedDevirtualizationCandidateInfo->guardedMethodHandle;
}
methAttr = info.compCompHnd->getMethodAttribs(fncHandle);
}
else
{
fncHandle = call->gtCallMethHnd;
// Reuse method flags from the original callInfo if possible
if (fncHandle == callInfo->hMethod)
{
methAttr = callInfo->methodFlags;
}
else
{
methAttr = info.compCompHnd->getMethodAttribs(fncHandle);
}
}
#ifdef DEBUG
if (compStressCompile(STRESS_FORCE_INLINE, 0))
{
methAttr |= CORINFO_FLG_FORCEINLINE;
}
#endif
// Check for COMPlus_AggressiveInlining
if (compDoAggressiveInlining)
{
methAttr |= CORINFO_FLG_FORCEINLINE;
}
if (!(methAttr & CORINFO_FLG_FORCEINLINE))
{
/* Don't bother inlining call sites that are in a catch handler or filter region */
if (bbInCatchHandlerILRange(compCurBB))
{
#ifdef DEBUG
if (verbose)
{
printf("\nWill not inline blocks that are in the catch handler region\n");
}
#endif
inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_CATCH);
return;
}
if (bbInFilterILRange(compCurBB))
{
#ifdef DEBUG
if (verbose)
{
printf("\nWill not inline blocks that are in the filter region\n");
}
#endif
inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_FILTER);
return;
}
}
/* Check if we tried to inline this method before */
if (methAttr & CORINFO_FLG_DONT_INLINE)
{
inlineResult.NoteFatal(InlineObservation::CALLEE_IS_NOINLINE);
return;
}
/* Cannot inline synchronized methods */
if (methAttr & CORINFO_FLG_SYNCH)
{
inlineResult.NoteFatal(InlineObservation::CALLEE_IS_SYNCHRONIZED);
return;
}
/* Check legality of PInvoke callsite (for inlining of marshalling code) */
if (methAttr & CORINFO_FLG_PINVOKE)
{
// See comment in impCheckForPInvokeCall
BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB;
if (!impCanPInvokeInlineCallSite(block))
{
inlineResult.NoteFatal(InlineObservation::CALLSITE_PINVOKE_EH);
return;
}
}
InlineCandidateInfo* inlineCandidateInfo = nullptr;
impCheckCanInline(call, fncHandle, methAttr, exactContextHnd, &inlineCandidateInfo, &inlineResult);
if (inlineResult.IsFailure())
{
return;
}
// The old value should be null OR this call should be a guarded devirtualization candidate.
assert((call->gtInlineCandidateInfo == nullptr) || call->IsGuardedDevirtualizationCandidate());
// The new value should not be null.
assert(inlineCandidateInfo != nullptr);
inlineCandidateInfo->exactContextNeedsRuntimeLookup = exactContextNeedsRuntimeLookup;
call->gtInlineCandidateInfo = inlineCandidateInfo;
// If we're in an inlinee compiler, and have a return spill temp, and this inline candidate
// is also a tail call candidate, it can use the same return spill temp.
//
if (compIsForInlining() && call->CanTailCall() &&
(impInlineInfo->inlineCandidateInfo->preexistingSpillTemp != BAD_VAR_NUM))
{
inlineCandidateInfo->preexistingSpillTemp = impInlineInfo->inlineCandidateInfo->preexistingSpillTemp;
JITDUMP("Inline candidate [%06u] can share spill temp V%02u\n", dspTreeID(call),
inlineCandidateInfo->preexistingSpillTemp);
}
// Mark the call node as inline candidate.
call->gtFlags |= GTF_CALL_INLINE_CANDIDATE;
// Let the strategy know there's another candidate.
impInlineRoot()->m_inlineStrategy->NoteCandidate();
// Since we're not actually inlining yet, and this call site is
// still just an inline candidate, there's nothing to report.
inlineResult.SetReported();
}
/******************************************************************************/
// Returns true if the given intrinsic will be implemented by target-specific
// instructions
bool Compiler::IsTargetIntrinsic(NamedIntrinsic intrinsicName)
{
#if defined(TARGET_XARCH)
switch (intrinsicName)
{
// AMD64/x86 has SSE2 instructions to directly compute sqrt/abs and SSE4.1
// instructions to directly compute round/ceiling/floor/truncate.
case NI_System_Math_Abs:
case NI_System_Math_Sqrt:
return true;
case NI_System_Math_Ceiling:
case NI_System_Math_Floor:
case NI_System_Math_Truncate:
case NI_System_Math_Round:
return compOpportunisticallyDependsOn(InstructionSet_SSE41);
case NI_System_Math_FusedMultiplyAdd:
return compOpportunisticallyDependsOn(InstructionSet_FMA);
default:
return false;
}
#elif defined(TARGET_ARM64)
switch (intrinsicName)
{
case NI_System_Math_Abs:
case NI_System_Math_Ceiling:
case NI_System_Math_Floor:
case NI_System_Math_Truncate:
case NI_System_Math_Round:
case NI_System_Math_Sqrt:
case NI_System_Math_Max:
case NI_System_Math_Min:
return true;
case NI_System_Math_FusedMultiplyAdd:
return compOpportunisticallyDependsOn(InstructionSet_AdvSimd);
default:
return false;
}
#elif defined(TARGET_ARM)
switch (intrinsicName)
{
case NI_System_Math_Abs:
case NI_System_Math_Round:
case NI_System_Math_Sqrt:
return true;
default:
return false;
}
#else
// TODO: This portion of the logic is not implemented for other architectures.
// The reason for returning true is that, on all other architectures, the only
// intrinsics enabled are target intrinsics.
return true;
#endif
}
/******************************************************************************/
// Returns true if the given intrinsic will be implemented by calling System.Math
// methods.
bool Compiler::IsIntrinsicImplementedByUserCall(NamedIntrinsic intrinsicName)
{
// Currently, if a math intrinsic is not implemented by target-specific
// instructions, it will be implemented by a System.Math call. In the
// future, if we turn to implementing some of them with helper calls,
// this predicate needs to be revisited.
return !IsTargetIntrinsic(intrinsicName);
}
bool Compiler::IsMathIntrinsic(NamedIntrinsic intrinsicName)
{
switch (intrinsicName)
{
case NI_System_Math_Abs:
case NI_System_Math_Acos:
case NI_System_Math_Acosh:
case NI_System_Math_Asin:
case NI_System_Math_Asinh:
case NI_System_Math_Atan:
case NI_System_Math_Atanh:
case NI_System_Math_Atan2:
case NI_System_Math_Cbrt:
case NI_System_Math_Ceiling:
case NI_System_Math_Cos:
case NI_System_Math_Cosh:
case NI_System_Math_Exp:
case NI_System_Math_Floor:
case NI_System_Math_FMod:
case NI_System_Math_FusedMultiplyAdd:
case NI_System_Math_ILogB:
case NI_System_Math_Log:
case NI_System_Math_Log2:
case NI_System_Math_Log10:
case NI_System_Math_Max:
case NI_System_Math_Min:
case NI_System_Math_Pow:
case NI_System_Math_Round:
case NI_System_Math_Sin:
case NI_System_Math_Sinh:
case NI_System_Math_Sqrt:
case NI_System_Math_Tan:
case NI_System_Math_Tanh:
case NI_System_Math_Truncate:
{
assert((intrinsicName > NI_SYSTEM_MATH_START) && (intrinsicName < NI_SYSTEM_MATH_END));
return true;
}
default:
{
assert((intrinsicName < NI_SYSTEM_MATH_START) || (intrinsicName > NI_SYSTEM_MATH_END));
return false;
}
}
}
bool Compiler::IsMathIntrinsic(GenTree* tree)
{
return (tree->OperGet() == GT_INTRINSIC) && IsMathIntrinsic(tree->AsIntrinsic()->gtIntrinsicName);
}
//------------------------------------------------------------------------
// impDevirtualizeCall: Attempt to change a virtual vtable call into a
// normal call
//
// Arguments:
// call -- the call node to examine/modify
// pResolvedToken -- [IN] the resolved token used to create the call. Used for R2R.
// method -- [IN/OUT] the method handle for call. Updated iff call devirtualized.
// methodFlags -- [IN/OUT] flags for the method to call. Updated iff call devirtualized.
// pContextHandle -- [IN/OUT] context handle for the call. Updated iff call devirtualized.
// pExactContextHandle -- [OUT] updated context handle iff call devirtualized
// isLateDevirtualization -- if devirtualization is happening after importation
// isExplicitTailCall -- [IN] true if we plan on using an explicit tail call
// ilOffset -- IL offset of the call
//
// Notes:
// Virtual calls in IL will always "invoke" the base class method.
//
// This transformation looks for evidence that the type of 'this'
// in the call is exactly known, is a final class or would invoke
// a final method, and if that and other safety checks pan out,
// modifies the call and the call info to create a direct call.
//
// This transformation is initially done in the importer and not
// in some subsequent optimization pass because we want it to be
// upstream of inline candidate identification.
//
// However, later phases may supply improved type information that
// can enable further devirtualization. We currently reinvoke this
// code after inlining, if the return value of the inlined call is
// the 'this obj' of a subsequent virtual call.
//
// If devirtualization succeeds and the call's this object is a
// (boxed) value type, the jit will ask the EE for the unboxed entry
// point. If this exists, the jit will invoke the unboxed entry
// on the box payload. In addition, if the boxing operation is
// visible to the jit and the call is the only consumer of the box,
// the jit will try to analyze the box to see if the call can instead
// be made on a local copy. If that is doable, the call is
// updated to invoke the unboxed entry on the local copy and the
// boxing operation is removed.
//
// When guarded devirtualization is enabled, this method will mark
// calls as guarded devirtualization candidates, if the type of `this`
// is not exactly known, and there is a plausible guess for the type.
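//
// For illustration, a hypothetical C# shape this transformation targets
// (a sketch, not taken from any particular test):
//
//   sealed class Derived : Base { public override int M() => 42; }
//   ...
//   Base b = GetDerived();   // jit proves 'b' is exactly (or finally) Derived
//   int x = b.M();           // CALLVIRT Base::M becomes a direct call to
//                            // Derived::M, which can then become an inline candidate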
void Compiler::impDevirtualizeCall(GenTreeCall* call,
CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_METHOD_HANDLE* method,
unsigned* methodFlags,
CORINFO_CONTEXT_HANDLE* pContextHandle,
CORINFO_CONTEXT_HANDLE* pExactContextHandle,
bool isLateDevirtualization,
bool isExplicitTailCall,
IL_OFFSET ilOffset)
{
assert(call != nullptr);
assert(method != nullptr);
assert(methodFlags != nullptr);
assert(pContextHandle != nullptr);
// This should be a virtual vtable or virtual stub call.
//
assert(call->IsVirtual());
// Possibly instrument. Note for OSR+PGO we will instrument when
// optimizing and (currently) won't devirtualize. We may want
// to revisit -- if we can devirtualize we should be able to
// suppress the probe.
//
// We strip BBINSTR from inlinees currently, so we'll only
// do this for the root method calls.
//
if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR))
{
assert(opts.OptimizationDisabled() || opts.IsOSR());
assert(!compIsForInlining());
// During importation, optionally flag this block as one that
// contains calls requiring class profiling. Ideally perhaps
// we'd just keep track of the calls themselves, so we don't
// have to search for them later.
//
if ((call->gtCallType != CT_INDIRECT) && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR) &&
!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT) && (JitConfig.JitClassProfiling() > 0) &&
!isLateDevirtualization)
{
JITDUMP("\n ... marking [%06u] in " FMT_BB " for class profile instrumentation\n", dspTreeID(call),
compCurBB->bbNum);
ClassProfileCandidateInfo* pInfo = new (this, CMK_Inlining) ClassProfileCandidateInfo;
// Record some info needed for the class profiling probe.
//
pInfo->ilOffset = ilOffset;
pInfo->probeIndex = info.compClassProbeCount++;
call->gtClassProfileCandidateInfo = pInfo;
// Flag block as needing scrutiny
//
compCurBB->bbFlags |= BBF_HAS_CLASS_PROFILE;
}
return;
}
// Bail if optimizations are disabled.
if (opts.OptimizationDisabled())
{
return;
}
#if defined(DEBUG)
// Bail if devirt is disabled.
if (JitConfig.JitEnableDevirtualization() == 0)
{
return;
}
// Optionally, print info on devirtualization
Compiler* const rootCompiler = impInlineRoot();
const bool doPrint = JitConfig.JitPrintDevirtualizedMethods().contains(rootCompiler->info.compMethodName,
rootCompiler->info.compClassName,
&rootCompiler->info.compMethodInfo->args);
#endif // DEBUG
// Fetch information about the virtual method we're calling.
CORINFO_METHOD_HANDLE baseMethod = *method;
unsigned baseMethodAttribs = *methodFlags;
if (baseMethodAttribs == 0)
{
// For late devirt we may not have method attributes, so fetch them.
baseMethodAttribs = info.compCompHnd->getMethodAttribs(baseMethod);
}
else
{
#if defined(DEBUG)
// Validate that callInfo has up to date method flags
const DWORD freshBaseMethodAttribs = info.compCompHnd->getMethodAttribs(baseMethod);
// All the base method attributes should agree, save that
// CORINFO_FLG_DONT_INLINE may have changed from 0 to 1
// because of concurrent jitting activity.
//
// Note we don't look at this particular flag bit below, and
// later on (if we do try and inline) we will rediscover why
// the method can't be inlined, so there's no danger here in
// seeing this particular flag bit in different states between
// the cached and fresh values.
if ((freshBaseMethodAttribs & ~CORINFO_FLG_DONT_INLINE) != (baseMethodAttribs & ~CORINFO_FLG_DONT_INLINE))
{
assert(!"mismatched method attributes");
}
#endif // DEBUG
}
// In R2R mode, we might see virtual stub calls to
// non-virtuals. For instance cases where the non-virtual method
// is in a different assembly but is called via CALLVIRT. For
// version resilience we must allow for the fact that the method
// might become virtual in some update.
//
// In non-R2R modes CALLVIRT <nonvirtual> will be turned into a
// regular call+nullcheck upstream, so we won't reach this
// point.
if ((baseMethodAttribs & CORINFO_FLG_VIRTUAL) == 0)
{
assert(call->IsVirtualStub());
assert(opts.IsReadyToRun());
JITDUMP("\nimpDevirtualizeCall: [R2R] base method not virtual, sorry\n");
return;
}
// Fetch information about the class that introduced the virtual method.
CORINFO_CLASS_HANDLE baseClass = info.compCompHnd->getMethodClass(baseMethod);
const DWORD baseClassAttribs = info.compCompHnd->getClassAttribs(baseClass);
// Is the call an interface call?
const bool isInterface = (baseClassAttribs & CORINFO_FLG_INTERFACE) != 0;
// See what we know about the type of 'this' in the call.
GenTree* thisObj = call->gtCallThisArg->GetNode()->gtEffectiveVal(false);
bool isExact = false;
bool objIsNonNull = false;
CORINFO_CLASS_HANDLE objClass = gtGetClassHandle(thisObj, &isExact, &objIsNonNull);
// Bail if we know nothing.
if (objClass == NO_CLASS_HANDLE)
{
JITDUMP("\nimpDevirtualizeCall: no type available (op=%s)\n", GenTree::OpName(thisObj->OperGet()));
// Don't try guarded devirtualization when we're doing late devirtualization.
//
if (isLateDevirtualization)
{
JITDUMP("No guarded devirt during late devirtualization\n");
return;
}
considerGuardedDevirtualization(call, ilOffset, isInterface, baseMethod, baseClass,
pContextHandle DEBUGARG(objClass) DEBUGARG("unknown"));
return;
}
// If the objClass is sealed (final), then we may be able to devirtualize.
const DWORD objClassAttribs = info.compCompHnd->getClassAttribs(objClass);
const bool objClassIsFinal = (objClassAttribs & CORINFO_FLG_FINAL) != 0;
#if defined(DEBUG)
const char* callKind = isInterface ? "interface" : "virtual";
const char* objClassNote = "[?]";
const char* objClassName = "?objClass";
const char* baseClassName = "?baseClass";
const char* baseMethodName = "?baseMethod";
if (verbose || doPrint)
{
objClassNote = isExact ? " [exact]" : objClassIsFinal ? " [final]" : "";
objClassName = eeGetClassName(objClass);
baseClassName = eeGetClassName(baseClass);
baseMethodName = eeGetMethodName(baseMethod, nullptr);
if (verbose)
{
printf("\nimpDevirtualizeCall: Trying to devirtualize %s call:\n"
" class for 'this' is %s%s (attrib %08x)\n"
" base method is %s::%s\n",
callKind, objClassName, objClassNote, objClassAttribs, baseClassName, baseMethodName);
}
}
#endif // defined(DEBUG)
// See if the jit's best type for `obj` is an interface.
// See for instance System.ValueTuple`8::GetHashCode, where lcl 0 is System.IValueTupleInternal
// IL_021d: ldloc.0
// IL_021e: callvirt instance int32 System.Object::GetHashCode()
//
// If so, we can't devirtualize, but we may be able to do guarded devirtualization.
//
if ((objClassAttribs & CORINFO_FLG_INTERFACE) != 0)
{
// Don't try guarded devirtualization when we're doing late devirtualization.
//
if (isLateDevirtualization)
{
JITDUMP("No guarded devirt during late devirtualization\n");
return;
}
considerGuardedDevirtualization(call, ilOffset, isInterface, baseMethod, baseClass,
pContextHandle DEBUGARG(objClass) DEBUGARG(objClassName));
return;
}
// If we get this far, the jit has a lower bound class type for the `this` object being used for dispatch.
// It may or may not know enough to devirtualize...
if (isInterface)
{
assert(call->IsVirtualStub());
JITDUMP("--- base class is interface\n");
}
// Fetch the method that would be called based on the declared type of 'this',
// and prepare to fetch the method attributes.
//
CORINFO_DEVIRTUALIZATION_INFO dvInfo;
dvInfo.virtualMethod = baseMethod;
dvInfo.objClass = objClass;
dvInfo.context = *pContextHandle;
dvInfo.detail = CORINFO_DEVIRTUALIZATION_UNKNOWN;
dvInfo.pResolvedTokenVirtualMethod = pResolvedToken;
info.compCompHnd->resolveVirtualMethod(&dvInfo);
CORINFO_METHOD_HANDLE derivedMethod = dvInfo.devirtualizedMethod;
CORINFO_CONTEXT_HANDLE exactContext = dvInfo.exactContext;
CORINFO_CLASS_HANDLE derivedClass = NO_CLASS_HANDLE;
CORINFO_RESOLVED_TOKEN* pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedMethod;
if (derivedMethod != nullptr)
{
assert(exactContext != nullptr);
assert(((size_t)exactContext & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS);
derivedClass = (CORINFO_CLASS_HANDLE)((size_t)exactContext & ~CORINFO_CONTEXTFLAGS_MASK);
}
DWORD derivedMethodAttribs = 0;
bool derivedMethodIsFinal = false;
bool canDevirtualize = false;
#if defined(DEBUG)
const char* derivedClassName = "?derivedClass";
const char* derivedMethodName = "?derivedMethod";
const char* note = "inexact or not final";
#endif
// If we failed to get a method handle, we can't directly devirtualize.
//
// This can happen when prejitting, if the devirtualization crosses
// servicing bubble boundaries, or if objClass is a shared class.
//
if (derivedMethod == nullptr)
{
JITDUMP("--- no derived method: %s\n", devirtualizationDetailToString(dvInfo.detail));
}
else
{
// Fetch method attributes to see if method is marked final.
derivedMethodAttribs = info.compCompHnd->getMethodAttribs(derivedMethod);
derivedMethodIsFinal = ((derivedMethodAttribs & CORINFO_FLG_FINAL) != 0);
#if defined(DEBUG)
if (isExact)
{
note = "exact";
}
else if (objClassIsFinal)
{
note = "final class";
}
else if (derivedMethodIsFinal)
{
note = "final method";
}
if (verbose || doPrint)
{
derivedMethodName = eeGetMethodName(derivedMethod, nullptr);
derivedClassName = eeGetClassName(derivedClass);
if (verbose)
{
printf(" devirt to %s::%s -- %s\n", derivedClassName, derivedMethodName, note);
gtDispTree(call);
}
}
#endif // defined(DEBUG)
canDevirtualize = isExact || objClassIsFinal || (!isInterface && derivedMethodIsFinal);
}
// We still might be able to do a guarded devirtualization.
// Note the call might be an interface call or a virtual call.
//
if (!canDevirtualize)
{
JITDUMP(" Class not final or exact%s\n", isInterface ? "" : ", and method not final");
#if defined(DEBUG)
// If we know the object type exactly, we generally expect we can devirtualize.
// (don't when doing late devirt as we won't have an owner type (yet))
//
if (!isLateDevirtualization && (isExact || objClassIsFinal) && JitConfig.JitNoteFailedExactDevirtualization())
{
printf("@@@ Exact/Final devirt failure in %s at [%06u] $ %s\n", info.compFullName, dspTreeID(call),
devirtualizationDetailToString(dvInfo.detail));
}
#endif
// Don't try guarded devirtualization if we're doing late devirtualization.
//
if (isLateDevirtualization)
{
JITDUMP("No guarded devirt during late devirtualization\n");
return;
}
considerGuardedDevirtualization(call, ilOffset, isInterface, baseMethod, baseClass,
pContextHandle DEBUGARG(objClass) DEBUGARG(objClassName));
return;
}
// All checks done. Time to transform the call.
//
// We should always have an exact class context.
//
// Note that wouldn't be true if the runtime side supported array interface devirt:
// the resulting method would be a generic method of the non-generic SZArrayHelper class.
//
assert(canDevirtualize);
JITDUMP(" %s; can devirtualize\n", note);
// Make the updates.
call->gtFlags &= ~GTF_CALL_VIRT_VTABLE;
call->gtFlags &= ~GTF_CALL_VIRT_STUB;
call->gtCallMethHnd = derivedMethod;
call->gtCallType = CT_USER_FUNC;
call->gtCallMoreFlags |= GTF_CALL_M_DEVIRTUALIZED;
// Virtual calls include an implicit null check, which we may
// now need to make explicit.
if (!objIsNonNull)
{
call->gtFlags |= GTF_CALL_NULLCHECK;
}
// Clear the inline candidate info (may be non-null since
// it's a union field used for other things by virtual
// stubs)
call->gtInlineCandidateInfo = nullptr;
#if defined(DEBUG)
if (verbose)
{
printf("... after devirt...\n");
gtDispTree(call);
}
if (doPrint)
{
printf("Devirtualized %s call to %s:%s; now direct call to %s:%s [%s]\n", callKind, baseClassName,
baseMethodName, derivedClassName, derivedMethodName, note);
}
// If we successfully devirtualized based on an exact or final class,
// and we have dynamic PGO data describing the likely class, make sure they agree.
//
// If pgo source is not dynamic we may see likely classes from other versions of this code
// where types had different properties.
//
// If method is an inlinee we may be specializing to a class that wasn't seen at runtime.
//
const bool canSensiblyCheck =
(isExact || objClassIsFinal) && (fgPgoSource == ICorJitInfo::PgoSource::Dynamic) && !compIsForInlining();
if (JitConfig.JitCrossCheckDevirtualizationAndPGO() && canSensiblyCheck)
{
// We only can handle a single likely class for now
const int maxLikelyClasses = 1;
LikelyClassRecord likelyClasses[maxLikelyClasses];
UINT32 numberOfClasses =
getLikelyClasses(likelyClasses, maxLikelyClasses, fgPgoSchema, fgPgoSchemaCount, fgPgoData, ilOffset);
UINT32 likelihood = likelyClasses[0].likelihood;
CORINFO_CLASS_HANDLE likelyClass = likelyClasses[0].clsHandle;
if (numberOfClasses > 0)
{
// PGO had better agree the class we devirtualized to is plausible.
//
if (likelyClass != derivedClass)
{
// Managed type system may report different addresses for a class handle
// at different times....?
//
// Also, AOT may have a more nuanced notion of class equality.
//
if (!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT))
{
bool mismatch = true;
// derivedClass will be the introducer of derived method, so it's possible
// likelyClass is a non-overriding subclass. Check up the hierarchy.
//
CORINFO_CLASS_HANDLE parentClass = likelyClass;
while (parentClass != NO_CLASS_HANDLE)
{
if (parentClass == derivedClass)
{
mismatch = false;
break;
}
parentClass = info.compCompHnd->getParentType(parentClass);
}
if (mismatch || (numberOfClasses != 1) || (likelihood != 100))
{
printf("@@@ Likely %p (%s) != Derived %p (%s) [n=%u, l=%u, il=%u] in %s \n", likelyClass,
eeGetClassName(likelyClass), derivedClass, eeGetClassName(derivedClass), numberOfClasses,
likelihood, ilOffset, info.compFullName);
}
assert(!(mismatch || (numberOfClasses != 1) || (likelihood != 100)));
}
}
}
}
#endif // defined(DEBUG)
// If the 'this' object is a value class, see if we can rework the call to invoke the
// unboxed entry. This effectively inlines the normally un-inlineable wrapper stub
// and exposes the potentially inlinable unboxed entry method.
//
// We won't optimize explicit tail calls, as ensuring we get the right tail call info
// is tricky (we'd need to pass an updated sig and resolved token back to some callers).
//
// Note we may not have a derived class in some cases (eg interface call on an array)
//
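// For illustration, a hypothetical source pattern this targets (a sketch,
// not from any particular test):
//
//   int i = 42;
//   object o = i;              // box
//   string s = o.ToString();   // devirtualizes to Int32::ToString; ideally the
//                              // box is removed and the unboxed entry is
//                              // invoked on 'i' (or a local copy) directly
//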
if (info.compCompHnd->isValueClass(derivedClass))
{
if (isExplicitTailCall)
{
JITDUMP("Have a direct explicit tail call to boxed entry point; can't optimize further\n");
}
else
{
JITDUMP("Have a direct call to boxed entry point. Trying to optimize to call an unboxed entry point\n");
// Note for some shared methods the unboxed entry point requires an extra parameter.
bool requiresInstMethodTableArg = false;
CORINFO_METHOD_HANDLE unboxedEntryMethod =
info.compCompHnd->getUnboxedEntry(derivedMethod, &requiresInstMethodTableArg);
if (unboxedEntryMethod != nullptr)
{
bool optimizedTheBox = false;
// If the 'this' object is a local box, see if we can revise things
// to not require boxing.
//
if (thisObj->IsBoxedValue() && !isExplicitTailCall)
{
// Since the call is the only consumer of the box, we know the box can't escape
// since it is being passed an interior pointer.
//
// So, revise the box to simply create a local copy, use the address of that copy
// as the this pointer, and update the entry point to the unboxed entry.
//
// Ideally, we then inline the boxed method and, if it turns out not to modify
// the copy, we can undo the copy too.
if (requiresInstMethodTableArg)
{
// Perform a trial box removal and ask for the type handle tree that fed the box.
//
JITDUMP("Unboxed entry needs method table arg...\n");
GenTree* methodTableArg =
gtTryRemoveBoxUpstreamEffects(thisObj, BR_DONT_REMOVE_WANT_TYPE_HANDLE);
if (methodTableArg != nullptr)
{
// If that worked, turn the box into a copy to a local var
//
JITDUMP("Found suitable method table arg tree [%06u]\n", dspTreeID(methodTableArg));
GenTree* localCopyThis = gtTryRemoveBoxUpstreamEffects(thisObj, BR_MAKE_LOCAL_COPY);
if (localCopyThis != nullptr)
{
// Pass the local var as this and the type handle as a new arg
//
JITDUMP("Success! invoking unboxed entry point on local copy, and passing method table "
"arg\n");
call->gtCallThisArg = gtNewCallArgs(localCopyThis);
call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED;
// Prepend for R2L arg passing or empty L2R passing
// Append for non-empty L2R
//
if ((Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) || (call->gtCallArgs == nullptr))
{
// If there's a ret buf, the method table is the second arg.
//
if (call->HasRetBufArg())
{
gtInsertNewCallArgAfter(methodTableArg, call->gtCallArgs);
}
else
{
call->gtCallArgs = gtPrependNewCallArg(methodTableArg, call->gtCallArgs);
}
}
else
{
GenTreeCall::Use* beforeArg = call->gtCallArgs;
while (beforeArg->GetNext() != nullptr)
{
beforeArg = beforeArg->GetNext();
}
beforeArg->SetNext(gtNewCallArgs(methodTableArg));
}
call->gtCallMethHnd = unboxedEntryMethod;
derivedMethod = unboxedEntryMethod;
pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedUnboxedMethod;
// Method attributes will differ because unboxed entry point is shared
//
const DWORD unboxedMethodAttribs =
info.compCompHnd->getMethodAttribs(unboxedEntryMethod);
JITDUMP("Updating method attribs from 0x%08x to 0x%08x\n", derivedMethodAttribs,
unboxedMethodAttribs);
derivedMethodAttribs = unboxedMethodAttribs;
optimizedTheBox = true;
}
else
{
JITDUMP("Sorry, failed to undo the box -- can't convert to local copy\n");
}
}
else
{
JITDUMP("Sorry, failed to undo the box -- can't find method table arg\n");
}
}
else
{
JITDUMP("Found unboxed entry point, trying to simplify box to a local copy\n");
GenTree* localCopyThis = gtTryRemoveBoxUpstreamEffects(thisObj, BR_MAKE_LOCAL_COPY);
if (localCopyThis != nullptr)
{
JITDUMP("Success! invoking unboxed entry point on local copy\n");
call->gtCallThisArg = gtNewCallArgs(localCopyThis);
call->gtCallMethHnd = unboxedEntryMethod;
call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED;
derivedMethod = unboxedEntryMethod;
pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedUnboxedMethod;
optimizedTheBox = true;
}
else
{
JITDUMP("Sorry, failed to undo the box\n");
}
}
if (optimizedTheBox)
{
#if FEATURE_TAILCALL_OPT
if (call->IsImplicitTailCall())
{
JITDUMP("Clearing the implicit tail call flag\n");
// If set, we clear the implicit tail call flag
// as we just introduced a new address taken local variable
//
call->gtCallMoreFlags &= ~GTF_CALL_M_IMPLICIT_TAILCALL;
}
#endif // FEATURE_TAILCALL_OPT
}
}
if (!optimizedTheBox)
{
// If we get here, we have a boxed value class that either wasn't boxed
// locally, or was boxed locally but we were unable to remove the box for
// various reasons.
//
// We can still update the call to invoke the unboxed entry, if the
// boxed value is simple.
//
if (requiresInstMethodTableArg)
{
// Get the method table from the boxed object.
//
GenTree* const thisArg = call->gtCallThisArg->GetNode();
GenTree* const clonedThisArg = gtClone(thisArg);
if (clonedThisArg == nullptr)
{
JITDUMP(
"unboxed entry needs MT arg, but `this` was too complex to clone. Deferring update.\n");
}
else
{
JITDUMP("revising call to invoke unboxed entry with additional method table arg\n");
GenTree* const methodTableArg = gtNewMethodTableLookup(clonedThisArg);
// Update the 'this' pointer to refer to the box payload
//
GenTree* const payloadOffset = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL);
GenTree* const boxPayload = gtNewOperNode(GT_ADD, TYP_BYREF, thisArg, payloadOffset);
call->gtCallThisArg = gtNewCallArgs(boxPayload);
call->gtCallMethHnd = unboxedEntryMethod;
call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED;
// Method attributes will differ because unboxed entry point is shared
//
const DWORD unboxedMethodAttribs = info.compCompHnd->getMethodAttribs(unboxedEntryMethod);
JITDUMP("Updating method attribs from 0x%08x to 0x%08x\n", derivedMethodAttribs,
unboxedMethodAttribs);
derivedMethod = unboxedEntryMethod;
pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedUnboxedMethod;
derivedMethodAttribs = unboxedMethodAttribs;
// Add the method table argument.
//
// Prepend for R2L arg passing or empty L2R passing
// Append for non-empty L2R
//
if ((Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) || (call->gtCallArgs == nullptr))
{
// If there's a ret buf, the method table is the second arg.
//
if (call->HasRetBufArg())
{
gtInsertNewCallArgAfter(methodTableArg, call->gtCallArgs);
}
else
{
call->gtCallArgs = gtPrependNewCallArg(methodTableArg, call->gtCallArgs);
}
}
else
{
GenTreeCall::Use* beforeArg = call->gtCallArgs;
while (beforeArg->GetNext() != nullptr)
{
beforeArg = beforeArg->GetNext();
}
beforeArg->SetNext(gtNewCallArgs(methodTableArg));
}
}
}
else
{
JITDUMP("revising call to invoke unboxed entry\n");
GenTree* const thisArg = call->gtCallThisArg->GetNode();
GenTree* const payloadOffset = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL);
GenTree* const boxPayload = gtNewOperNode(GT_ADD, TYP_BYREF, thisArg, payloadOffset);
call->gtCallThisArg = gtNewCallArgs(boxPayload);
call->gtCallMethHnd = unboxedEntryMethod;
call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED;
derivedMethod = unboxedEntryMethod;
pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedUnboxedMethod;
}
}
}
else
{
// Many of the low-level methods on value classes won't have unboxed entries,
// as they need access to the type of the object.
//
// Note this may be a cue for us to stack allocate the boxed object, since
// we probably know that these objects don't escape.
JITDUMP("Sorry, failed to find unboxed entry point\n");
}
}
}
// Need to update call info too.
//
*method = derivedMethod;
*methodFlags = derivedMethodAttribs;
// Update context handle
//
*pContextHandle = MAKE_METHODCONTEXT(derivedMethod);
// Update exact context handle.
//
if (pExactContextHandle != nullptr)
{
*pExactContextHandle = MAKE_CLASSCONTEXT(derivedClass);
}
#ifdef FEATURE_READYTORUN
if (opts.IsReadyToRun())
{
// For R2R, getCallInfo triggers bookkeeping on the zap
// side and acquires the actual symbol to call so we need to call it here.
// Look up the new call info.
CORINFO_CALL_INFO derivedCallInfo;
eeGetCallInfo(pDerivedResolvedToken, nullptr, CORINFO_CALLINFO_ALLOWINSTPARAM, &derivedCallInfo);
// Update the call.
call->gtCallMoreFlags &= ~GTF_CALL_M_VIRTSTUB_REL_INDIRECT;
call->gtCallMoreFlags &= ~GTF_CALL_M_R2R_REL_INDIRECT;
call->setEntryPoint(derivedCallInfo.codePointerLookup.constLookup);
}
#endif // FEATURE_READYTORUN
}
//------------------------------------------------------------------------
// impGetSpecialIntrinsicExactReturnType: Look for special cases where a call
// to an intrinsic returns an exact type
//
// Arguments:
// methodHnd -- handle for the special intrinsic method
//
// Returns:
// Exact class handle returned by the intrinsic call, if known.
// Nullptr if not known, or not likely to lead to beneficial optimization.
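//
// For illustration, a hypothetical call this benefits (assuming the element
// type is final):
//
//   var cmp = EqualityComparer<string>.Default;  // exact comparer class is known
//   bool eq = cmp.Equals(a, b);                  // Equals can then be devirtualized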
CORINFO_CLASS_HANDLE Compiler::impGetSpecialIntrinsicExactReturnType(CORINFO_METHOD_HANDLE methodHnd)
{
JITDUMP("Special intrinsic: looking for exact type returned by %s\n", eeGetMethodFullName(methodHnd));
CORINFO_CLASS_HANDLE result = nullptr;
// See what intrinsic we have...
const NamedIntrinsic ni = lookupNamedIntrinsic(methodHnd);
switch (ni)
{
case NI_System_Collections_Generic_Comparer_get_Default:
case NI_System_Collections_Generic_EqualityComparer_get_Default:
{
// Expect one class generic parameter; figure out which it is.
CORINFO_SIG_INFO sig;
info.compCompHnd->getMethodSig(methodHnd, &sig);
assert(sig.sigInst.classInstCount == 1);
CORINFO_CLASS_HANDLE typeHnd = sig.sigInst.classInst[0];
assert(typeHnd != nullptr);
// Lookup can be incorrect when we have __Canon, as it won't appear
// to implement any interface types.
//
// And if we do not have a final type, devirt & inlining is
// unlikely to result in much simplification.
//
// We can use CORINFO_FLG_FINAL to screen out both of these cases.
const DWORD typeAttribs = info.compCompHnd->getClassAttribs(typeHnd);
const bool isFinalType = ((typeAttribs & CORINFO_FLG_FINAL) != 0);
if (isFinalType)
{
if (ni == NI_System_Collections_Generic_EqualityComparer_get_Default)
{
result = info.compCompHnd->getDefaultEqualityComparerClass(typeHnd);
}
else
{
assert(ni == NI_System_Collections_Generic_Comparer_get_Default);
result = info.compCompHnd->getDefaultComparerClass(typeHnd);
}
JITDUMP("Special intrinsic for type %s: return type is %s\n", eeGetClassName(typeHnd),
result != nullptr ? eeGetClassName(result) : "unknown");
}
else
{
JITDUMP("Special intrinsic for type %s: type not final, so deferring opt\n", eeGetClassName(typeHnd));
}
break;
}
default:
{
JITDUMP("This special intrinsic not handled, sorry...\n");
break;
}
}
return result;
}
//------------------------------------------------------------------------
// impAllocateMethodPointerInfo: allocate a methodPointerInfo in jit-allocated memory and initialize it.
//
// Arguments:
// token - init value for the allocated token.
// tokenConstrained - init value for the constraint associated with the token
//
// Return Value:
// pointer to token into jit-allocated memory.
methodPointerInfo* Compiler::impAllocateMethodPointerInfo(const CORINFO_RESOLVED_TOKEN& token, mdToken tokenConstrained)
{
methodPointerInfo* memory = getAllocator(CMK_Unknown).allocate<methodPointerInfo>(1);
memory->m_token = token;
memory->m_tokenConstraint = tokenConstrained;
return memory;
}
//------------------------------------------------------------------------
// SpillRetExprHelper: iterate through the argument trees and spill GT_RET_EXPR nodes to local variables.
//
class SpillRetExprHelper
{
public:
SpillRetExprHelper(Compiler* comp) : comp(comp)
{
}
void StoreRetExprResultsInArgs(GenTreeCall* call)
{
for (GenTreeCall::Use& use : call->Args())
{
comp->fgWalkTreePre(&use.NodeRef(), SpillRetExprVisitor, this);
}
if (call->gtCallThisArg != nullptr)
{
comp->fgWalkTreePre(&call->gtCallThisArg->NodeRef(), SpillRetExprVisitor, this);
}
}
private:
static Compiler::fgWalkResult SpillRetExprVisitor(GenTree** pTree, Compiler::fgWalkData* fgWalkPre)
{
assert((pTree != nullptr) && (*pTree != nullptr));
GenTree* tree = *pTree;
if ((tree->gtFlags & GTF_CALL) == 0)
{
// Trees with ret_expr are marked as GTF_CALL.
return Compiler::WALK_SKIP_SUBTREES;
}
if (tree->OperGet() == GT_RET_EXPR)
{
SpillRetExprHelper* walker = static_cast<SpillRetExprHelper*>(fgWalkPre->pCallbackData);
walker->StoreRetExprAsLocalVar(pTree);
}
return Compiler::WALK_CONTINUE;
}
void StoreRetExprAsLocalVar(GenTree** pRetExpr)
{
GenTree* retExpr = *pRetExpr;
assert(retExpr->OperGet() == GT_RET_EXPR);
const unsigned tmp = comp->lvaGrabTemp(true DEBUGARG("spilling ret_expr"));
JITDUMP("Storing return expression [%06u] to a local var V%02u.\n", comp->dspTreeID(retExpr), tmp);
comp->impAssignTempGen(tmp, retExpr, (unsigned)Compiler::CHECK_SPILL_NONE);
*pRetExpr = comp->gtNewLclvNode(tmp, retExpr->TypeGet());
if (retExpr->TypeGet() == TYP_REF)
{
assert(comp->lvaTable[tmp].lvSingleDef == 0);
comp->lvaTable[tmp].lvSingleDef = 1;
JITDUMP("Marked V%02u as a single def temp\n", tmp);
bool isExact = false;
bool isNonNull = false;
CORINFO_CLASS_HANDLE retClsHnd = comp->gtGetClassHandle(retExpr, &isExact, &isNonNull);
if (retClsHnd != nullptr)
{
comp->lvaSetClass(tmp, retClsHnd, isExact);
}
}
}
private:
Compiler* comp;
};
//------------------------------------------------------------------------
// addFatPointerCandidate: mark the call as a fat pointer candidate and record that the method has one.
// Spill any GT_RET_EXPR nodes in the call node, because they can't be cloned.
//
// Arguments:
// call - fat calli candidate
//
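// For illustration, a sketch of the expansion a fat-pointer candidate
// eventually receives in a later phase (details differ; this is not the
// exact IR produced here):
//
//   if ((fptr & FAT_POINTER_MASK) == 0)
//       calli fptr(args);                              // ordinary function pointer
//   else
//       calli [fptr & ~FAT_POINTER_MASK](args, inst);  // fat pointer: real target
//                                                      // plus hidden instantiation arg
//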
void Compiler::addFatPointerCandidate(GenTreeCall* call)
{
JITDUMP("Marking call [%06u] as fat pointer candidate\n", dspTreeID(call));
setMethodHasFatPointer();
call->SetFatPointerCandidate();
SpillRetExprHelper helper(this);
helper.StoreRetExprResultsInArgs(call);
}
//------------------------------------------------------------------------
// considerGuardedDevirtualization: see if we can profitably guess at the
// class involved in an interface or virtual call.
//
// Arguments:
//
// call - potential guarded devirtualization candidate
// ilOffset - IL offset of the call instruction
// isInterface - true if this is an interface call
// baseMethod - target method of the call
// baseClass - class that introduced the target method
// pContextHandle - context handle for the call
// objClass - class of 'this' in the call
// objClassName - name of objClass
//
// Notes:
// Consults with VM to see if there's a likely class at runtime,
// if so, adds a candidate for guarded devirtualization.
//
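// For illustration, a sketch of the guarded shape a candidate eventually
// becomes in a later phase (not the exact IR produced here):
//
//   if (obj->methodTable == LikelyClass)   // guard from the class profile
//       LikelyClass::Method(obj);          // direct call, now inlineable
//   else
//       obj->Method();                     // original virtual/interface call
//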
void Compiler::considerGuardedDevirtualization(
GenTreeCall* call,
IL_OFFSET ilOffset,
bool isInterface,
CORINFO_METHOD_HANDLE baseMethod,
CORINFO_CLASS_HANDLE baseClass,
CORINFO_CONTEXT_HANDLE* pContextHandle DEBUGARG(CORINFO_CLASS_HANDLE objClass) DEBUGARG(const char* objClassName))
{
#if defined(DEBUG)
const char* callKind = isInterface ? "interface" : "virtual";
#endif
JITDUMP("Considering guarded devirtualization at IL offset %u (0x%x)\n", ilOffset, ilOffset);
// We currently only get likely class guesses when there is PGO data
// with class profiles.
//
if (fgPgoClassProfiles == 0)
{
JITDUMP("Not guessing for class: no class profile pgo data, or pgo disabled\n");
return;
}
// See if there's a likely guess for the class.
//
const unsigned likelihoodThreshold = isInterface ? 25 : 30;
unsigned likelihood = 0;
unsigned numberOfClasses = 0;
CORINFO_CLASS_HANDLE likelyClass = NO_CLASS_HANDLE;
bool doRandomDevirt = false;
const int maxLikelyClasses = 32;
LikelyClassRecord likelyClasses[maxLikelyClasses];
#ifdef DEBUG
// Optional stress mode to pick a random known class, rather than
// the most likely known class.
//
doRandomDevirt = JitConfig.JitRandomGuardedDevirtualization() != 0;
if (doRandomDevirt)
{
// Reuse the random inliner's random state.
//
CLRRandom* const random =
impInlineRoot()->m_inlineStrategy->GetRandom(JitConfig.JitRandomGuardedDevirtualization());
likelyClasses[0].clsHandle = getRandomClass(fgPgoSchema, fgPgoSchemaCount, fgPgoData, ilOffset, random);
likelyClasses[0].likelihood = 100;
if (likelyClasses[0].clsHandle != NO_CLASS_HANDLE)
{
numberOfClasses = 1;
}
}
else
#endif
{
numberOfClasses =
getLikelyClasses(likelyClasses, maxLikelyClasses, fgPgoSchema, fgPgoSchemaCount, fgPgoData, ilOffset);
}
// For now we only use the most popular type
likelihood = likelyClasses[0].likelihood;
likelyClass = likelyClasses[0].clsHandle;
if (numberOfClasses < 1)
{
JITDUMP("No likely class, sorry\n");
return;
}
assert(likelyClass != NO_CLASS_HANDLE);
// Print all likely classes
JITDUMP("%s classes for %p (%s):\n", doRandomDevirt ? "Random" : "Likely", dspPtr(objClass), objClassName)
for (UINT32 i = 0; i < numberOfClasses; i++)
{
JITDUMP(" %u) %p (%s) [likelihood:%u%%]\n", i + 1, likelyClasses[i].clsHandle,
eeGetClassName(likelyClasses[i].clsHandle), likelyClasses[i].likelihood);
}
// Todo: a more advanced heuristic using likelihood, number of
// classes, and the profile count for this block.
//
// For now we will guess if the likelihood is at least 25%/30% (intfc/virt), as studies
// have shown this transformation should pay off even if we guess wrong sometimes.
//
if (likelihood < likelihoodThreshold)
{
JITDUMP("Not guessing for class; likelihood is below %s call threshold %u\n", callKind, likelihoodThreshold);
return;
}
uint32_t const likelyClassAttribs = info.compCompHnd->getClassAttribs(likelyClass);
if ((likelyClassAttribs & CORINFO_FLG_ABSTRACT) != 0)
{
// We may see an abstract likely class, if we have a stale profile.
// No point guessing for this.
//
JITDUMP("Not guessing for class; abstract (stale profile)\n");
return;
}
// Figure out which method will be called.
//
CORINFO_DEVIRTUALIZATION_INFO dvInfo;
dvInfo.virtualMethod = baseMethod;
dvInfo.objClass = likelyClass;
dvInfo.context = *pContextHandle;
dvInfo.exactContext = *pContextHandle;
dvInfo.pResolvedTokenVirtualMethod = nullptr;
const bool canResolve = info.compCompHnd->resolveVirtualMethod(&dvInfo);
if (!canResolve)
{
JITDUMP("Can't figure out which method would be invoked, sorry\n");
return;
}
CORINFO_METHOD_HANDLE likelyMethod = dvInfo.devirtualizedMethod;
JITDUMP("%s call would invoke method %s\n", callKind, eeGetMethodName(likelyMethod, nullptr));
// Add this as a potential candidate.
//
uint32_t const likelyMethodAttribs = info.compCompHnd->getMethodAttribs(likelyMethod);
addGuardedDevirtualizationCandidate(call, likelyMethod, likelyClass, likelyMethodAttribs, likelyClassAttribs,
likelihood);
}
//------------------------------------------------------------------------
// addGuardedDevirtualizationCandidate: potentially mark the call as a guarded
// devirtualization candidate
//
// Notes:
//
// Call sites in rare or unoptimized code, and calls that require cookies are
// not marked as candidates.
//
// As part of marking the candidate, the code spills GT_RET_EXPRs anywhere in any
// child tree, because we need to clone all these trees when we clone the call
// as part of guarded devirtualization, and these IR nodes can't be cloned.
//
// Arguments:
// call - potential guarded devirtualization candidate
// methodHandle - method that will be invoked if the class test succeeds
// classHandle - class that will be tested for at runtime
// methodAttr - attributes of the method
// classAttr - attributes of the class
// likelihood - odds that this class is the class seen at runtime
//
void Compiler::addGuardedDevirtualizationCandidate(GenTreeCall* call,
CORINFO_METHOD_HANDLE methodHandle,
CORINFO_CLASS_HANDLE classHandle,
unsigned methodAttr,
unsigned classAttr,
unsigned likelihood)
{
// This transformation only makes sense for virtual calls
assert(call->IsVirtual());
// Only mark calls if the feature is enabled.
const bool isEnabled = JitConfig.JitEnableGuardedDevirtualization() > 0;
if (!isEnabled)
{
JITDUMP("NOT Marking call [%06u] as guarded devirtualization candidate -- disabled by jit config\n",
dspTreeID(call));
return;
}
// Bail if not optimizing or the call site is very likely cold
if (compCurBB->isRunRarely() || opts.OptimizationDisabled())
{
JITDUMP("NOT Marking call [%06u] as guarded devirtualization candidate -- rare / dbg / minopts\n",
dspTreeID(call));
return;
}
// CT_INDIRECT calls may use the cookie, bail if so...
//
// If transforming these provides a benefit, we could save this off in the same way
// we save the stub address below.
if ((call->gtCallType == CT_INDIRECT) && (call->AsCall()->gtCallCookie != nullptr))
{
JITDUMP("NOT Marking call [%06u] as guarded devirtualization candidate -- CT_INDIRECT with cookie\n",
dspTreeID(call));
return;
}
#ifdef DEBUG
// See if disabled by range
//
static ConfigMethodRange JitGuardedDevirtualizationRange;
JitGuardedDevirtualizationRange.EnsureInit(JitConfig.JitGuardedDevirtualizationRange());
assert(!JitGuardedDevirtualizationRange.Error());
if (!JitGuardedDevirtualizationRange.Contains(impInlineRoot()->info.compMethodHash()))
{
JITDUMP("NOT Marking call [%06u] as guarded devirtualization candidate -- excluded by "
"JitGuardedDevirtualizationRange",
dspTreeID(call));
return;
}
#endif
// We're all set, proceed with candidate creation.
//
JITDUMP("Marking call [%06u] as guarded devirtualization candidate; will guess for class %s\n", dspTreeID(call),
eeGetClassName(classHandle));
setMethodHasGuardedDevirtualization();
call->SetGuardedDevirtualizationCandidate();
// Spill off any GT_RET_EXPR subtrees so we can clone the call.
//
SpillRetExprHelper helper(this);
helper.StoreRetExprResultsInArgs(call);
// Gather some information for later. Note we actually allocate InlineCandidateInfo
// here, as the devirtualized half of this call will likely become an inline candidate.
//
GuardedDevirtualizationCandidateInfo* pInfo = new (this, CMK_Inlining) InlineCandidateInfo;
pInfo->guardedMethodHandle = methodHandle;
pInfo->guardedMethodUnboxedEntryHandle = nullptr;
pInfo->guardedClassHandle = classHandle;
pInfo->likelihood = likelihood;
pInfo->requiresInstMethodTableArg = false;
// If the guarded class is a value class, look for an unboxed entry point.
//
if ((classAttr & CORINFO_FLG_VALUECLASS) != 0)
{
JITDUMP(" ... class is a value class, looking for unboxed entry\n");
bool requiresInstMethodTableArg = false;
CORINFO_METHOD_HANDLE unboxedEntryMethodHandle =
info.compCompHnd->getUnboxedEntry(methodHandle, &requiresInstMethodTableArg);
if (unboxedEntryMethodHandle != nullptr)
{
JITDUMP(" ... updating GDV candidate with unboxed entry info\n");
pInfo->guardedMethodUnboxedEntryHandle = unboxedEntryMethodHandle;
pInfo->requiresInstMethodTableArg = requiresInstMethodTableArg;
}
}
call->gtGuardedDevirtualizationCandidateInfo = pInfo;
}
void Compiler::addExpRuntimeLookupCandidate(GenTreeCall* call)
{
setMethodHasExpRuntimeLookup();
call->SetExpRuntimeLookup();
}
//------------------------------------------------------------------------
// impIsClassExact: check if a class handle can only describe values
// of exactly one class.
//
// Arguments:
// classHnd - handle for class in question
//
// Returns:
// true if class is final and not subject to special casting from
// variance or similar.
//
// Note:
// We are conservative on arrays of primitive types here.
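//
// For illustration, hypothetical results (assuming typical class attributes
// reported by the VM):
//
//   impIsClassExact(String) -> true                 // final, no variance
//   impIsClassExact(Object) -> false                // not final
//   impIsClassExact(T[])    -> impIsClassExact(T)   // for ref or struct element T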
bool Compiler::impIsClassExact(CORINFO_CLASS_HANDLE classHnd)
{
DWORD flags = info.compCompHnd->getClassAttribs(classHnd);
DWORD flagsMask = CORINFO_FLG_FINAL | CORINFO_FLG_VARIANCE | CORINFO_FLG_ARRAY;
if ((flags & flagsMask) == CORINFO_FLG_FINAL)
{
return true;
}
if ((flags & flagsMask) == (CORINFO_FLG_FINAL | CORINFO_FLG_ARRAY))
{
CORINFO_CLASS_HANDLE arrayElementHandle = nullptr;
CorInfoType type = info.compCompHnd->getChildType(classHnd, &arrayElementHandle);
if ((type == CORINFO_TYPE_CLASS) || (type == CORINFO_TYPE_VALUECLASS))
{
return impIsClassExact(arrayElementHandle);
}
}
return false;
}
//------------------------------------------------------------------------
// impCanSkipCovariantStoreCheck: see if storing a ref type value to an array
// can skip the array store covariance check.
//
// Arguments:
// value -- tree producing the value to store
// array -- tree representing the array to store to
//
// Returns:
// true if the store does not require a covariance check.
//
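// For illustration, hypothetical stores that can skip the check:
//
//   arr[i] = arr[j];   // same (non-address-exposed) array local
//   arr[i] = null;     // storing null never fails the check
//   objArr[i] = v;     // objArr is known to be exactly object[]
//   strArr[i] = s;     // element type String is sealed and matches the value
//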
bool Compiler::impCanSkipCovariantStoreCheck(GenTree* value, GenTree* array)
{
// We should only call this when optimizing.
assert(opts.OptimizationEnabled());
// Check for assignment to the same array, i.e. arrLcl[i] = arrLcl[j]
if (value->OperIs(GT_INDEX) && array->OperIs(GT_LCL_VAR))
{
GenTree* valueIndex = value->AsIndex()->Arr();
if (valueIndex->OperIs(GT_LCL_VAR))
{
unsigned valueLcl = valueIndex->AsLclVar()->GetLclNum();
unsigned arrayLcl = array->AsLclVar()->GetLclNum();
if ((valueLcl == arrayLcl) && !lvaGetDesc(arrayLcl)->IsAddressExposed())
{
JITDUMP("\nstelem of ref from same array: skipping covariant store check\n");
return true;
}
}
}
// Check for assignment of NULL.
if (value->OperIs(GT_CNS_INT))
{
assert(value->gtType == TYP_REF);
if (value->AsIntCon()->gtIconVal == 0)
{
JITDUMP("\nstelem of null: skipping covariant store check\n");
return true;
}
// Non-0 const refs can only occur with frozen objects
assert(value->IsIconHandle(GTF_ICON_STR_HDL));
assert(doesMethodHaveFrozenString() ||
(compIsForInlining() && impInlineInfo->InlinerCompiler->doesMethodHaveFrozenString()));
}
// Try and get a class handle for the array
if (value->gtType != TYP_REF)
{
return false;
}
bool arrayIsExact = false;
bool arrayIsNonNull = false;
CORINFO_CLASS_HANDLE arrayHandle = gtGetClassHandle(array, &arrayIsExact, &arrayIsNonNull);
if (arrayHandle == NO_CLASS_HANDLE)
{
return false;
}
// There are some methods in corelib where we're storing to an array but the IL
// doesn't reflect this (see SZArrayHelper). Avoid.
DWORD attribs = info.compCompHnd->getClassAttribs(arrayHandle);
if ((attribs & CORINFO_FLG_ARRAY) == 0)
{
return false;
}
CORINFO_CLASS_HANDLE arrayElementHandle = nullptr;
CorInfoType arrayElemType = info.compCompHnd->getChildType(arrayHandle, &arrayElementHandle);
// Verify array type handle is really an array of ref type
assert(arrayElemType == CORINFO_TYPE_CLASS);
// Check for exactly object[]
if (arrayIsExact && (arrayElementHandle == impGetObjectClass()))
{
JITDUMP("\nstelem to (exact) object[]: skipping covariant store check\n");
return true;
}
const bool arrayTypeIsSealed = impIsClassExact(arrayElementHandle);
if ((!arrayIsExact && !arrayTypeIsSealed) || (arrayElementHandle == NO_CLASS_HANDLE))
{
// Bail out if we don't know array's exact type
return false;
}
bool valueIsExact = false;
bool valueIsNonNull = false;
CORINFO_CLASS_HANDLE valueHandle = gtGetClassHandle(value, &valueIsExact, &valueIsNonNull);
// Array's type is sealed and equals to value's type
if (arrayTypeIsSealed && (valueHandle == arrayElementHandle))
{
JITDUMP("\nstelem to T[] with T exact: skipping covariant store check\n");
return true;
}
// Array's type is not sealed but we know its exact type
if (arrayIsExact && (valueHandle != NO_CLASS_HANDLE) &&
(info.compCompHnd->compareTypesForCast(valueHandle, arrayElementHandle) == TypeCompareState::Must))
{
JITDUMP("\nstelem to T[] with T exact: skipping covariant store check\n");
return true;
}
return false;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Importer XX
XX XX
XX Imports the given method and converts it to semantic trees XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif
#include "corexcep.h"
#define Verify(cond, msg) \
do \
{ \
if (!(cond)) \
{ \
verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \
} \
} while (0)
#define VerifyOrReturn(cond, msg) \
do \
{ \
if (!(cond)) \
{ \
verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \
return; \
} \
} while (0)
#define VerifyOrReturnSpeculative(cond, msg, speculative) \
do \
{ \
if (speculative) \
{ \
if (!(cond)) \
{ \
return false; \
} \
} \
else \
{ \
if (!(cond)) \
{ \
verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \
return false; \
} \
} \
} while (0)
/*****************************************************************************/
void Compiler::impInit()
{
impStmtList = impLastStmt = nullptr;
#ifdef DEBUG
impInlinedCodeSize = 0;
#endif // DEBUG
}
/*****************************************************************************
*
* Pushes the given tree on the stack.
*/
void Compiler::impPushOnStack(GenTree* tree, typeInfo ti)
{
/* Check for overflow. If inlining, we may be using a bigger stack */
if ((verCurrentState.esStackDepth >= info.compMaxStack) &&
(verCurrentState.esStackDepth >= impStkSize || ((compCurBB->bbFlags & BBF_IMPORTED) == 0)))
{
BADCODE("stack overflow");
}
#ifdef DEBUG
// If we are pushing a struct, make certain we know the precise type!
if (tree->TypeGet() == TYP_STRUCT)
{
assert(ti.IsType(TI_STRUCT));
CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandle();
assert(clsHnd != NO_CLASS_HANDLE);
}
#endif // DEBUG
verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = ti;
verCurrentState.esStack[verCurrentState.esStackDepth++].val = tree;
if ((tree->gtType == TYP_LONG) && (compLongUsed == false))
{
compLongUsed = true;
}
else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
{
compFloatingPointUsed = true;
}
}
inline void Compiler::impPushNullObjRefOnStack()
{
impPushOnStack(gtNewIconNode(0, TYP_REF), typeInfo(TI_NULL));
}
// This method gets called when we run into unverifiable code
// (and we are verifying the method)
inline void Compiler::verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* msg) DEBUGARG(const char* file)
DEBUGARG(unsigned line))
{
#ifdef DEBUG
const char* tail = strrchr(file, '\\');
if (tail)
{
file = tail + 1;
}
if (JitConfig.JitBreakOnUnsafeCode())
{
assert(!"Unsafe code detected");
}
#endif
JITLOG((LL_INFO10000, "Detected unsafe code: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
msg, info.compFullName, impCurOpcName, impCurOpcOffs));
if (compIsForImportOnly())
{
JITLOG((LL_ERROR, "Verification failure: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
msg, info.compFullName, impCurOpcName, impCurOpcOffs));
verRaiseVerifyException(INDEBUG(msg) DEBUGARG(file) DEBUGARG(line));
}
}
inline void DECLSPEC_NORETURN Compiler::verRaiseVerifyException(INDEBUG(const char* msg) DEBUGARG(const char* file)
DEBUGARG(unsigned line))
{
JITLOG((LL_ERROR, "Verification failure: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
msg, info.compFullName, impCurOpcName, impCurOpcOffs));
#ifdef DEBUG
// BreakIfDebuggerPresent();
if (getBreakOnBadCode())
{
assert(!"Typechecking error");
}
#endif
RaiseException(SEH_VERIFICATION_EXCEPTION, EXCEPTION_NONCONTINUABLE, 0, nullptr);
UNREACHABLE();
}
// Helper function that tells us if the IL instruction at the given address
// consumes an address at the top of the stack. We use it to avoid setting
// lvAddrTaken unnecessarily.
bool Compiler::impILConsumesAddr(const BYTE* codeAddr)
{
assert(!compIsForInlining());
OPCODE opcode;
opcode = (OPCODE)getU1LittleEndian(codeAddr);
switch (opcode)
{
// case CEE_LDFLDA: We're taking this one out as if you have a sequence
// like
//
// ldloca.0
// ldflda whatever
//
// of a primitive-like struct, you end up after morphing with the address of a local
// that's not marked as address-taken, which is wrong. Also, ldflda is usually used
// for structs that contain other structs, which isn't a case we handle very
// well now for other reasons.
case CEE_LDFLD:
{
// We won't collapse small fields. This is probably not the right place to have this
// check, but we're only using the function for this purpose, and it is easy to factor
// out if we need to do so.
CORINFO_RESOLVED_TOKEN resolvedToken;
impResolveToken(codeAddr + sizeof(__int8), &resolvedToken, CORINFO_TOKENKIND_Field);
var_types lclTyp = JITtype2varType(info.compCompHnd->getFieldType(resolvedToken.hField));
// Preserve 'small' int types
if (!varTypeIsSmall(lclTyp))
{
lclTyp = genActualType(lclTyp);
}
if (varTypeIsSmall(lclTyp))
{
return false;
}
return true;
}
default:
break;
}
return false;
}
void Compiler::impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind)
{
pResolvedToken->tokenContext = impTokenLookupContextHandle;
pResolvedToken->tokenScope = info.compScopeHnd;
pResolvedToken->token = getU4LittleEndian(addr);
pResolvedToken->tokenType = kind;
info.compCompHnd->resolveToken(pResolvedToken);
}
/*****************************************************************************
*
* Pop one tree from the stack.
*/
StackEntry Compiler::impPopStack()
{
if (verCurrentState.esStackDepth == 0)
{
BADCODE("stack underflow");
}
return verCurrentState.esStack[--verCurrentState.esStackDepth];
}
/*****************************************************************************
*
* Peek at the n'th (0-based) tree on the top of the stack.
*/
StackEntry& Compiler::impStackTop(unsigned n)
{
if (verCurrentState.esStackDepth <= n)
{
BADCODE("stack underflow");
}
return verCurrentState.esStack[verCurrentState.esStackDepth - n - 1];
}
unsigned Compiler::impStackHeight()
{
return verCurrentState.esStackDepth;
}
/*****************************************************************************
* Some of the trees are spilled specially. While unspilling them, or
* making a copy, these need to be handled specially. The function
* enumerates the operators possible after spilling.
*/
#ifdef DEBUG // only used in asserts
static bool impValidSpilledStackEntry(GenTree* tree)
{
if (tree->gtOper == GT_LCL_VAR)
{
return true;
}
if (tree->OperIsConst())
{
return true;
}
return false;
}
#endif
/*****************************************************************************
*
* The following logic is used to save/restore stack contents.
* If 'copy' is true, then we make a copy of the trees on the stack. These
* have to all be cloneable/spilled values.
*/
void Compiler::impSaveStackState(SavedStack* savePtr, bool copy)
{
savePtr->ssDepth = verCurrentState.esStackDepth;
if (verCurrentState.esStackDepth)
{
savePtr->ssTrees = new (this, CMK_ImpStack) StackEntry[verCurrentState.esStackDepth];
size_t saveSize = verCurrentState.esStackDepth * sizeof(*savePtr->ssTrees);
if (copy)
{
StackEntry* table = savePtr->ssTrees;
/* Make a fresh copy of all the stack entries */
for (unsigned level = 0; level < verCurrentState.esStackDepth; level++, table++)
{
table->seTypeInfo = verCurrentState.esStack[level].seTypeInfo;
GenTree* tree = verCurrentState.esStack[level].val;
assert(impValidSpilledStackEntry(tree));
switch (tree->gtOper)
{
case GT_CNS_INT:
case GT_CNS_LNG:
case GT_CNS_DBL:
case GT_CNS_STR:
case GT_LCL_VAR:
table->val = gtCloneExpr(tree);
break;
default:
assert(!"Bad oper - Not covered by impValidSpilledStackEntry()");
break;
}
}
}
else
{
memcpy(savePtr->ssTrees, verCurrentState.esStack, saveSize);
}
}
}
void Compiler::impRestoreStackState(SavedStack* savePtr)
{
verCurrentState.esStackDepth = savePtr->ssDepth;
if (verCurrentState.esStackDepth)
{
memcpy(verCurrentState.esStack, savePtr->ssTrees,
verCurrentState.esStackDepth * sizeof(*verCurrentState.esStack));
}
}
//------------------------------------------------------------------------
// impBeginTreeList: Get the tree list started for a new basic block.
//
inline void Compiler::impBeginTreeList()
{
assert(impStmtList == nullptr && impLastStmt == nullptr);
}
/*****************************************************************************
*
* Store the given start and end stmt in the given basic block. This is
* mostly called by impEndTreeList(BasicBlock *block). It is called
* directly only for handling CEE_LEAVEs out of finally-protected try's.
*/
inline void Compiler::impEndTreeList(BasicBlock* block, Statement* firstStmt, Statement* lastStmt)
{
/* Make the list circular, so that we can easily walk it backwards */
firstStmt->SetPrevStmt(lastStmt);
/* Store the tree list in the basic block */
block->bbStmtList = firstStmt;
/* The block should not already be marked as imported */
assert((block->bbFlags & BBF_IMPORTED) == 0);
block->bbFlags |= BBF_IMPORTED;
}
inline void Compiler::impEndTreeList(BasicBlock* block)
{
if (impStmtList == nullptr)
{
// The block should not already be marked as imported.
assert((block->bbFlags & BBF_IMPORTED) == 0);
// Empty block. Just mark it as imported.
block->bbFlags |= BBF_IMPORTED;
}
else
{
impEndTreeList(block, impStmtList, impLastStmt);
}
#ifdef DEBUG
if (impLastILoffsStmt != nullptr)
{
impLastILoffsStmt->SetLastILOffset(compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs);
impLastILoffsStmt = nullptr;
}
#endif
impStmtList = impLastStmt = nullptr;
}
/*****************************************************************************
*
* Check that storing the given tree doesn't mess up the semantic order. Note
* that this has only limited value as we can only check [0..chkLevel).
*/
inline void Compiler::impAppendStmtCheck(Statement* stmt, unsigned chkLevel)
{
#ifndef DEBUG
return;
#else
if (chkLevel == (unsigned)CHECK_SPILL_ALL)
{
chkLevel = verCurrentState.esStackDepth;
}
if (verCurrentState.esStackDepth == 0 || chkLevel == 0 || chkLevel == (unsigned)CHECK_SPILL_NONE)
{
return;
}
GenTree* tree = stmt->GetRootNode();
// Calls can only be appended if there are no GTF_GLOB_EFFECT on the stack
if (tree->gtFlags & GTF_CALL)
{
for (unsigned level = 0; level < chkLevel; level++)
{
assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_EFFECT) == 0);
}
}
if (tree->gtOper == GT_ASG)
{
// For an assignment to a local variable, all references of that
// variable have to be spilled. If it is aliased, all calls and
// indirect accesses have to be spilled
if (tree->AsOp()->gtOp1->gtOper == GT_LCL_VAR)
{
unsigned lclNum = tree->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum();
for (unsigned level = 0; level < chkLevel; level++)
{
assert(!gtHasRef(verCurrentState.esStack[level].val, lclNum));
assert(!lvaTable[lclNum].IsAddressExposed() ||
(verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT) == 0);
}
}
// If the access may be to global memory, all side effects have to be spilled.
else if (tree->AsOp()->gtOp1->gtFlags & GTF_GLOB_REF)
{
for (unsigned level = 0; level < chkLevel; level++)
{
assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_REF) == 0);
}
}
}
#endif
}
//------------------------------------------------------------------------
// impAppendStmt: Append the given statement to the current block's tree list.
//
//
// Arguments:
// stmt - The statement to add.
// chkLevel - [0..chkLevel) is the portion of the stack which we will check
// for interference with stmt and spill if needed.
// checkConsumedDebugInfo - Whether to check for consumption of impCurStmtDI. impCurStmtDI
// marks the debug info of the current boundary and is set when we
// start importing IL at that boundary. If this parameter is true,
// then the function checks if 'stmt' has been associated with the
// current boundary, and if so, clears it so that we do not attach
// it to more upcoming statements.
//
void Compiler::impAppendStmt(Statement* stmt, unsigned chkLevel, bool checkConsumedDebugInfo)
{
if (chkLevel == (unsigned)CHECK_SPILL_ALL)
{
chkLevel = verCurrentState.esStackDepth;
}
if ((chkLevel != 0) && (chkLevel != (unsigned)CHECK_SPILL_NONE))
{
assert(chkLevel <= verCurrentState.esStackDepth);
/* If the statement being appended has any side-effects, check the stack
to see if anything needs to be spilled to preserve correct ordering. */
GenTree* expr = stmt->GetRootNode();
GenTreeFlags flags = expr->gtFlags & GTF_GLOB_EFFECT;
// Assignments to (unaliased) locals don't count as a side effect, as
// we handle them specially using impSpillLclRefs(). Temp locals should
// be fine too.
if ((expr->gtOper == GT_ASG) && (expr->AsOp()->gtOp1->gtOper == GT_LCL_VAR) &&
((expr->AsOp()->gtOp1->gtFlags & GTF_GLOB_REF) == 0) && !gtHasLocalsWithAddrOp(expr->AsOp()->gtOp2))
{
GenTreeFlags op2Flags = expr->AsOp()->gtOp2->gtFlags & GTF_GLOB_EFFECT;
assert(flags == (op2Flags | GTF_ASG));
flags = op2Flags;
}
if (flags != 0)
{
bool spillGlobEffects = false;
if ((flags & GTF_CALL) != 0)
{
// If there is a call, we have to spill global refs
spillGlobEffects = true;
}
else if (!expr->OperIs(GT_ASG))
{
if ((flags & GTF_ASG) != 0)
{
// The expression is not an assignment node but it has an assignment side effect, it
// must be an atomic op, HW intrinsic or some other kind of node that stores to memory.
// Since we don't know what it assigns to, we need to spill global refs.
spillGlobEffects = true;
}
}
else
{
GenTree* lhs = expr->gtGetOp1();
GenTree* rhs = expr->gtGetOp2();
if (((rhs->gtFlags | lhs->gtFlags) & GTF_ASG) != 0)
{
// Either side of the assignment node has an assignment side effect.
// Since we don't know what it assigns to, we need to spill global refs.
spillGlobEffects = true;
}
else if ((lhs->gtFlags & GTF_GLOB_REF) != 0)
{
spillGlobEffects = true;
}
}
impSpillSideEffects(spillGlobEffects, chkLevel DEBUGARG("impAppendStmt"));
}
else
{
impSpillSpecialSideEff();
}
}
impAppendStmtCheck(stmt, chkLevel);
impAppendStmt(stmt);
#ifdef FEATURE_SIMD
impMarkContiguousSIMDFieldAssignments(stmt);
#endif
// Once we set the current offset as debug info in an appended tree, we are
// ready to report the following offsets. Note that we need to compare
// offsets here instead of debug info, since we do not set the "is call"
// bit in impCurStmtDI.
if (checkConsumedDebugInfo &&
(impLastStmt->GetDebugInfo().GetLocation().GetOffset() == impCurStmtDI.GetLocation().GetOffset()))
{
impCurStmtOffsSet(BAD_IL_OFFSET);
}
#ifdef DEBUG
if (impLastILoffsStmt == nullptr)
{
impLastILoffsStmt = stmt;
}
if (verbose)
{
printf("\n\n");
gtDispStmt(stmt);
}
#endif
}
//------------------------------------------------------------------------
// impAppendStmt: Add the statement to the current stmts list.
//
// Arguments:
// stmt - the statement to add.
//
inline void Compiler::impAppendStmt(Statement* stmt)
{
if (impStmtList == nullptr)
{
// The stmt is the first in the list.
impStmtList = stmt;
}
else
{
// Append the expression statement to the existing list.
impLastStmt->SetNextStmt(stmt);
stmt->SetPrevStmt(impLastStmt);
}
impLastStmt = stmt;
}
//------------------------------------------------------------------------
// impExtractLastStmt: Extract the last statement from the current stmts list.
//
// Return Value:
// The extracted statement.
//
// Notes:
// It assumes that the stmt will be reinserted later.
//
Statement* Compiler::impExtractLastStmt()
{
assert(impLastStmt != nullptr);
Statement* stmt = impLastStmt;
impLastStmt = impLastStmt->GetPrevStmt();
if (impLastStmt == nullptr)
{
impStmtList = nullptr;
}
return stmt;
}
//-------------------------------------------------------------------------
// impInsertStmtBefore: Insert the given "stmt" before "stmtBefore".
//
// Arguments:
// stmt - a statement to insert;
// stmtBefore - an insertion point to insert "stmt" before.
//
inline void Compiler::impInsertStmtBefore(Statement* stmt, Statement* stmtBefore)
{
assert(stmt != nullptr);
assert(stmtBefore != nullptr);
if (stmtBefore == impStmtList)
{
impStmtList = stmt;
}
else
{
Statement* stmtPrev = stmtBefore->GetPrevStmt();
stmt->SetPrevStmt(stmtPrev);
stmtPrev->SetNextStmt(stmt);
}
stmt->SetNextStmt(stmtBefore);
stmtBefore->SetPrevStmt(stmt);
}
//------------------------------------------------------------------------
// impAppendTree: Append the given expression tree to the current block's tree list.
//
//
// Arguments:
// tree - The tree that will be the root of the newly created statement.
// chkLevel - [0..chkLevel) is the portion of the stack which we will check
// for interference with stmt and spill if needed.
// di - Debug information to associate with the statement.
// checkConsumedDebugInfo - Whether to check for consumption of impCurStmtDI. impCurStmtDI
// marks the debug info of the current boundary and is set when we
// start importing IL at that boundary. If this parameter is true,
// then the function checks if 'stmt' has been associated with the
// current boundary, and if so, clears it so that we do not attach
// it to more upcoming statements.
//
// Return value:
// The newly created statement.
//
Statement* Compiler::impAppendTree(GenTree* tree, unsigned chkLevel, const DebugInfo& di, bool checkConsumedDebugInfo)
{
assert(tree);
/* Allocate an 'expression statement' node */
Statement* stmt = gtNewStmt(tree, di);
/* Append the statement to the current block's stmt list */
impAppendStmt(stmt, chkLevel, checkConsumedDebugInfo);
return stmt;
}
/*****************************************************************************
*
* Insert the given expression tree before "stmtBefore"
*/
void Compiler::impInsertTreeBefore(GenTree* tree, const DebugInfo& di, Statement* stmtBefore)
{
/* Allocate an 'expression statement' node */
Statement* stmt = gtNewStmt(tree, di);
/* Append the statement to the current block's stmt list */
impInsertStmtBefore(stmt, stmtBefore);
}
/*****************************************************************************
*
* Append an assignment of the given value to a temp to the current tree list.
* curLevel is the stack level for which the spill to the temp is being done.
*/
void Compiler::impAssignTempGen(unsigned tmp,
GenTree* val,
unsigned curLevel,
Statement** pAfterStmt, /* = NULL */
const DebugInfo& di, /* = DebugInfo() */
BasicBlock* block /* = NULL */
)
{
GenTree* asg = gtNewTempAssign(tmp, val);
if (!asg->IsNothingNode())
{
if (pAfterStmt)
{
Statement* asgStmt = gtNewStmt(asg, di);
fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
*pAfterStmt = asgStmt;
}
else
{
impAppendTree(asg, curLevel, impCurStmtDI);
}
}
}
/*****************************************************************************
* same as above, but handle the valueclass case too
*/
void Compiler::impAssignTempGen(unsigned tmpNum,
GenTree* val,
CORINFO_CLASS_HANDLE structType,
unsigned curLevel,
Statement** pAfterStmt, /* = NULL */
const DebugInfo& di, /* = DebugInfo() */
BasicBlock* block /* = NULL */
)
{
GenTree* asg;
assert(val->TypeGet() != TYP_STRUCT || structType != NO_CLASS_HANDLE);
if (varTypeIsStruct(val) && (structType != NO_CLASS_HANDLE))
{
assert(tmpNum < lvaCount);
assert(structType != NO_CLASS_HANDLE);
// If the method is non-verifiable the assert is not true, so at least ignore it
// when verification is turned on, since any block that tries to use the temp
// would have failed verification.
var_types varType = lvaTable[tmpNum].lvType;
assert(varType == TYP_UNDEF || varTypeIsStruct(varType));
lvaSetStruct(tmpNum, structType, false);
varType = lvaTable[tmpNum].lvType;
// Now, set the type of the struct value. Note that lvaSetStruct may modify the type
// of the lclVar to a specialized type (e.g. TYP_SIMD), based on the handle (structType)
// that has been passed in for the value being assigned to the temp, in which case we
// need to set 'val' to that same type.
// Note also that if we always normalized the types of any node that might be a struct
// type, this would not be necessary - but that requires additional JIT/EE interface
// calls that may not actually be required - e.g. if we only access a field of a struct.
GenTree* dst = gtNewLclvNode(tmpNum, varType);
asg = impAssignStruct(dst, val, structType, curLevel, pAfterStmt, di, block);
}
else
{
asg = gtNewTempAssign(tmpNum, val);
}
if (!asg->IsNothingNode())
{
if (pAfterStmt)
{
Statement* asgStmt = gtNewStmt(asg, di);
fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
*pAfterStmt = asgStmt;
}
else
{
impAppendTree(asg, curLevel, impCurStmtDI);
}
}
}
/*****************************************************************************
*
* Pop the given number of values from the stack and return a list node with
* their values.
* The 'prefixTree' argument may optionally contain an argument
* list that is prepended to the list returned from this function.
*
* The notion of prepended is a bit misleading in that the list is backwards
* from the way I would expect: The first element popped is at the end of
* the returned list, and prefixTree is 'before' that, meaning closer to
* the end of the list. To get to prefixTree, you have to walk to the
* end of the list.
*
* For ARG_ORDER_R2L prefixTree is only used to insert extra arguments, as
* such we reverse its meaning such that returnValue has a reversed
* prefixTree at the head of the list.
*/
GenTreeCall::Use* Compiler::impPopCallArgs(unsigned count, CORINFO_SIG_INFO* sig, GenTreeCall::Use* prefixArgs)
{
assert(sig == nullptr || count == sig->numArgs);
CORINFO_CLASS_HANDLE structType;
GenTreeCall::Use* argList;
if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
{
argList = nullptr;
}
else
{ // ARG_ORDER_L2R
argList = prefixArgs;
}
while (count--)
{
StackEntry se = impPopStack();
typeInfo ti = se.seTypeInfo;
GenTree* temp = se.val;
if (varTypeIsStruct(temp))
{
// Morph trees that aren't already OBJs or MKREFANY to be OBJs
assert(ti.IsType(TI_STRUCT));
structType = ti.GetClassHandleForValueClass();
bool forceNormalization = false;
if (varTypeIsSIMD(temp))
{
// We need to ensure that fgMorphArgs will use the correct struct handle to ensure proper
// ABI handling of this argument.
// Note that this can happen, for example, if we have a SIMD intrinsic that returns a SIMD type
// with a different baseType than we've seen.
// We also need to ensure an OBJ node if we have a FIELD node that might be transformed to LCL_FLD
// or a plain GT_IND.
// TODO-Cleanup: Consider whether we can eliminate all of these cases.
if ((gtGetStructHandleIfPresent(temp) != structType) || temp->OperIs(GT_FIELD))
{
forceNormalization = true;
}
}
#ifdef DEBUG
if (verbose)
{
printf("Calling impNormStructVal on:\n");
gtDispTree(temp);
}
#endif
temp = impNormStructVal(temp, structType, (unsigned)CHECK_SPILL_ALL, forceNormalization);
#ifdef DEBUG
if (verbose)
{
printf("resulting tree:\n");
gtDispTree(temp);
}
#endif
}
/* NOTE: we defer bashing the type for I_IMPL to fgMorphArgs */
argList = gtPrependNewCallArg(temp, argList);
}
if (sig != nullptr)
{
if (sig->retTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR && sig->retType != CORINFO_TYPE_VAR)
{
// Make sure that all valuetypes (including enums) that we push are loaded.
// This is to guarantee that if a GC is triggered from the prestub of this method,
// all valuetypes in the method signature are already loaded.
// We need to be able to find the size of the valuetypes, but we cannot
// do a class-load from within GC.
info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(sig->retTypeSigClass);
}
CORINFO_ARG_LIST_HANDLE sigArgs = sig->args;
GenTreeCall::Use* arg;
for (arg = argList, count = sig->numArgs; count > 0; arg = arg->GetNext(), count--)
{
PREFIX_ASSUME(arg != nullptr);
CORINFO_CLASS_HANDLE classHnd;
CorInfoType corType = strip(info.compCompHnd->getArgType(sig, sigArgs, &classHnd));
var_types jitSigType = JITtype2varType(corType);
if (!impCheckImplicitArgumentCoercion(jitSigType, arg->GetNode()->TypeGet()))
{
BADCODE("the call argument has a type that can't be implicitly converted to the signature type");
}
// insert implied casts (from float to double or double to float)
if ((jitSigType == TYP_DOUBLE) && (arg->GetNode()->TypeGet() == TYP_FLOAT))
{
arg->SetNode(gtNewCastNode(TYP_DOUBLE, arg->GetNode(), false, TYP_DOUBLE));
}
else if ((jitSigType == TYP_FLOAT) && (arg->GetNode()->TypeGet() == TYP_DOUBLE))
{
arg->SetNode(gtNewCastNode(TYP_FLOAT, arg->GetNode(), false, TYP_FLOAT));
}
// insert any widening or narrowing casts for backwards compatibility
arg->SetNode(impImplicitIorI4Cast(arg->GetNode(), jitSigType));
if (corType != CORINFO_TYPE_CLASS && corType != CORINFO_TYPE_BYREF && corType != CORINFO_TYPE_PTR &&
corType != CORINFO_TYPE_VAR)
{
CORINFO_CLASS_HANDLE argRealClass = info.compCompHnd->getArgClass(sig, sigArgs);
if (argRealClass != nullptr)
{
// Make sure that all valuetypes (including enums) that we push are loaded.
// This is to guarantee that if a GC is triggered from the prestub of this method,
// all valuetypes in the method signature are already loaded.
// We need to be able to find the size of the valuetypes, but we cannot
// do a class-load from within GC.
info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(argRealClass);
}
}
const var_types nodeArgType = arg->GetNode()->TypeGet();
if (!varTypeIsStruct(jitSigType) && genTypeSize(nodeArgType) != genTypeSize(jitSigType))
{
assert(!varTypeIsStruct(nodeArgType));
// Some ABIs require precise size information for call arguments smaller than the target pointer size,
// for example arm64 OSX. Create a special node to keep this information until morph
// consumes it into `fgArgInfo`.
GenTree* putArgType = gtNewOperNode(GT_PUTARG_TYPE, jitSigType, arg->GetNode());
arg->SetNode(putArgType);
}
sigArgs = info.compCompHnd->getArgNext(sigArgs);
}
}
if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
{
// Prepend the prefixTree
// Simple in-place reversal to place treeList
// at the end of a reversed prefixTree
while (prefixArgs != nullptr)
{
GenTreeCall::Use* next = prefixArgs->GetNext();
prefixArgs->SetNext(argList);
argList = prefixArgs;
prefixArgs = next;
}
}
return argList;
}
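// Check if type1 exactly matches type2; base case for the variadic overload below.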
static bool TypeIs(var_types type1, var_types type2)
{
return type1 == type2;
}
// Check if type1 matches any type from the list.
template <typename... T>
static bool TypeIs(var_types type1, var_types type2, T... rest)
{
return TypeIs(type1, type2) || TypeIs(type1, rest...);
}
//------------------------------------------------------------------------
// impCheckImplicitArgumentCoercion: check that the node's type is compatible with
// the signature's type using ECMA implicit argument coercion table.
//
// Arguments:
// sigType - the type in the call signature;
// nodeType - the node type.
//
// Return Value:
// true if they are compatible, false otherwise.
//
// Notes:
// - it is currently allowing byref->long passing, should be fixed in VM;
// - it can't check long -> native int case on 64-bit platforms,
// so the behavior is different depending on the target bitness.
//
bool Compiler::impCheckImplicitArgumentCoercion(var_types sigType, var_types nodeType) const
{
if (sigType == nodeType)
{
return true;
}
if (TypeIs(sigType, TYP_BOOL, TYP_UBYTE, TYP_BYTE, TYP_USHORT, TYP_SHORT, TYP_UINT, TYP_INT))
{
if (TypeIs(nodeType, TYP_BOOL, TYP_UBYTE, TYP_BYTE, TYP_USHORT, TYP_SHORT, TYP_UINT, TYP_INT, TYP_I_IMPL))
{
return true;
}
}
else if (TypeIs(sigType, TYP_ULONG, TYP_LONG))
{
if (TypeIs(nodeType, TYP_LONG))
{
return true;
}
}
else if (TypeIs(sigType, TYP_FLOAT, TYP_DOUBLE))
{
if (TypeIs(nodeType, TYP_FLOAT, TYP_DOUBLE))
{
return true;
}
}
else if (TypeIs(sigType, TYP_BYREF))
{
if (TypeIs(nodeType, TYP_I_IMPL))
{
return true;
}
// This condition tolerates such IL:
// ; V00 this ref this class-hnd
// ldarg.0
// call(byref)
if (TypeIs(nodeType, TYP_REF))
{
return true;
}
}
else if (varTypeIsStruct(sigType))
{
if (varTypeIsStruct(nodeType))
{
return true;
}
}
// This condition should not be under `else` because `TYP_I_IMPL`
// intersects with `TYP_LONG` or `TYP_INT`.
if (TypeIs(sigType, TYP_I_IMPL, TYP_U_IMPL))
{
// Note that it allows `ldc.i8 1; call(nint)` on 64-bit platforms,
// but we can't distinguish `nint` from `long` there.
if (TypeIs(nodeType, TYP_I_IMPL, TYP_U_IMPL, TYP_INT, TYP_UINT))
{
return true;
}
// It tolerates IL that ECMA does not allow but that is commonly used.
// Example:
// V02 loc1 struct <RTL_OSVERSIONINFOEX, 32>
// ldloca.s 0x2
// call(native int)
if (TypeIs(nodeType, TYP_BYREF))
{
return true;
}
}
return false;
}
/*****************************************************************************
*
* Pop the given number of values from the stack in reverse order (STDCALL/CDECL etc.)
* The first "skipReverseCount" items are not reversed.
*/
GenTreeCall::Use* Compiler::impPopReverseCallArgs(unsigned count, CORINFO_SIG_INFO* sig, unsigned skipReverseCount)
{
assert(skipReverseCount <= count);
GenTreeCall::Use* list = impPopCallArgs(count, sig);
// reverse the list
if (list == nullptr || skipReverseCount == count)
{
return list;
}
GenTreeCall::Use* ptr = nullptr; // Initialized to the first node that needs to be reversed
GenTreeCall::Use* lastSkipNode = nullptr; // Will be set to the last node that does not need to be reversed
if (skipReverseCount == 0)
{
ptr = list;
}
else
{
lastSkipNode = list;
// Get to the first node that needs to be reversed
for (unsigned i = 0; i < skipReverseCount - 1; i++)
{
lastSkipNode = lastSkipNode->GetNext();
}
PREFIX_ASSUME(lastSkipNode != nullptr);
ptr = lastSkipNode->GetNext();
}
GenTreeCall::Use* reversedList = nullptr;
do
{
GenTreeCall::Use* tmp = ptr->GetNext();
ptr->SetNext(reversedList);
reversedList = ptr;
ptr = tmp;
} while (ptr != nullptr);
if (skipReverseCount)
{
lastSkipNode->SetNext(reversedList);
return list;
}
else
{
return reversedList;
}
}
//------------------------------------------------------------------------
// impAssignStruct: Create a struct assignment
//
// Arguments:
// dest - the destination of the assignment
// src - the value to be assigned
// structHnd - handle representing the struct type
// curLevel - stack level for which a spill may be being done
// pAfterStmt - statement to insert any additional statements after
// ilOffset - il offset for new statements
// block - block to insert any additional statements in
//
// Return Value:
// The tree that should be appended to the statement list that represents the assignment.
//
// Notes:
// Temp assignments may be appended to impStmtList if spilling is necessary.
GenTree* Compiler::impAssignStruct(GenTree* dest,
GenTree* src,
CORINFO_CLASS_HANDLE structHnd,
unsigned curLevel,
Statement** pAfterStmt, /* = nullptr */
const DebugInfo& di, /* = DebugInfo() */
BasicBlock* block /* = nullptr */
)
{
assert(varTypeIsStruct(dest));
DebugInfo usedDI = di;
if (!usedDI.IsValid())
{
usedDI = impCurStmtDI;
}
while (dest->gtOper == GT_COMMA)
{
// Second thing is the struct.
assert(varTypeIsStruct(dest->AsOp()->gtOp2));
// Append all the op1 of GT_COMMA trees before we evaluate op2 of the GT_COMMA tree.
if (pAfterStmt)
{
Statement* newStmt = gtNewStmt(dest->AsOp()->gtOp1, usedDI);
fgInsertStmtAfter(block, *pAfterStmt, newStmt);
*pAfterStmt = newStmt;
}
else
{
impAppendTree(dest->AsOp()->gtOp1, curLevel, usedDI); // do the side effect
}
// set dest to the second thing
dest = dest->AsOp()->gtOp2;
}
assert(dest->gtOper == GT_LCL_VAR || dest->gtOper == GT_RETURN || dest->gtOper == GT_FIELD ||
dest->gtOper == GT_IND || dest->gtOper == GT_OBJ || dest->gtOper == GT_INDEX);
// Return a NOP if this is a self-assignment.
if (dest->OperGet() == GT_LCL_VAR && src->OperGet() == GT_LCL_VAR &&
src->AsLclVarCommon()->GetLclNum() == dest->AsLclVarCommon()->GetLclNum())
{
return gtNewNothingNode();
}
// TODO-1stClassStructs: Avoid creating an address if it is not needed,
// or re-creating a Blk node if it is.
GenTree* destAddr;
if (dest->gtOper == GT_IND || dest->OperIsBlk())
{
destAddr = dest->AsOp()->gtOp1;
}
else
{
destAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest);
}
return (impAssignStructPtr(destAddr, src, structHnd, curLevel, pAfterStmt, usedDI, block));
}
//------------------------------------------------------------------------
// impAssignStructPtr: Assign (copy) the structure from 'src' to 'destAddr'.
//
// Arguments:
// destAddr - address of the destination of the assignment
// src - source of the assignment
// structHnd - handle representing the struct type
// curLevel - stack level for which a spill may be being done
// pAfterStmt - statement to insert any additional statements after
// di - debug info for new statements
// block - block to insert any additional statements in
//
// Return Value:
// The tree that should be appended to the statement list that represents the assignment.
//
// Notes:
// Temp assignments may be appended to impStmtList if spilling is necessary.
GenTree* Compiler::impAssignStructPtr(GenTree* destAddr,
GenTree* src,
CORINFO_CLASS_HANDLE structHnd,
unsigned curLevel,
Statement** pAfterStmt, /* = NULL */
const DebugInfo& di, /* = DebugInfo() */
BasicBlock* block /* = NULL */
)
{
GenTree* dest = nullptr;
GenTreeFlags destFlags = GTF_EMPTY;
DebugInfo usedDI = di;
if (!usedDI.IsValid())
{
usedDI = impCurStmtDI;
}
#ifdef DEBUG
#ifdef FEATURE_HW_INTRINSICS
if (src->OperIs(GT_HWINTRINSIC))
{
const GenTreeHWIntrinsic* intrinsic = src->AsHWIntrinsic();
if (HWIntrinsicInfo::IsMultiReg(intrinsic->GetHWIntrinsicId()))
{
assert(src->TypeGet() == TYP_STRUCT);
}
else
{
assert(varTypeIsSIMD(src));
}
}
else
#endif // FEATURE_HW_INTRINSICS
{
assert(src->OperIs(GT_LCL_VAR, GT_LCL_FLD, GT_FIELD, GT_IND, GT_OBJ, GT_CALL, GT_MKREFANY, GT_RET_EXPR,
GT_COMMA) ||
((src->TypeGet() != TYP_STRUCT) && src->OperIsSIMD()));
}
#endif // DEBUG
var_types asgType = src->TypeGet();
if (src->gtOper == GT_CALL)
{
GenTreeCall* srcCall = src->AsCall();
if (srcCall->TreatAsHasRetBufArg(this))
{
// Case of call returning a struct via hidden retbuf arg
CLANG_FORMAT_COMMENT_ANCHOR;
#if !defined(TARGET_ARM)
// Unmanaged instance methods on Windows or Unix X86 need the retbuf arg after the first (this) parameter
if ((TargetOS::IsWindows || compUnixX86Abi()) && srcCall->IsUnmanaged())
{
if (callConvIsInstanceMethodCallConv(srcCall->GetUnmanagedCallConv()))
{
#ifdef TARGET_X86
// The argument list has already been reversed.
// Insert the return buffer as the second-to-last node
// so it will be pushed on to the stack after the user args but before the native this arg
// as required by the native ABI.
GenTreeCall::Use* lastArg = srcCall->gtCallArgs;
if (lastArg == nullptr)
{
srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, srcCall->gtCallArgs);
}
else if (srcCall->GetUnmanagedCallConv() == CorInfoCallConvExtension::Thiscall)
{
// For thiscall, the "this" parameter is not included in the argument list reversal,
// so we need to put the return buffer as the last parameter.
for (; lastArg->GetNext() != nullptr; lastArg = lastArg->GetNext())
;
gtInsertNewCallArgAfter(destAddr, lastArg);
}
else if (lastArg->GetNext() == nullptr)
{
srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, lastArg);
}
else
{
assert(lastArg != nullptr && lastArg->GetNext() != nullptr);
GenTreeCall::Use* secondLastArg = lastArg;
lastArg = lastArg->GetNext();
for (; lastArg->GetNext() != nullptr; secondLastArg = lastArg, lastArg = lastArg->GetNext())
;
assert(secondLastArg->GetNext() != nullptr);
gtInsertNewCallArgAfter(destAddr, secondLastArg);
}
#else
GenTreeCall::Use* thisArg = gtInsertNewCallArgAfter(destAddr, srcCall->gtCallArgs);
#endif
}
else
{
#ifdef TARGET_X86
// The argument list has already been reversed.
// Insert the return buffer as the last node so it will be pushed on to the stack last
// as required by the native ABI.
GenTreeCall::Use* lastArg = srcCall->gtCallArgs;
if (lastArg == nullptr)
{
srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, srcCall->gtCallArgs);
}
else
{
for (; lastArg->GetNext() != nullptr; lastArg = lastArg->GetNext())
;
gtInsertNewCallArgAfter(destAddr, lastArg);
}
#else
// insert the return value buffer into the argument list as first byref parameter
srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, srcCall->gtCallArgs);
#endif
}
}
else
#endif // !defined(TARGET_ARM)
{
// insert the return value buffer into the argument list as first byref parameter
srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, srcCall->gtCallArgs);
}
// now returns void, not a struct
src->gtType = TYP_VOID;
// return the morphed call node
return src;
}
else
{
// Case of call returning a struct in one or more registers.
var_types returnType = (var_types)srcCall->gtReturnType;
// First we try to change this to "LclVar/LclFld = call"
//
if ((destAddr->gtOper == GT_ADDR) && (destAddr->AsOp()->gtOp1->gtOper == GT_LCL_VAR))
{
// If it is a multi-reg struct return, don't change the oper to GT_LCL_FLD.
// That is, the IR will be of the form lclVar = call for multi-reg return
//
GenTreeLclVar* lcl = destAddr->AsOp()->gtOp1->AsLclVar();
unsigned lclNum = lcl->GetLclNum();
LclVarDsc* varDsc = lvaGetDesc(lclNum);
if (src->AsCall()->HasMultiRegRetVal())
{
// Mark the struct LclVar as used in a MultiReg return context
// which currently makes it non promotable.
// TODO-1stClassStructs: Eliminate this pessimization when we can more generally
// handle multireg returns.
lcl->gtFlags |= GTF_DONT_CSE;
varDsc->lvIsMultiRegRet = true;
}
dest = lcl;
#if defined(TARGET_ARM)
// TODO-Cleanup: This should have been taken care of in the above HasMultiRegRetVal() case,
// but that method has not been updated to include ARM.
impMarkLclDstNotPromotable(lclNum, src, structHnd);
lcl->gtFlags |= GTF_DONT_CSE;
#elif defined(UNIX_AMD64_ABI)
// Not allowed for FEATURE_CORCLR which is the only SKU available for System V OSs.
assert(!src->AsCall()->IsVarargs() && "varargs not allowed for System V OSs.");
// Make the struct non promotable. The eightbytes could contain multiple fields.
// TODO-1stClassStructs: Eliminate this pessimization when we can more generally
// handle multireg returns.
// TODO-Cleanup: Why is this needed here? This seems that it will set this even for
// non-multireg returns.
lcl->gtFlags |= GTF_DONT_CSE;
varDsc->lvIsMultiRegRet = true;
#endif
}
else // we don't have a GT_ADDR of a GT_LCL_VAR
{
// !!! The destination could be on stack. !!!
// This flag will let us choose the correct write barrier.
asgType = returnType;
destFlags = GTF_IND_TGTANYWHERE;
}
}
}
else if (src->gtOper == GT_RET_EXPR)
{
GenTreeCall* call = src->AsRetExpr()->gtInlineCandidate->AsCall();
noway_assert(call->gtOper == GT_CALL);
if (call->HasRetBufArg())
{
// insert the return value buffer into the argument list as first byref parameter
call->gtCallArgs = gtPrependNewCallArg(destAddr, call->gtCallArgs);
// now returns void, not a struct
src->gtType = TYP_VOID;
call->gtType = TYP_VOID;
// We already have appended the write to 'dest' GT_CALL's args
// So now we just return an empty node (pruning the GT_RET_EXPR)
return src;
}
else
{
// Case of inline method returning a struct in one or more registers.
// We won't need a return buffer
asgType = src->gtType;
if ((destAddr->gtOper != GT_ADDR) || (destAddr->AsOp()->gtOp1->gtOper != GT_LCL_VAR))
{
// !!! The destination could be on stack. !!!
// This flag will let us choose the correct write barrier.
destFlags = GTF_IND_TGTANYWHERE;
}
}
}
else if (src->OperIsBlk())
{
asgType = impNormStructType(structHnd);
if (src->gtOper == GT_OBJ)
{
assert(src->AsObj()->GetLayout()->GetClassHandle() == structHnd);
}
}
else if (src->gtOper == GT_INDEX)
{
asgType = impNormStructType(structHnd);
assert(src->AsIndex()->gtStructElemClass == structHnd);
}
else if (src->gtOper == GT_MKREFANY)
{
// Since we are assigning the result of a GT_MKREFANY,
// "destAddr" must point to a refany.
GenTree* destAddrClone;
destAddr =
impCloneExpr(destAddr, &destAddrClone, structHnd, curLevel, pAfterStmt DEBUGARG("MKREFANY assignment"));
assert(OFFSETOF__CORINFO_TypedReference__dataPtr == 0);
assert(destAddr->gtType == TYP_I_IMPL || destAddr->gtType == TYP_BYREF);
fgAddFieldSeqForZeroOffset(destAddr, GetFieldSeqStore()->CreateSingleton(GetRefanyDataField()));
GenTree* ptrSlot = gtNewOperNode(GT_IND, TYP_I_IMPL, destAddr);
GenTreeIntCon* typeFieldOffset = gtNewIconNode(OFFSETOF__CORINFO_TypedReference__type, TYP_I_IMPL);
typeFieldOffset->gtFieldSeq = GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField());
GenTree* typeSlot =
gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewOperNode(GT_ADD, destAddr->gtType, destAddrClone, typeFieldOffset));
// append the assign of the pointer value
GenTree* asg = gtNewAssignNode(ptrSlot, src->AsOp()->gtOp1);
if (pAfterStmt)
{
Statement* newStmt = gtNewStmt(asg, usedDI);
fgInsertStmtAfter(block, *pAfterStmt, newStmt);
*pAfterStmt = newStmt;
}
else
{
impAppendTree(asg, curLevel, usedDI);
}
// return the assign of the type value, to be appended
return gtNewAssignNode(typeSlot, src->AsOp()->gtOp2);
}
else if (src->gtOper == GT_COMMA)
{
// The second thing is the struct or its address.
assert(varTypeIsStruct(src->AsOp()->gtOp2) || src->AsOp()->gtOp2->gtType == TYP_BYREF);
if (pAfterStmt)
{
// Insert op1 after '*pAfterStmt'
Statement* newStmt = gtNewStmt(src->AsOp()->gtOp1, usedDI);
fgInsertStmtAfter(block, *pAfterStmt, newStmt);
*pAfterStmt = newStmt;
}
else if (impLastStmt != nullptr)
{
// Do the side-effect as a separate statement.
impAppendTree(src->AsOp()->gtOp1, curLevel, usedDI);
}
else
{
// In this case we have neither been given a statement to insert after, nor are we
// in the importer where we can append the side effect.
// Instead, we're going to sink the assignment below the COMMA.
src->AsOp()->gtOp2 =
impAssignStructPtr(destAddr, src->AsOp()->gtOp2, structHnd, curLevel, pAfterStmt, usedDI, block);
return src;
}
// Evaluate the second thing using recursion.
return impAssignStructPtr(destAddr, src->AsOp()->gtOp2, structHnd, curLevel, pAfterStmt, usedDI, block);
}
else if (src->IsLocal())
{
asgType = src->TypeGet();
}
else if (asgType == TYP_STRUCT)
{
// It should already have the appropriate type.
assert(asgType == impNormStructType(structHnd));
}
if ((dest == nullptr) && (destAddr->OperGet() == GT_ADDR))
{
GenTree* destNode = destAddr->gtGetOp1();
// If the actual destination is a local, a GT_INDEX or a block node, or is a node that
// will be morphed, don't insert an OBJ(ADDR) if it already has the right type.
if (destNode->OperIs(GT_LCL_VAR, GT_INDEX) || destNode->OperIsBlk())
{
var_types destType = destNode->TypeGet();
// If one or both types are TYP_STRUCT (one may not yet be normalized), they are compatible
// iff their handles are the same.
// Otherwise, they are compatible if their types are the same.
bool typesAreCompatible =
((destType == TYP_STRUCT) || (asgType == TYP_STRUCT))
? ((gtGetStructHandleIfPresent(destNode) == structHnd) && varTypeIsStruct(asgType))
: (destType == asgType);
if (typesAreCompatible)
{
dest = destNode;
if (destType != TYP_STRUCT)
{
// Use a normalized type if available. We know from above that they're equivalent.
asgType = destType;
}
}
}
}
if (dest == nullptr)
{
if (asgType == TYP_STRUCT)
{
dest = gtNewObjNode(structHnd, destAddr);
gtSetObjGcInfo(dest->AsObj());
// Although an obj as a call argument was always assumed to be a globRef
// (which is itself overly conservative), that is not true of the operands
// of a block assignment.
dest->gtFlags &= ~GTF_GLOB_REF;
dest->gtFlags |= (destAddr->gtFlags & GTF_GLOB_REF);
}
else
{
dest = gtNewOperNode(GT_IND, asgType, destAddr);
}
}
if (dest->OperIs(GT_LCL_VAR) &&
(src->IsMultiRegNode() ||
(src->OperIs(GT_RET_EXPR) && src->AsRetExpr()->gtInlineCandidate->AsCall()->HasMultiRegRetVal())))
{
if (lvaEnregMultiRegVars && varTypeIsStruct(dest))
{
dest->AsLclVar()->SetMultiReg();
}
if (src->OperIs(GT_CALL))
{
lvaGetDesc(dest->AsLclVar())->lvIsMultiRegRet = true;
}
}
dest->gtFlags |= destFlags;
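// Re-read the combined flags so that the GTF_DONT_CSE check below also sees any flags already set on 'dest'.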
destFlags = dest->gtFlags;
// return an assignment node, to be appended
GenTree* asgNode = gtNewAssignNode(dest, src);
gtBlockOpInit(asgNode, dest, src, false);
// TODO-1stClassStructs: Clean up the settings of GTF_DONT_CSE on the lhs
// of assignments.
if ((destFlags & GTF_DONT_CSE) == 0)
{
dest->gtFlags &= ~(GTF_DONT_CSE);
}
return asgNode;
}
/*****************************************************************************
Given a struct value, and the class handle for that structure, return
the expression for the address for that structure value.
willDeref - does the caller guarantee to dereference the pointer.
*/
GenTree* Compiler::impGetStructAddr(GenTree* structVal,
CORINFO_CLASS_HANDLE structHnd,
unsigned curLevel,
bool willDeref)
{
assert(varTypeIsStruct(structVal) || eeIsValueClass(structHnd));
var_types type = structVal->TypeGet();
genTreeOps oper = structVal->gtOper;
if (oper == GT_OBJ && willDeref)
{
assert(structVal->AsObj()->GetLayout()->GetClassHandle() == structHnd);
return (structVal->AsObj()->Addr());
}
else if (oper == GT_CALL || oper == GT_RET_EXPR || oper == GT_OBJ || oper == GT_MKREFANY ||
structVal->OperIsSimdOrHWintrinsic())
{
unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
// The 'return value' is now the temp itself
type = genActualType(lvaTable[tmpNum].TypeGet());
GenTree* temp = gtNewLclvNode(tmpNum, type);
temp = gtNewOperNode(GT_ADDR, TYP_BYREF, temp);
return temp;
}
else if (oper == GT_COMMA)
{
assert(structVal->AsOp()->gtOp2->gtType == type); // Second thing is the struct
Statement* oldLastStmt = impLastStmt;
structVal->AsOp()->gtOp2 = impGetStructAddr(structVal->AsOp()->gtOp2, structHnd, curLevel, willDeref);
structVal->gtType = TYP_BYREF;
if (oldLastStmt != impLastStmt)
{
// Some temp assignment statement was placed on the statement list
// for Op2, but that would be out of order with op1, so we need to
// spill op1 onto the statement list after whatever was last
// before we recursed on Op2 (i.e. before whatever Op2 appended).
Statement* beforeStmt;
if (oldLastStmt == nullptr)
{
// The op1 stmt should be the first in the list.
beforeStmt = impStmtList;
}
else
{
// Insert after the oldLastStmt before the first inserted for op2.
beforeStmt = oldLastStmt->GetNextStmt();
}
impInsertTreeBefore(structVal->AsOp()->gtOp1, impCurStmtDI, beforeStmt);
structVal->AsOp()->gtOp1 = gtNewNothingNode();
}
return (structVal);
}
return (gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
}
//------------------------------------------------------------------------
// impNormStructType: Normalize the type of a (known to be) struct class handle.
//
// Arguments:
// structHnd - The class handle for the struct type of interest.
// pSimdBaseJitType - (optional, default nullptr) - if non-null, and the struct is a SIMD
// type, set to the SIMD base JIT type
//
// Return Value:
// The JIT type for the struct (e.g. TYP_STRUCT, or TYP_SIMD*).
// It may also modify the compFloatingPointUsed flag if the type is a SIMD type.
//
// Notes:
// Normalizing the type involves examining the struct type to determine if it should
// be modified to one that is handled specially by the JIT, possibly being a candidate
// for full enregistration, e.g. TYP_SIMD16. If the size of the struct is already known
// call structSizeMightRepresentSIMDType to determine if this api needs to be called.
var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd, CorInfoType* pSimdBaseJitType)
{
assert(structHnd != NO_CLASS_HANDLE);
var_types structType = TYP_STRUCT;
#ifdef FEATURE_SIMD
const DWORD structFlags = info.compCompHnd->getClassAttribs(structHnd);
// Don't bother if the struct contains GC references or byrefs; it can't be a SIMD type.
if ((structFlags & (CORINFO_FLG_CONTAINS_GC_PTR | CORINFO_FLG_BYREF_LIKE)) == 0)
{
unsigned originalSize = info.compCompHnd->getClassSize(structHnd);
if (structSizeMightRepresentSIMDType(originalSize))
{
unsigned int sizeBytes;
CorInfoType simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(structHnd, &sizeBytes);
if (simdBaseJitType != CORINFO_TYPE_UNDEF)
{
assert(sizeBytes == originalSize);
structType = getSIMDTypeForSize(sizeBytes);
if (pSimdBaseJitType != nullptr)
{
*pSimdBaseJitType = simdBaseJitType;
}
// Also indicate that we use floating point registers.
compFloatingPointUsed = true;
}
}
}
#endif // FEATURE_SIMD
return structType;
}
//------------------------------------------------------------------------
// Compiler::impNormStructVal: Normalize a struct value
//
// Arguments:
// structVal - the node we are going to normalize
// structHnd - the class handle for the node
// curLevel - the current stack level
// forceNormalization - Force the creation of an OBJ node (default is false).
//
// Notes:
// Given struct value 'structVal', make sure it is 'canonical', that is
// it is either:
// - a known struct type (non-TYP_STRUCT, e.g. TYP_SIMD8)
// - an OBJ or a MKREFANY node, or
// - a node (e.g. GT_INDEX) that will be morphed.
// If the node is a CALL or RET_EXPR, a copy will be made to a new temp.
//
GenTree* Compiler::impNormStructVal(GenTree* structVal,
CORINFO_CLASS_HANDLE structHnd,
unsigned curLevel,
bool forceNormalization /*=false*/)
{
assert(forceNormalization || varTypeIsStruct(structVal));
assert(structHnd != NO_CLASS_HANDLE);
var_types structType = structVal->TypeGet();
bool makeTemp = false;
if (structType == TYP_STRUCT)
{
structType = impNormStructType(structHnd);
}
bool alreadyNormalized = false;
GenTreeLclVarCommon* structLcl = nullptr;
genTreeOps oper = structVal->OperGet();
switch (oper)
{
// GT_RETURN and GT_MKREFANY don't capture the handle.
case GT_RETURN:
break;
case GT_MKREFANY:
alreadyNormalized = true;
break;
case GT_CALL:
structVal->AsCall()->gtRetClsHnd = structHnd;
makeTemp = true;
break;
case GT_RET_EXPR:
structVal->AsRetExpr()->gtRetClsHnd = structHnd;
makeTemp = true;
break;
case GT_ARGPLACE:
structVal->AsArgPlace()->gtArgPlaceClsHnd = structHnd;
break;
case GT_INDEX:
// This will be transformed to an OBJ later.
alreadyNormalized = true;
structVal->AsIndex()->gtStructElemClass = structHnd;
structVal->AsIndex()->gtIndElemSize = info.compCompHnd->getClassSize(structHnd);
break;
case GT_FIELD:
// Wrap it in a GT_OBJ, if needed.
structVal->gtType = structType;
if ((structType == TYP_STRUCT) || forceNormalization)
{
structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
}
break;
case GT_LCL_VAR:
case GT_LCL_FLD:
structLcl = structVal->AsLclVarCommon();
// Wrap it in a GT_OBJ.
structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
FALLTHROUGH;
case GT_OBJ:
case GT_BLK:
case GT_ASG:
// These should already have the appropriate type.
assert(structVal->gtType == structType);
alreadyNormalized = true;
break;
case GT_IND:
assert(structVal->gtType == structType);
structVal = gtNewObjNode(structHnd, structVal->gtGetOp1());
alreadyNormalized = true;
break;
#ifdef FEATURE_SIMD
case GT_SIMD:
assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType));
break;
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
assert(structVal->gtType == structType);
assert(varTypeIsSIMD(structVal) ||
HWIntrinsicInfo::IsMultiReg(structVal->AsHWIntrinsic()->GetHWIntrinsicId()));
break;
#endif
case GT_COMMA:
{
// The second thing could either be a block node or a GT_FIELD or a GT_SIMD or a GT_COMMA node.
GenTree* blockNode = structVal->AsOp()->gtOp2;
assert(blockNode->gtType == structType);
// Is this GT_COMMA(op1, GT_COMMA())?
GenTree* parent = structVal;
if (blockNode->OperGet() == GT_COMMA)
{
// Find the last node in the comma chain.
do
{
assert(blockNode->gtType == structType);
parent = blockNode;
blockNode = blockNode->AsOp()->gtOp2;
} while (blockNode->OperGet() == GT_COMMA);
}
if (blockNode->OperGet() == GT_FIELD)
{
// If we have a GT_FIELD then wrap it in a GT_OBJ.
blockNode = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, blockNode));
}
#ifdef FEATURE_SIMD
if (blockNode->OperIsSimdOrHWintrinsic())
{
parent->AsOp()->gtOp2 = impNormStructVal(blockNode, structHnd, curLevel, forceNormalization);
alreadyNormalized = true;
}
else
#endif
{
noway_assert(blockNode->OperIsBlk());
// Sink the GT_COMMA below the blockNode addr.
// That is, GT_COMMA(op1, op2=blockNode) is transformed into
// blockNode(GT_COMMA(TYP_BYREF, op1, op2's op1)).
//
// In case of a chained GT_COMMA case, we sink the last
// GT_COMMA below the blockNode addr.
GenTree* blockNodeAddr = blockNode->AsOp()->gtOp1;
assert(blockNodeAddr->gtType == TYP_BYREF);
GenTree* commaNode = parent;
commaNode->gtType = TYP_BYREF;
commaNode->AsOp()->gtOp2 = blockNodeAddr;
blockNode->AsOp()->gtOp1 = commaNode;
if (parent == structVal)
{
structVal = blockNode;
}
alreadyNormalized = true;
}
}
break;
default:
noway_assert(!"Unexpected node in impNormStructVal()");
break;
}
structVal->gtType = structType;
if (!alreadyNormalized || forceNormalization)
{
if (makeTemp)
{
unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
// The structVal is now the temp itself
structLcl = gtNewLclvNode(tmpNum, structType)->AsLclVarCommon();
structVal = structLcl;
}
if ((forceNormalization || (structType == TYP_STRUCT)) && !structVal->OperIsBlk())
{
// Wrap it in a GT_OBJ
structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
}
}
if (structLcl != nullptr)
{
// An OBJ on an ADDR(LCL_VAR) can never raise an exception
// so we don't set GTF_EXCEPT here.
if (!lvaIsImplicitByRefLocal(structLcl->GetLclNum()))
{
structVal->gtFlags &= ~GTF_GLOB_REF;
}
}
else if (structVal->OperIsBlk())
{
// In general an OBJ is an indirection and could raise an exception.
structVal->gtFlags |= GTF_EXCEPT;
}
return structVal;
}
/******************************************************************************/
// Given a type token, generate code that will evaluate to the correct
// handle representation of that token (type handle, field handle, or method handle)
//
// For most cases, the handle is determined at compile-time, and the code
// generated is simply an embedded handle.
//
// Run-time lookup is required if the enclosing method is shared between instantiations
// and the token refers to formal type parameters whose instantiation is not known
// at compile-time.
//
GenTree* Compiler::impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken,
bool* pRuntimeLookup /* = NULL */,
bool mustRestoreHandle /* = false */,
bool importParent /* = false */)
{
assert(!fgGlobalMorph);
CORINFO_GENERICHANDLE_RESULT embedInfo;
info.compCompHnd->embedGenericHandle(pResolvedToken, importParent, &embedInfo);
if (pRuntimeLookup)
{
*pRuntimeLookup = embedInfo.lookup.lookupKind.needsRuntimeLookup;
}
if (mustRestoreHandle && !embedInfo.lookup.lookupKind.needsRuntimeLookup)
{
switch (embedInfo.handleType)
{
case CORINFO_HANDLETYPE_CLASS:
info.compCompHnd->classMustBeLoadedBeforeCodeIsRun((CORINFO_CLASS_HANDLE)embedInfo.compileTimeHandle);
break;
case CORINFO_HANDLETYPE_METHOD:
info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun((CORINFO_METHOD_HANDLE)embedInfo.compileTimeHandle);
break;
case CORINFO_HANDLETYPE_FIELD:
info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
info.compCompHnd->getFieldClass((CORINFO_FIELD_HANDLE)embedInfo.compileTimeHandle));
break;
default:
break;
}
}
// Generate the full lookup tree. May be null if we're abandoning an inline attempt.
GenTree* result = impLookupToTree(pResolvedToken, &embedInfo.lookup, gtTokenToIconFlags(pResolvedToken->token),
embedInfo.compileTimeHandle);
// If we have a result and it requires runtime lookup, wrap it in a runtime lookup node.
if ((result != nullptr) && embedInfo.lookup.lookupKind.needsRuntimeLookup)
{
result = gtNewRuntimeLookup(embedInfo.compileTimeHandle, embedInfo.handleType, result);
}
return result;
}
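//------------------------------------------------------------------------
// impLookupToTree: Build a tree that evaluates to the handle described by the
// given lookup: either an embedded (possibly indirected) constant handle, or,
// when a runtime lookup is required, a dictionary-based lookup tree. May return
// nullptr when an inline attempt must be abandoned.
//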
GenTree* Compiler::impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_LOOKUP* pLookup,
GenTreeFlags handleFlags,
void* compileTimeHandle)
{
if (!pLookup->lookupKind.needsRuntimeLookup)
{
// No runtime lookup is required.
// Access is direct or memory-indirect (of a fixed address) reference
CORINFO_GENERIC_HANDLE handle = nullptr;
void* pIndirection = nullptr;
assert(pLookup->constLookup.accessType != IAT_PPVALUE && pLookup->constLookup.accessType != IAT_RELPVALUE);
if (pLookup->constLookup.accessType == IAT_VALUE)
{
handle = pLookup->constLookup.handle;
}
else if (pLookup->constLookup.accessType == IAT_PVALUE)
{
pIndirection = pLookup->constLookup.addr;
}
GenTree* addr = gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle);
#ifdef DEBUG
size_t handleToTrack;
if (handleFlags == GTF_ICON_TOKEN_HDL)
{
handleToTrack = 0;
}
else
{
handleToTrack = (size_t)compileTimeHandle;
}
if (handle != nullptr)
{
addr->AsIntCon()->gtTargetHandle = handleToTrack;
}
else
{
addr->gtGetOp1()->AsIntCon()->gtTargetHandle = handleToTrack;
}
#endif
return addr;
}
if (pLookup->lookupKind.runtimeLookupKind == CORINFO_LOOKUP_NOT_SUPPORTED)
{
// Runtime does not support inlining of all shapes of runtime lookups
// Inlining has to be aborted in such a case
assert(compIsForInlining());
compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
return nullptr;
}
// Need to use dictionary-based access which depends on the typeContext
// which is only available at runtime, not at compile-time.
return impRuntimeLookupToTree(pResolvedToken, pLookup, compileTimeHandle);
}
#ifdef FEATURE_READYTORUN
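//------------------------------------------------------------------------
// impReadyToRunLookupToTree: Build a handle node for a ReadyToRun constant
// lookup, embedding either the handle value itself or the address of its
// indirection cell.
//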
GenTree* Compiler::impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup,
GenTreeFlags handleFlags,
void* compileTimeHandle)
{
CORINFO_GENERIC_HANDLE handle = nullptr;
void* pIndirection = nullptr;
assert(pLookup->accessType != IAT_PPVALUE && pLookup->accessType != IAT_RELPVALUE);
if (pLookup->accessType == IAT_VALUE)
{
handle = pLookup->handle;
}
else if (pLookup->accessType == IAT_PVALUE)
{
pIndirection = pLookup->addr;
}
GenTree* addr = gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle);
#ifdef DEBUG
assert((handleFlags == GTF_ICON_CLASS_HDL) || (handleFlags == GTF_ICON_METHOD_HDL));
if (handle != nullptr)
{
addr->AsIntCon()->gtTargetHandle = (size_t)compileTimeHandle;
}
else
{
addr->gtGetOp1()->AsIntCon()->gtTargetHandle = (size_t)compileTimeHandle;
}
#endif // DEBUG
return addr;
}
//------------------------------------------------------------------------
// impIsCastHelperEligibleForClassProbe: Checks whether a tree is a cast helper eligible
// to be profiled and then optimized with PGO data
//
// Arguments:
// tree - the tree object to check
//
// Returns:
// true if the tree is a cast helper eligible to be profiled
//
bool Compiler::impIsCastHelperEligibleForClassProbe(GenTree* tree)
{
if (!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR) || (JitConfig.JitProfileCasts() != 1))
{
return false;
}
if (tree->IsCall() && tree->AsCall()->gtCallType == CT_HELPER)
{
const CorInfoHelpFunc helper = eeGetHelperNum(tree->AsCall()->gtCallMethHnd);
if ((helper == CORINFO_HELP_ISINSTANCEOFINTERFACE) || (helper == CORINFO_HELP_ISINSTANCEOFCLASS) ||
(helper == CORINFO_HELP_CHKCASTCLASS) || (helper == CORINFO_HELP_CHKCASTINTERFACE))
{
return true;
}
}
return false;
}
//------------------------------------------------------------------------
// impIsCastHelperMayHaveProfileData: Checks whether a cast helper might
// have profile data
//
// Arguments:
// helper - the helper function to check
//
// Returns:
// true if the helper is a cast helper with potential profile data
//
bool Compiler::impIsCastHelperMayHaveProfileData(CorInfoHelpFunc helper)
{
if (JitConfig.JitConsumeProfileForCasts() == 0)
{
return false;
}
if (!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBOPT))
{
return false;
}
if ((helper == CORINFO_HELP_ISINSTANCEOFINTERFACE) || (helper == CORINFO_HELP_ISINSTANCEOFCLASS) ||
(helper == CORINFO_HELP_CHKCASTCLASS) || (helper == CORINFO_HELP_CHKCASTINTERFACE))
{
return true;
}
return false;
}
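//------------------------------------------------------------------------
// impReadyToRunHelperToTree: Create a helper call node whose entry point is
// resolved through the ReadyToRun lookup for the given token; returns nullptr
// if the runtime cannot provide the helper.
//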
GenTreeCall* Compiler::impReadyToRunHelperToTree(
CORINFO_RESOLVED_TOKEN* pResolvedToken,
CorInfoHelpFunc helper,
var_types type,
GenTreeCall::Use* args /* = nullptr */,
CORINFO_LOOKUP_KIND* pGenericLookupKind /* =NULL. Only used with generics */)
{
CORINFO_CONST_LOOKUP lookup;
if (!info.compCompHnd->getReadyToRunHelper(pResolvedToken, pGenericLookupKind, helper, &lookup))
{
return nullptr;
}
GenTreeCall* op1 = gtNewHelperCallNode(helper, type, args);
op1->setEntryPoint(lookup);
return op1;
}
#endif
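//------------------------------------------------------------------------
// impMethodPointer: Build a tree that yields the entry-point address for the
// method described by the resolved token and call info.
//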
GenTree* Compiler::impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
{
GenTree* op1 = nullptr;
switch (pCallInfo->kind)
{
case CORINFO_CALL:
op1 = new (this, GT_FTN_ADDR) GenTreeFptrVal(TYP_I_IMPL, pCallInfo->hMethod);
#ifdef FEATURE_READYTORUN
if (opts.IsReadyToRun())
{
op1->AsFptrVal()->gtEntryPoint = pCallInfo->codePointerLookup.constLookup;
}
#endif
break;
case CORINFO_CALL_CODE_POINTER:
op1 = impLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, GTF_ICON_FTN_ADDR, pCallInfo->hMethod);
break;
default:
noway_assert(!"unknown call kind");
break;
}
return op1;
}
//------------------------------------------------------------------------
// getRuntimeContextTree: find pointer to context for runtime lookup.
//
// Arguments:
// kind - lookup kind.
//
// Return Value:
// Return GenTree pointer to generic shared context.
//
// Notes:
// Reports about generic context using.
GenTree* Compiler::getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind)
{
GenTree* ctxTree = nullptr;
// Collectible types require that, for shared generic code, if we use the generic context parameter,
// we report it. (This is a conservative approach: we could detect some cases, particularly when the
// context parameter is 'this', where we do not need the eager reporting logic.)
lvaGenericsContextInUse = true;
Compiler* pRoot = impInlineRoot();
if (kind == CORINFO_LOOKUP_THISOBJ)
{
// this Object
ctxTree = gtNewLclvNode(pRoot->info.compThisArg, TYP_REF);
ctxTree->gtFlags |= GTF_VAR_CONTEXT;
// context is the method table pointer of the this object
ctxTree = gtNewMethodTableLookup(ctxTree);
}
else
{
assert(kind == CORINFO_LOOKUP_METHODPARAM || kind == CORINFO_LOOKUP_CLASSPARAM);
// Exact method descriptor as passed in
ctxTree = gtNewLclvNode(pRoot->info.compTypeCtxtArg, TYP_I_IMPL);
ctxTree->gtFlags |= GTF_VAR_CONTEXT;
}
return ctxTree;
}
/*****************************************************************************/
/* Import a dictionary lookup to access a handle in code shared between
generic instantiations.
The lookup depends on the typeContext which is only available at
runtime, and not at compile-time.
pLookup->token1 and pLookup->token2 specify the handle that is needed.
The cases are:
1. pLookup->indirections == CORINFO_USEHELPER : Call a helper passing it the
instantiation-specific handle, and the tokens to lookup the handle.
2. pLookup->indirections != CORINFO_USEHELPER :
2a. pLookup->testForNull == false : Dereference the instantiation-specific handle
to get the handle.
2b. pLookup->testForNull == true : Dereference the instantiation-specific handle.
If it is non-NULL, it is the handle required. Else, call a helper
to lookup the handle.
*/
GenTree* Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_LOOKUP* pLookup,
void* compileTimeHandle)
{
GenTree* ctxTree = getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind);
CORINFO_RUNTIME_LOOKUP* pRuntimeLookup = &pLookup->runtimeLookup;
// It's available only via the run-time helper function
if (pRuntimeLookup->indirections == CORINFO_USEHELPER)
{
#ifdef FEATURE_READYTORUN
if (opts.IsReadyToRun())
{
return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
gtNewCallArgs(ctxTree), &pLookup->lookupKind);
}
#endif
return gtNewRuntimeLookupHelperCallNode(pRuntimeLookup, ctxTree, compileTimeHandle);
}
// Slot pointer
GenTree* slotPtrTree = ctxTree;
if (pRuntimeLookup->testForNull)
{
slotPtrTree = impCloneExpr(ctxTree, &ctxTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("impRuntimeLookup slot"));
}
GenTree* indOffTree = nullptr;
GenTree* lastIndOfTree = nullptr;
// Apply the repeated indirections
for (WORD i = 0; i < pRuntimeLookup->indirections; i++)
{
if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset))
{
indOffTree = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("impRuntimeLookup indirectOffset"));
}
// The last indirection could be subject to a size check (dynamic dictionary expansion)
bool isLastIndirectionWithSizeCheck =
((i == pRuntimeLookup->indirections - 1) && (pRuntimeLookup->sizeOffset != CORINFO_NO_SIZE_CHECK));
if (i != 0)
{
slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
if (!isLastIndirectionWithSizeCheck)
{
slotPtrTree->gtFlags |= GTF_IND_INVARIANT;
}
}
if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset))
{
slotPtrTree = gtNewOperNode(GT_ADD, TYP_I_IMPL, indOffTree, slotPtrTree);
}
if (pRuntimeLookup->offsets[i] != 0)
{
if (isLastIndirectionWithSizeCheck)
{
lastIndOfTree = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("impRuntimeLookup indirectOffset"));
}
slotPtrTree =
gtNewOperNode(GT_ADD, TYP_I_IMPL, slotPtrTree, gtNewIconNode(pRuntimeLookup->offsets[i], TYP_I_IMPL));
}
}
// No null test required
if (!pRuntimeLookup->testForNull)
{
if (pRuntimeLookup->indirections == 0)
{
return slotPtrTree;
}
slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
if (!pRuntimeLookup->testForFixup)
{
return slotPtrTree;
}
impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark0"));
unsigned slotLclNum = lvaGrabTemp(true DEBUGARG("impRuntimeLookup test"));
impAssignTempGen(slotLclNum, slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr, impCurStmtDI);
GenTree* slot = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
// downcast the pointer to a TYP_INT on 64-bit targets
slot = impImplicitIorI4Cast(slot, TYP_INT);
// Use a GT_AND to check for the lowest bit and indirect if it is set
GenTree* test = gtNewOperNode(GT_AND, TYP_INT, slot, gtNewIconNode(1));
GenTree* relop = gtNewOperNode(GT_EQ, TYP_INT, test, gtNewIconNode(0));
// slot = GT_IND(slot - 1)
slot = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
GenTree* add = gtNewOperNode(GT_ADD, TYP_I_IMPL, slot, gtNewIconNode(-1, TYP_I_IMPL));
GenTree* indir = gtNewOperNode(GT_IND, TYP_I_IMPL, add);
indir->gtFlags |= GTF_IND_NONFAULTING;
indir->gtFlags |= GTF_IND_INVARIANT;
slot = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
GenTree* asg = gtNewAssignNode(slot, indir);
GenTreeColon* colon = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), asg);
GenTreeQmark* qmark = gtNewQmarkNode(TYP_VOID, relop, colon);
impAppendTree(qmark, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);
return gtNewLclvNode(slotLclNum, TYP_I_IMPL);
}
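// A null test is required: dereference the slot and fall back to the helper when the handle is null.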
assert(pRuntimeLookup->indirections != 0);
impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark1"));
// Extract the handle
GenTree* handleForNullCheck = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
handleForNullCheck->gtFlags |= GTF_IND_NONFAULTING;
// Call the helper
// - Setup argNode with the pointer to the signature returned by the lookup
GenTree* argNode = gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_GLOBAL_PTR, compileTimeHandle);
GenTreeCall::Use* helperArgs = gtNewCallArgs(ctxTree, argNode);
GenTreeCall* helperCall = gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, helperArgs);
// Check for null and possibly call helper
GenTree* nullCheck = gtNewOperNode(GT_NE, TYP_INT, handleForNullCheck, gtNewIconNode(0, TYP_I_IMPL));
GenTree* handleForResult = gtCloneExpr(handleForNullCheck);
GenTree* result = nullptr;
if (pRuntimeLookup->sizeOffset != CORINFO_NO_SIZE_CHECK)
{
// Dynamic dictionary expansion support
assert((lastIndOfTree != nullptr) && (pRuntimeLookup->indirections > 0));
// sizeValue = dictionary[pRuntimeLookup->sizeOffset]
GenTreeIntCon* sizeOffset = gtNewIconNode(pRuntimeLookup->sizeOffset, TYP_I_IMPL);
GenTree* sizeValueOffset = gtNewOperNode(GT_ADD, TYP_I_IMPL, lastIndOfTree, sizeOffset);
GenTree* sizeValue = gtNewOperNode(GT_IND, TYP_I_IMPL, sizeValueOffset);
sizeValue->gtFlags |= GTF_IND_NONFAULTING;
// sizeCheck fails if sizeValue < pRuntimeLookup->offsets[i]
GenTree* offsetValue = gtNewIconNode(pRuntimeLookup->offsets[pRuntimeLookup->indirections - 1], TYP_I_IMPL);
GenTree* sizeCheck = gtNewOperNode(GT_LE, TYP_INT, sizeValue, offsetValue);
// revert null check condition.
nullCheck->ChangeOperUnchecked(GT_EQ);
    // (sizeCheck fails || nullCheck fails) ? helperCall : handle.
// Add checks and the handle as call arguments, indirect call transformer will handle this.
helperCall->gtCallArgs = gtPrependNewCallArg(handleForResult, helperCall->gtCallArgs);
helperCall->gtCallArgs = gtPrependNewCallArg(sizeCheck, helperCall->gtCallArgs);
helperCall->gtCallArgs = gtPrependNewCallArg(nullCheck, helperCall->gtCallArgs);
result = helperCall;
addExpRuntimeLookupCandidate(helperCall);
}
else
{
GenTreeColon* colonNullCheck = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL, handleForResult, helperCall);
result = gtNewQmarkNode(TYP_I_IMPL, nullCheck, colonNullCheck);
}
unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling Runtime Lookup tree"));
impAssignTempGen(tmp, result, (unsigned)CHECK_SPILL_NONE);
return gtNewLclvNode(tmp, TYP_I_IMPL);
}
/******************************************************************************
* Spills the stack at verCurrentState.esStack[level] and replaces it with a temp.
* If tnum!=BAD_VAR_NUM, the temp var used to replace the tree is tnum,
* else, grab a new temp.
* For structs (which can be pushed on the stack using obj, etc),
* special handling is needed
*/
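// RecursiveGuard is a small RAII helper used (under DEBUG) to detect recursive calls to
// impSpillStackEntry: Init() asserts the guarded flag is not already set and optionally
// sets it, and the destructor clears the flag again on scope exit.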
struct RecursiveGuard
{
public:
RecursiveGuard()
{
m_pAddress = nullptr;
}
~RecursiveGuard()
{
if (m_pAddress)
{
*m_pAddress = false;
}
}
void Init(bool* pAddress, bool bInitialize)
{
assert(pAddress && *pAddress == false && "Recursive guard violation");
m_pAddress = pAddress;
if (bInitialize)
{
*m_pAddress = true;
}
}
protected:
bool* m_pAddress;
};
bool Compiler::impSpillStackEntry(unsigned level,
unsigned tnum
#ifdef DEBUG
,
bool bAssertOnRecursion,
const char* reason
#endif
)
{
#ifdef DEBUG
RecursiveGuard guard;
guard.Init(&impNestedStackSpill, bAssertOnRecursion);
#endif
GenTree* tree = verCurrentState.esStack[level].val;
/* Allocate a temp if we haven't been asked to use a particular one */
if (tnum != BAD_VAR_NUM && (tnum >= lvaCount))
{
return false;
}
bool isNewTemp = false;
if (tnum == BAD_VAR_NUM)
{
tnum = lvaGrabTemp(true DEBUGARG(reason));
isNewTemp = true;
}
/* Assign the spilled entry to the temp */
impAssignTempGen(tnum, tree, verCurrentState.esStack[level].seTypeInfo.GetClassHandle(), level);
// If temp is newly introduced and a ref type, grab what type info we can.
if (isNewTemp && (lvaTable[tnum].lvType == TYP_REF))
{
assert(lvaTable[tnum].lvSingleDef == 0);
lvaTable[tnum].lvSingleDef = 1;
JITDUMP("Marked V%02u as a single def temp\n", tnum);
CORINFO_CLASS_HANDLE stkHnd = verCurrentState.esStack[level].seTypeInfo.GetClassHandle();
lvaSetClass(tnum, tree, stkHnd);
// If we're assigning a GT_RET_EXPR, note the temp over on the call,
// so the inliner can use it in case it needs a return spill temp.
if (tree->OperGet() == GT_RET_EXPR)
{
JITDUMP("\n*** see V%02u = GT_RET_EXPR, noting temp\n", tnum);
GenTree* call = tree->AsRetExpr()->gtInlineCandidate;
InlineCandidateInfo* ici = call->AsCall()->gtInlineCandidateInfo;
ici->preexistingSpillTemp = tnum;
}
}
// The tree type may be modified by impAssignTempGen, so use the type of the lclVar.
var_types type = genActualType(lvaTable[tnum].TypeGet());
GenTree* temp = gtNewLclvNode(tnum, type);
verCurrentState.esStack[level].val = temp;
return true;
}
/*****************************************************************************
*
* Ensure that the stack has only spilled values
*/
void Compiler::impSpillStackEnsure(bool spillLeaves)
{
assert(!spillLeaves || opts.compDbgCode);
for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
{
GenTree* tree = verCurrentState.esStack[level].val;
if (!spillLeaves && tree->OperIsLeaf())
{
continue;
}
// Temps introduced by the importer itself don't need to be spilled
bool isTempLcl =
(tree->OperGet() == GT_LCL_VAR) && (tree->AsLclVarCommon()->GetLclNum() >= info.compLocalsCount);
if (isTempLcl)
{
continue;
}
impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillStackEnsure"));
}
}
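/*****************************************************************************
 *
 * Spill every entry on the evaluation stack to a temp
 */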
void Compiler::impSpillEvalStack()
{
for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
{
impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillEvalStack"));
}
}
/*****************************************************************************
*
* If the stack contains any trees with side effects in them, assign those
* trees to temps and append the assignments to the statement list.
* On return the stack is guaranteed to be empty.
*/
inline void Compiler::impEvalSideEffects()
{
impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("impEvalSideEffects"));
verCurrentState.esStackDepth = 0;
}
/*****************************************************************************
*
* If the stack contains any trees with side effects in them, assign those
* trees to temps and replace them on the stack with refs to their temps.
* [0..chkLevel) is the portion of the stack which will be checked and spilled.
*/
inline void Compiler::impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason))
{
assert(chkLevel != (unsigned)CHECK_SPILL_NONE);
/* Before we make any appends to the tree list we must spill the
* "special" side effects (GTF_ORDER_SIDEEFF on a GT_CATCH_ARG) */
impSpillSpecialSideEff();
if (chkLevel == (unsigned)CHECK_SPILL_ALL)
{
chkLevel = verCurrentState.esStackDepth;
}
assert(chkLevel <= verCurrentState.esStackDepth);
GenTreeFlags spillFlags = spillGlobEffects ? GTF_GLOB_EFFECT : GTF_SIDE_EFFECT;
for (unsigned i = 0; i < chkLevel; i++)
{
GenTree* tree = verCurrentState.esStack[i].val;
if ((tree->gtFlags & spillFlags) != 0 ||
(spillGlobEffects && // Only consider the following when spillGlobEffects == true
!impIsAddressInLocal(tree) && // No need to spill the GT_ADDR node on a local.
gtHasLocalsWithAddrOp(tree))) // Spill if we still see GT_LCL_VAR that contains lvHasLdAddrOp or
// lvAddrTaken flag.
{
impSpillStackEntry(i, BAD_VAR_NUM DEBUGARG(false) DEBUGARG(reason));
}
}
}
/*****************************************************************************
*
* If the stack contains any trees with special side effects in them, assign
* those trees to temps and replace them on the stack with refs to their temps.
*/
inline void Compiler::impSpillSpecialSideEff()
{
// Only exception objects need to be carefully handled
if (!compCurBB->bbCatchTyp)
{
return;
}
for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
{
GenTree* tree = verCurrentState.esStack[level].val;
// Make sure if we have an exception object in the sub tree we spill ourselves.
if (gtHasCatchArg(tree))
{
impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillSpecialSideEff"));
}
}
}
/*****************************************************************************
*
* Spill all stack references to value classes (TYP_STRUCT nodes)
*/
void Compiler::impSpillValueClasses()
{
for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
{
GenTree* tree = verCurrentState.esStack[level].val;
if (fgWalkTreePre(&tree, impFindValueClasses) == WALK_ABORT)
{
// Tree walk was aborted, which means that we found a
// value class on the stack. Need to spill that
// stack entry.
impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillValueClasses"));
}
}
}
/*****************************************************************************
*
* Callback that checks if a tree node is TYP_STRUCT
*/
Compiler::fgWalkResult Compiler::impFindValueClasses(GenTree** pTree, fgWalkData* data)
{
fgWalkResult walkResult = WALK_CONTINUE;
if ((*pTree)->gtType == TYP_STRUCT)
{
// Abort the walk and indicate that we found a value class
walkResult = WALK_ABORT;
}
return walkResult;
}
/*****************************************************************************
*
* If the stack contains any trees with references to local #lclNum, assign
* those trees to temps and replace their place on the stack with refs to
* their temps.
*/
void Compiler::impSpillLclRefs(ssize_t lclNum)
{
/* Before we make any appends to the tree list we must spill the
* "special" side effects (GTF_ORDER_SIDEEFF) - GT_CATCH_ARG */
impSpillSpecialSideEff();
for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
{
GenTree* tree = verCurrentState.esStack[level].val;
/* If the tree may throw an exception, and the block has a handler,
then we need to spill assignments to the local if the local is
live on entry to the handler.
Just spill 'em all without considering the liveness */
bool xcptnCaught = ehBlockHasExnFlowDsc(compCurBB) && (tree->gtFlags & (GTF_CALL | GTF_EXCEPT));
/* Skip the tree if it doesn't have an affected reference,
unless xcptnCaught */
if (xcptnCaught || gtHasRef(tree, lclNum))
{
impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillLclRefs"));
}
}
}
/*****************************************************************************
*
* Push catch arg onto the stack.
* If there are jumps to the beginning of the handler, insert basic block
* and spill catch arg to a temp. Update the handler block if necessary.
*
* Returns the basic block of the actual handler.
*/
BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd, bool isSingleBlockFilter)
{
// Do not inject the basic block twice on reimport. This should be
// hit only under JIT stress. See if the block is the one we injected.
// Note that EH canonicalization can inject internal blocks here. We might
// be able to re-use such a block (but we don't, right now).
if ((hndBlk->bbFlags & (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE)) ==
(BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE))
{
Statement* stmt = hndBlk->firstStmt();
if (stmt != nullptr)
{
GenTree* tree = stmt->GetRootNode();
assert(tree != nullptr);
if ((tree->gtOper == GT_ASG) && (tree->AsOp()->gtOp1->gtOper == GT_LCL_VAR) &&
(tree->AsOp()->gtOp2->gtOper == GT_CATCH_ARG))
{
tree = gtNewLclvNode(tree->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum(), TYP_REF);
impPushOnStack(tree, typeInfo(TI_REF, clsHnd));
return hndBlk->bbNext;
}
}
// If we get here, it must have been some other kind of internal block. It's possible that
// someone prepended something to our injected block, but that's unlikely.
}
/* Push the exception address value on the stack */
GenTree* arg = new (this, GT_CATCH_ARG) GenTree(GT_CATCH_ARG, TYP_REF);
/* Mark the node as having a side-effect - i.e. cannot be
* moved around since it is tied to a fixed location (EAX) */
arg->gtFlags |= GTF_ORDER_SIDEEFF;
#if defined(JIT32_GCENCODER)
const bool forceInsertNewBlock = isSingleBlockFilter || compStressCompile(STRESS_CATCH_ARG, 5);
#else
const bool forceInsertNewBlock = compStressCompile(STRESS_CATCH_ARG, 5);
#endif // defined(JIT32_GCENCODER)
/* Spill GT_CATCH_ARG to a temp if there are jumps to the beginning of the handler */
if (hndBlk->bbRefs > 1 || forceInsertNewBlock)
{
if (hndBlk->bbRefs == 1)
{
hndBlk->bbRefs++;
}
/* Create extra basic block for the spill */
BasicBlock* newBlk = fgNewBBbefore(BBJ_NONE, hndBlk, /* extendRegion */ true);
newBlk->bbFlags |= BBF_IMPORTED | BBF_DONT_REMOVE;
newBlk->inheritWeight(hndBlk);
newBlk->bbCodeOffs = hndBlk->bbCodeOffs;
/* Account for the new link we are about to create */
hndBlk->bbRefs++;
// Spill into a temp.
unsigned tempNum = lvaGrabTemp(false DEBUGARG("SpillCatchArg"));
lvaTable[tempNum].lvType = TYP_REF;
GenTree* argAsg = gtNewTempAssign(tempNum, arg);
arg = gtNewLclvNode(tempNum, TYP_REF);
hndBlk->bbStkTempsIn = tempNum;
Statement* argStmt;
if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
{
            // Report the debug info. impImportBlockCode won't treat the actual handler as an exception block and
            // thus won't do it for us.
// TODO-DEBUGINFO: Previous code always set stack as non-empty
// here. Can we not just use impCurStmtOffsSet? Are we out of sync
// here with the stack?
impCurStmtDI = DebugInfo(compInlineContext, ILLocation(newBlk->bbCodeOffs, false, false));
argStmt = gtNewStmt(argAsg, impCurStmtDI);
}
else
{
argStmt = gtNewStmt(argAsg);
}
fgInsertStmtAtEnd(newBlk, argStmt);
}
impPushOnStack(arg, typeInfo(TI_REF, clsHnd));
return hndBlk;
}
/*****************************************************************************
*
* Given a tree, clone it. *pClone is set to the cloned tree.
* Returns the original tree if the cloning was easy,
 * else returns the temp to which the tree had to be spilled.
* If the tree has side-effects, it will be spilled to a temp.
*/
GenTree* Compiler::impCloneExpr(GenTree* tree,
GenTree** pClone,
CORINFO_CLASS_HANDLE structHnd,
unsigned curLevel,
Statement** pAfterStmt DEBUGARG(const char* reason))
{
if (!(tree->gtFlags & GTF_GLOB_EFFECT))
{
GenTree* clone = gtClone(tree, true);
if (clone)
{
*pClone = clone;
return tree;
}
}
/* Store the operand in a temp and return the temp */
unsigned temp = lvaGrabTemp(true DEBUGARG(reason));
// impAssignTempGen() may change tree->gtType to TYP_VOID for calls which
// return a struct type. It also may modify the struct type to a more
// specialized type (e.g. a SIMD type). So we will get the type from
// the lclVar AFTER calling impAssignTempGen().
impAssignTempGen(temp, tree, structHnd, curLevel, pAfterStmt, impCurStmtDI);
var_types type = genActualType(lvaTable[temp].TypeGet());
*pClone = gtNewLclvNode(temp, type);
return gtNewLclvNode(temp, type);
}
//------------------------------------------------------------------------
// impCreateDIWithCurrentStackInfo: Create a DebugInfo instance with the
// specified IL offset and 'is call' bit, using the current stack to determine
// whether to set the 'stack empty' bit.
//
// Arguments:
// offs - the IL offset for the DebugInfo
// isCall - whether the created DebugInfo should have the IsCall bit set
//
// Return Value:
// The DebugInfo instance.
//
DebugInfo Compiler::impCreateDIWithCurrentStackInfo(IL_OFFSET offs, bool isCall)
{
assert(offs != BAD_IL_OFFSET);
bool isStackEmpty = verCurrentState.esStackDepth <= 0;
return DebugInfo(compInlineContext, ILLocation(offs, isStackEmpty, isCall));
}
//------------------------------------------------------------------------
// impCurStmtOffsSet: Set the "current debug info" to attach to statements that
// we are generating next.
//
// Arguments:
// offs - the IL offset
//
// Remarks:
// This function will be called in the main IL processing loop when it is
// determined that we have reached a location in the IL stream for which we
// want to report debug information. This is the main way we determine which
// statements to report debug info for to the EE: for other statements, they
// will have no debug information attached.
//
inline void Compiler::impCurStmtOffsSet(IL_OFFSET offs)
{
if (offs == BAD_IL_OFFSET)
{
impCurStmtDI = DebugInfo(compInlineContext, ILLocation());
}
else
{
impCurStmtDI = impCreateDIWithCurrentStackInfo(offs, false);
}
}
//------------------------------------------------------------------------
// impCanSpillNow: check whether it is possible to spill all values from eeStack to local variables.
//
// Arguments:
// prevOpcode - last importer opcode
//
// Return Value:
// true if it is legal, false if it could be a sequence that we do not want to divide.
bool Compiler::impCanSpillNow(OPCODE prevOpcode)
{
// Don't spill after ldtoken, newarr and newobj, because it could be a part of the InitializeArray sequence.
    // Avoid breaking up the sequence so that impInitializeArrayIntrinsic can succeed.
return (prevOpcode != CEE_LDTOKEN) && (prevOpcode != CEE_NEWARR) && (prevOpcode != CEE_NEWOBJ);
}
/*****************************************************************************
*
* Remember the instr offset for the statements
*
* When we do impAppendTree(tree), we can't set stmt->SetLastILOffset(impCurOpcOffs),
* if the append was done because of a partial stack spill,
* as some of the trees corresponding to code up to impCurOpcOffs might
* still be sitting on the stack.
* So we delay calling of SetLastILOffset() until impNoteLastILoffs().
* This should be called when an opcode finally/explicitly causes
* impAppendTree(tree) to be called (as opposed to being called because of
* a spill caused by the opcode)
*/
#ifdef DEBUG
void Compiler::impNoteLastILoffs()
{
if (impLastILoffsStmt == nullptr)
{
// We should have added a statement for the current basic block
        // Is this assert correct?
assert(impLastStmt);
impLastStmt->SetLastILOffset(compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs);
}
else
{
impLastILoffsStmt->SetLastILOffset(compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs);
impLastILoffsStmt = nullptr;
}
}
#endif // DEBUG
/*****************************************************************************
* We don't create any GenTree (excluding spills) for a branch.
* For debugging info, we need a placeholder so that we can note
* the IL offset in gtStmt.gtStmtOffs. So append an empty statement.
*/
void Compiler::impNoteBranchOffs()
{
if (opts.compDbgCode)
{
impAppendTree(gtNewNothingNode(), (unsigned)CHECK_SPILL_NONE, impCurStmtDI);
}
}
/*****************************************************************************
* Locate the next stmt boundary for which we need to record info.
* We will have to spill the stack at such boundaries if it is not
* already empty.
* Returns the next stmt boundary (after the start of the block)
*/
unsigned Compiler::impInitBlockLineInfo()
{
/* Assume the block does not correspond with any IL offset. This prevents
us from reporting extra offsets. Extra mappings can cause confusing
stepping, especially if the extra mapping is a jump-target, and the
debugger does not ignore extra mappings, but instead rewinds to the
nearest known offset */
impCurStmtOffsSet(BAD_IL_OFFSET);
IL_OFFSET blockOffs = compCurBB->bbCodeOffs;
if ((verCurrentState.esStackDepth == 0) && (info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES))
{
impCurStmtOffsSet(blockOffs);
}
/* Always report IL offset 0 or some tests get confused.
Probably a good idea anyways */
if (blockOffs == 0)
{
impCurStmtOffsSet(blockOffs);
}
if (!info.compStmtOffsetsCount)
{
return ~0;
}
/* Find the lowest explicit stmt boundary within the block */
/* Start looking at an entry that is based on our instr offset */
unsigned index = (info.compStmtOffsetsCount * blockOffs) / info.compILCodeSize;
if (index >= info.compStmtOffsetsCount)
{
index = info.compStmtOffsetsCount - 1;
}
/* If we've guessed too far, back up */
while (index > 0 && info.compStmtOffsets[index - 1] >= blockOffs)
{
index--;
}
/* If we guessed short, advance ahead */
while (info.compStmtOffsets[index] < blockOffs)
{
index++;
if (index == info.compStmtOffsetsCount)
{
return info.compStmtOffsetsCount;
}
}
assert(index < info.compStmtOffsetsCount);
if (info.compStmtOffsets[index] == blockOffs)
{
/* There is an explicit boundary for the start of this basic block.
So we will start with bbCodeOffs. Else we will wait until we
get to the next explicit boundary */
impCurStmtOffsSet(blockOffs);
index++;
}
return index;
}
/*****************************************************************************/
bool Compiler::impOpcodeIsCallOpcode(OPCODE opcode)
{
switch (opcode)
{
case CEE_CALL:
case CEE_CALLI:
case CEE_CALLVIRT:
return true;
default:
return false;
}
}
/*****************************************************************************/
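// Returns true for opcodes that constitute a call-site boundary for debug info purposes.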
static inline bool impOpcodeIsCallSiteBoundary(OPCODE opcode)
{
switch (opcode)
{
case CEE_CALL:
case CEE_CALLI:
case CEE_CALLVIRT:
case CEE_JMP:
case CEE_NEWOBJ:
case CEE_NEWARR:
return true;
default:
return false;
}
}
/*****************************************************************************/
// One might think it is worth caching these values, but results indicate
// that it isn't.
// In addition, caching them causes SuperPMI to be unable to completely
// encapsulate an individual method context.
CORINFO_CLASS_HANDLE Compiler::impGetRefAnyClass()
{
CORINFO_CLASS_HANDLE refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF);
assert(refAnyClass != (CORINFO_CLASS_HANDLE) nullptr);
return refAnyClass;
}
CORINFO_CLASS_HANDLE Compiler::impGetTypeHandleClass()
{
CORINFO_CLASS_HANDLE typeHandleClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPE_HANDLE);
assert(typeHandleClass != (CORINFO_CLASS_HANDLE) nullptr);
return typeHandleClass;
}
CORINFO_CLASS_HANDLE Compiler::impGetRuntimeArgumentHandle()
{
CORINFO_CLASS_HANDLE argIteratorClass = info.compCompHnd->getBuiltinClass(CLASSID_ARGUMENT_HANDLE);
assert(argIteratorClass != (CORINFO_CLASS_HANDLE) nullptr);
return argIteratorClass;
}
CORINFO_CLASS_HANDLE Compiler::impGetStringClass()
{
CORINFO_CLASS_HANDLE stringClass = info.compCompHnd->getBuiltinClass(CLASSID_STRING);
assert(stringClass != (CORINFO_CLASS_HANDLE) nullptr);
return stringClass;
}
CORINFO_CLASS_HANDLE Compiler::impGetObjectClass()
{
CORINFO_CLASS_HANDLE objectClass = info.compCompHnd->getBuiltinClass(CLASSID_SYSTEM_OBJECT);
assert(objectClass != (CORINFO_CLASS_HANDLE) nullptr);
return objectClass;
}
/*****************************************************************************
* "&var" can be used either as TYP_BYREF or TYP_I_IMPL, but we
* set its type to TYP_BYREF when we create it. We know if it can be
* changed to TYP_I_IMPL only at the point where we use it
*/
/* static */
void Compiler::impBashVarAddrsToI(GenTree* tree1, GenTree* tree2)
{
if (tree1->IsLocalAddrExpr() != nullptr)
{
tree1->gtType = TYP_I_IMPL;
}
if (tree2 && (tree2->IsLocalAddrExpr() != nullptr))
{
tree2->gtType = TYP_I_IMPL;
}
}
/*****************************************************************************
* TYP_INT and TYP_I_IMPL can be used almost interchangeably, but we want
* to make that an explicit cast in our trees, so any implicit casts that
* exist in the IL (at least on 64-bit where TYP_I_IMPL != TYP_INT) are
* turned into explicit casts here.
* We also allow an implicit conversion of a ldnull into a TYP_I_IMPL(0)
*/
GenTree* Compiler::impImplicitIorI4Cast(GenTree* tree, var_types dstTyp)
{
var_types currType = genActualType(tree->gtType);
var_types wantedType = genActualType(dstTyp);
if (wantedType != currType)
{
// Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
if ((tree->OperGet() == GT_CNS_INT) && varTypeIsI(dstTyp))
{
if (!varTypeIsI(tree->gtType) || ((tree->gtType == TYP_REF) && (tree->AsIntCon()->gtIconVal == 0)))
{
tree->gtType = TYP_I_IMPL;
}
}
#ifdef TARGET_64BIT
else if (varTypeIsI(wantedType) && (currType == TYP_INT))
{
// Note that this allows TYP_INT to be cast to a TYP_I_IMPL when wantedType is a TYP_BYREF or TYP_REF
tree = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL);
}
else if ((wantedType == TYP_INT) && varTypeIsI(currType))
{
// Note that this allows TYP_BYREF or TYP_REF to be cast to a TYP_INT
tree = gtNewCastNode(TYP_INT, tree, false, TYP_INT);
}
#endif // TARGET_64BIT
}
return tree;
}
/*****************************************************************************
* TYP_FLOAT and TYP_DOUBLE can be used almost interchangeably in some cases,
* but we want to make that an explicit cast in our trees, so any implicit casts
* that exist in the IL are turned into explicit casts here.
*/
GenTree* Compiler::impImplicitR4orR8Cast(GenTree* tree, var_types dstTyp)
{
if (varTypeIsFloating(tree) && varTypeIsFloating(dstTyp) && (dstTyp != tree->gtType))
{
tree = gtNewCastNode(dstTyp, tree, false, dstTyp);
}
return tree;
}
//------------------------------------------------------------------------
// impInitializeArrayIntrinsic: Attempts to replace a call to InitializeArray
// with a GT_COPYBLK node.
//
// Arguments:
// sig - The InitializeArray signature.
//
// Return Value:
// A pointer to the newly created GT_COPYBLK node if the replacement succeeds or
// nullptr otherwise.
//
// Notes:
// The function recognizes the following IL pattern:
// ldc <length> or a list of ldc <lower bound>/<length>
// newarr or newobj
// dup
// ldtoken <field handle>
// call InitializeArray
// The lower bounds need not be constant except when the array rank is 1.
// The function recognizes all kinds of arrays thus enabling a small runtime
// such as CoreRT to skip providing an implementation for InitializeArray.
GenTree* Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig)
{
assert(sig->numArgs == 2);
GenTree* fieldTokenNode = impStackTop(0).val;
GenTree* arrayLocalNode = impStackTop(1).val;
//
    // Verify that the field token is known and valid. Note that it's also
// possible for the token to come from reflection, in which case we cannot do
// the optimization and must therefore revert to calling the helper. You can
// see an example of this in bvt\DynIL\initarray2.exe (in Main).
//
// Check to see if the ldtoken helper call is what we see here.
if (fieldTokenNode->gtOper != GT_CALL || (fieldTokenNode->AsCall()->gtCallType != CT_HELPER) ||
(fieldTokenNode->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD)))
{
return nullptr;
}
// Strip helper call away
fieldTokenNode = fieldTokenNode->AsCall()->gtCallArgs->GetNode();
if (fieldTokenNode->gtOper == GT_IND)
{
fieldTokenNode = fieldTokenNode->AsOp()->gtOp1;
}
// Check for constant
if (fieldTokenNode->gtOper != GT_CNS_INT)
{
return nullptr;
}
CORINFO_FIELD_HANDLE fieldToken = (CORINFO_FIELD_HANDLE)fieldTokenNode->AsIntCon()->gtCompileTimeHandle;
if (!fieldTokenNode->IsIconHandle(GTF_ICON_FIELD_HDL) || (fieldToken == nullptr))
{
return nullptr;
}
//
// We need to get the number of elements in the array and the size of each element.
// We verify that the newarr statement is exactly what we expect it to be.
// If it's not then we just return NULL and we don't optimize this call
//
    // It is possible that we don't have any statements in the block yet.
if (impLastStmt == nullptr)
{
return nullptr;
}
//
// We start by looking at the last statement, making sure it's an assignment, and
// that the target of the assignment is the array passed to InitializeArray.
//
GenTree* arrayAssignment = impLastStmt->GetRootNode();
if ((arrayAssignment->gtOper != GT_ASG) || (arrayAssignment->AsOp()->gtOp1->gtOper != GT_LCL_VAR) ||
(arrayLocalNode->gtOper != GT_LCL_VAR) || (arrayAssignment->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum() !=
arrayLocalNode->AsLclVarCommon()->GetLclNum()))
{
return nullptr;
}
//
// Make sure that the object being assigned is a helper call.
//
GenTree* newArrayCall = arrayAssignment->AsOp()->gtOp2;
if ((newArrayCall->gtOper != GT_CALL) || (newArrayCall->AsCall()->gtCallType != CT_HELPER))
{
return nullptr;
}
//
// Verify that it is one of the new array helpers.
//
bool isMDArray = false;
if (newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_DIRECT) &&
newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_OBJ) &&
newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_VC) &&
newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_ALIGN8)
#ifdef FEATURE_READYTORUN
&& newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1)
#endif
)
{
if (newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEW_MDARR))
{
return nullptr;
}
isMDArray = true;
}
CORINFO_CLASS_HANDLE arrayClsHnd = (CORINFO_CLASS_HANDLE)newArrayCall->AsCall()->compileTimeHelperArgumentHandle;
//
// Make sure we found a compile time handle to the array
//
if (!arrayClsHnd)
{
return nullptr;
}
unsigned rank = 0;
S_UINT32 numElements;
if (isMDArray)
{
rank = info.compCompHnd->getArrayRank(arrayClsHnd);
if (rank == 0)
{
return nullptr;
}
GenTreeCall::Use* tokenArg = newArrayCall->AsCall()->gtCallArgs;
assert(tokenArg != nullptr);
GenTreeCall::Use* numArgsArg = tokenArg->GetNext();
assert(numArgsArg != nullptr);
GenTreeCall::Use* argsArg = numArgsArg->GetNext();
assert(argsArg != nullptr);
//
// The number of arguments should be a constant between 1 and 64. The rank can't be 0
// so at least one length must be present and the rank can't exceed 32 so there can
// be at most 64 arguments - 32 lengths and 32 lower bounds.
//
if ((!numArgsArg->GetNode()->IsCnsIntOrI()) || (numArgsArg->GetNode()->AsIntCon()->IconValue() < 1) ||
(numArgsArg->GetNode()->AsIntCon()->IconValue() > 64))
{
return nullptr;
}
unsigned numArgs = static_cast<unsigned>(numArgsArg->GetNode()->AsIntCon()->IconValue());
bool lowerBoundsSpecified;
if (numArgs == rank * 2)
{
lowerBoundsSpecified = true;
}
else if (numArgs == rank)
{
lowerBoundsSpecified = false;
//
// If the rank is 1 and a lower bound isn't specified then the runtime creates
// a SDArray. Note that even if a lower bound is specified it can be 0 and then
// we get a SDArray as well, see the for loop below.
//
if (rank == 1)
{
isMDArray = false;
}
}
else
{
return nullptr;
}
//
// The rank is known to be at least 1 so we can start with numElements being 1
// to avoid the need to special case the first dimension.
//
numElements = S_UINT32(1);
struct Match
{
static bool IsArgsFieldInit(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
{
return (tree->OperGet() == GT_ASG) && IsArgsFieldIndir(tree->gtGetOp1(), index, lvaNewObjArrayArgs) &&
IsArgsAddr(tree->gtGetOp1()->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
}
static bool IsArgsFieldIndir(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
{
return (tree->OperGet() == GT_IND) && (tree->gtGetOp1()->OperGet() == GT_ADD) &&
(tree->gtGetOp1()->gtGetOp2()->IsIntegralConst(sizeof(INT32) * index)) &&
IsArgsAddr(tree->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
}
static bool IsArgsAddr(GenTree* tree, unsigned lvaNewObjArrayArgs)
{
return (tree->OperGet() == GT_ADDR) && (tree->gtGetOp1()->OperGet() == GT_LCL_VAR) &&
(tree->gtGetOp1()->AsLclVar()->GetLclNum() == lvaNewObjArrayArgs);
}
static bool IsComma(GenTree* tree)
{
return (tree != nullptr) && (tree->OperGet() == GT_COMMA);
}
};
unsigned argIndex = 0;
GenTree* comma;
for (comma = argsArg->GetNode(); Match::IsComma(comma); comma = comma->gtGetOp2())
{
if (lowerBoundsSpecified)
{
//
// In general lower bounds can be ignored because they're not needed to
// calculate the total number of elements. But for single dimensional arrays
// we need to know if the lower bound is 0 because in this case the runtime
// creates a SDArray and this affects the way the array data offset is calculated.
//
if (rank == 1)
{
GenTree* lowerBoundAssign = comma->gtGetOp1();
assert(Match::IsArgsFieldInit(lowerBoundAssign, argIndex, lvaNewObjArrayArgs));
GenTree* lowerBoundNode = lowerBoundAssign->gtGetOp2();
if (lowerBoundNode->IsIntegralConst(0))
{
isMDArray = false;
}
}
comma = comma->gtGetOp2();
argIndex++;
}
GenTree* lengthNodeAssign = comma->gtGetOp1();
assert(Match::IsArgsFieldInit(lengthNodeAssign, argIndex, lvaNewObjArrayArgs));
GenTree* lengthNode = lengthNodeAssign->gtGetOp2();
if (!lengthNode->IsCnsIntOrI())
{
return nullptr;
}
numElements *= S_SIZE_T(lengthNode->AsIntCon()->IconValue());
argIndex++;
}
assert((comma != nullptr) && Match::IsArgsAddr(comma, lvaNewObjArrayArgs));
if (argIndex != numArgs)
{
return nullptr;
}
}
else
{
//
// Make sure there are exactly two arguments: the array class and
// the number of elements.
//
GenTree* arrayLengthNode;
GenTreeCall::Use* args = newArrayCall->AsCall()->gtCallArgs;
#ifdef FEATURE_READYTORUN
if (newArrayCall->AsCall()->gtCallMethHnd == eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1))
{
// Array length is 1st argument for readytorun helper
arrayLengthNode = args->GetNode();
}
else
#endif
{
// Array length is 2nd argument for regular helper
arrayLengthNode = args->GetNext()->GetNode();
}
//
// This optimization is only valid for a constant array size.
//
if (arrayLengthNode->gtOper != GT_CNS_INT)
{
return nullptr;
}
numElements = S_SIZE_T(arrayLengthNode->AsIntCon()->gtIconVal);
if (!info.compCompHnd->isSDArray(arrayClsHnd))
{
return nullptr;
}
}
CORINFO_CLASS_HANDLE elemClsHnd;
var_types elementType = JITtype2varType(info.compCompHnd->getChildType(arrayClsHnd, &elemClsHnd));
//
// Note that genTypeSize will return zero for non primitive types, which is exactly
// what we want (size will then be 0, and we will catch this in the conditional below).
// Note that we don't expect this to fail for valid binaries, so we assert in the
// non-verification case (the verification case should not assert but rather correctly
// handle bad binaries). This assert is not guarding any specific invariant, but rather
// saying that we don't expect this to happen, and if it is hit, we need to investigate
// why.
//
S_UINT32 elemSize(genTypeSize(elementType));
S_UINT32 size = elemSize * S_UINT32(numElements);
if (size.IsOverflow())
{
return nullptr;
}
if ((size.Value() == 0) || (varTypeIsGC(elementType)))
{
return nullptr;
}
void* initData = info.compCompHnd->getArrayInitializationData(fieldToken, size.Value());
if (!initData)
{
return nullptr;
}
//
// At this point we are ready to commit to implementing the InitializeArray
// intrinsic using a struct assignment. Pop the arguments from the stack and
// return the struct assignment node.
//
impPopStack();
impPopStack();
const unsigned blkSize = size.Value();
unsigned dataOffset;
if (isMDArray)
{
dataOffset = eeGetMDArrayDataOffset(rank);
}
else
{
dataOffset = eeGetArrayDataOffset();
}
GenTree* dstAddr = gtNewOperNode(GT_ADD, TYP_BYREF, arrayLocalNode, gtNewIconNode(dataOffset, TYP_I_IMPL));
GenTree* dst = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, dstAddr, typGetBlkLayout(blkSize));
GenTree* src = gtNewIndOfIconHandleNode(TYP_STRUCT, (size_t)initData, GTF_ICON_CONST_PTR, true);
#ifdef DEBUG
src->gtGetOp1()->AsIntCon()->gtTargetHandle = THT_IntializeArrayIntrinsics;
#endif
return gtNewBlkOpNode(dst, // dst
src, // src
false, // volatile
true); // copyBlock
}
GenTree* Compiler::impCreateSpanIntrinsic(CORINFO_SIG_INFO* sig)
{
assert(sig->numArgs == 1);
assert(sig->sigInst.methInstCount == 1);
GenTree* fieldTokenNode = impStackTop(0).val;
//
// Verify that the field token is known and valid. Note that it's also
// possible for the token to come from reflection, in which case we cannot do
// the optimization and must therefore revert to calling the helper. You can
// see an example of this in bvt\DynIL\initarray2.exe (in Main).
//
// Check to see if the ldtoken helper call is what we see here.
if (fieldTokenNode->gtOper != GT_CALL || (fieldTokenNode->AsCall()->gtCallType != CT_HELPER) ||
(fieldTokenNode->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD)))
{
return nullptr;
}
// Strip helper call away
fieldTokenNode = fieldTokenNode->AsCall()->gtCallArgs->GetNode();
if (fieldTokenNode->gtOper == GT_IND)
{
fieldTokenNode = fieldTokenNode->AsOp()->gtOp1;
}
// Check for constant
if (fieldTokenNode->gtOper != GT_CNS_INT)
{
return nullptr;
}
CORINFO_FIELD_HANDLE fieldToken = (CORINFO_FIELD_HANDLE)fieldTokenNode->AsIntCon()->gtCompileTimeHandle;
if (!fieldTokenNode->IsIconHandle(GTF_ICON_FIELD_HDL) || (fieldToken == nullptr))
{
return nullptr;
}
CORINFO_CLASS_HANDLE fieldOwnerHnd = info.compCompHnd->getFieldClass(fieldToken);
CORINFO_CLASS_HANDLE fieldClsHnd;
var_types fieldElementType =
JITtype2varType(info.compCompHnd->getFieldType(fieldToken, &fieldClsHnd, fieldOwnerHnd));
unsigned totalFieldSize;
// Most static initialization data fields are of some structure, but it is possible for them to be of various
// primitive types as well
if (fieldElementType == var_types::TYP_STRUCT)
{
totalFieldSize = info.compCompHnd->getClassSize(fieldClsHnd);
}
else
{
totalFieldSize = genTypeSize(fieldElementType);
}
// Limit to primitive or enum type - see ArrayNative::GetSpanDataFrom()
CORINFO_CLASS_HANDLE targetElemHnd = sig->sigInst.methInst[0];
if (info.compCompHnd->getTypeForPrimitiveValueClass(targetElemHnd) == CORINFO_TYPE_UNDEF)
{
return nullptr;
}
const unsigned targetElemSize = info.compCompHnd->getClassSize(targetElemHnd);
assert(targetElemSize != 0);
const unsigned count = totalFieldSize / targetElemSize;
if (count == 0)
{
return nullptr;
}
void* data = info.compCompHnd->getArrayInitializationData(fieldToken, totalFieldSize);
if (!data)
{
return nullptr;
}
//
// Ready to commit to the work
//
impPopStack();
// Turn count and pointer value into constants.
GenTree* lengthValue = gtNewIconNode(count, TYP_INT);
GenTree* pointerValue = gtNewIconHandleNode((size_t)data, GTF_ICON_CONST_PTR);
// Construct ReadOnlySpan<T> to return.
CORINFO_CLASS_HANDLE spanHnd = sig->retTypeClass;
unsigned spanTempNum = lvaGrabTemp(true DEBUGARG("ReadOnlySpan<T> for CreateSpan<T>"));
lvaSetStruct(spanTempNum, spanHnd, false);
CORINFO_FIELD_HANDLE pointerFieldHnd = info.compCompHnd->getFieldInClass(spanHnd, 0);
CORINFO_FIELD_HANDLE lengthFieldHnd = info.compCompHnd->getFieldInClass(spanHnd, 1);
GenTreeLclFld* pointerField = gtNewLclFldNode(spanTempNum, TYP_BYREF, 0);
pointerField->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(pointerFieldHnd));
GenTree* pointerFieldAsg = gtNewAssignNode(pointerField, pointerValue);
GenTreeLclFld* lengthField = gtNewLclFldNode(spanTempNum, TYP_INT, TARGET_POINTER_SIZE);
lengthField->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(lengthFieldHnd));
GenTree* lengthFieldAsg = gtNewAssignNode(lengthField, lengthValue);
    // Now append a few statements to initialize the span
impAppendTree(lengthFieldAsg, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);
impAppendTree(pointerFieldAsg, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);
// And finally create a tree that points at the span.
return impCreateLocalNode(spanTempNum DEBUGARG(0));
}
//------------------------------------------------------------------------
// impIntrinsic: possibly expand intrinsic call into alternate IR sequence
//
// Arguments:
// newobjThis - for constructor calls, the tree for the newly allocated object
// clsHnd - handle for the intrinsic method's class
// method - handle for the intrinsic method
// sig - signature of the intrinsic method
// methodFlags - CORINFO_FLG_XXX flags of the intrinsic method
// memberRef - the token for the intrinsic method
// readonlyCall - true if call has a readonly prefix
// tailCall - true if call is in tail position
// pConstrainedResolvedToken -- resolved token for constrained call, or nullptr
// if call is not constrained
// constraintCallThisTransform -- this transform to apply for a constrained call
// pIntrinsicName [OUT] -- intrinsic name (see enumeration in namedintrinsiclist.h)
// for "traditional" jit intrinsics
// isSpecialIntrinsic [OUT] -- set true if intrinsic expansion is a call
// that is amenable to special downstream optimization opportunities
//
// Returns:
// IR tree to use in place of the call, or nullptr if the jit should treat
// the intrinsic call like a normal call.
//
// pIntrinsicName set to non-illegal value if the call is recognized as a
// traditional jit intrinsic, even if the intrinsic is not expanded.
//
// isSpecial set true if the expansion is subject to special
// optimizations later in the jit processing
//
// Notes:
// On success the IR tree may be a call to a different method or an inline
// sequence. If it is a call, then the intrinsic processing here is responsible
// for handling all the special cases, as upon return to impImportCall
// expanded intrinsics bypass most of the normal call processing.
//
// Intrinsics are generally not recognized in minopts and debug codegen.
//
// However, certain traditional intrinsics are identified as "must expand"
// if there is no fallback implementation to invoke; these must be handled
// in all codegen modes.
//
// New style intrinsics (where the fallback implementation is in IL) are
// identified as "must expand" if they are invoked from within their
// own method bodies.
//
GenTree* Compiler::impIntrinsic(GenTree* newobjThis,
CORINFO_CLASS_HANDLE clsHnd,
CORINFO_METHOD_HANDLE method,
CORINFO_SIG_INFO* sig,
unsigned methodFlags,
int memberRef,
bool readonlyCall,
bool tailCall,
CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
CORINFO_THIS_TRANSFORM constraintCallThisTransform,
NamedIntrinsic* pIntrinsicName,
bool* isSpecialIntrinsic)
{
assert((methodFlags & CORINFO_FLG_INTRINSIC) != 0);
bool mustExpand = false;
bool isSpecial = false;
NamedIntrinsic ni = NI_Illegal;
if ((methodFlags & CORINFO_FLG_INTRINSIC) != 0)
{
// The recursive non-virtual calls to Jit intrinsics are must-expand by convention.
mustExpand = mustExpand || (gtIsRecursiveCall(method) && !(methodFlags & CORINFO_FLG_VIRTUAL));
ni = lookupNamedIntrinsic(method);
// We specially support the following on all platforms to allow for dead
// code optimization and to more generally support recursive intrinsics.
if (ni == NI_IsSupported_True)
{
assert(sig->numArgs == 0);
return gtNewIconNode(true);
}
if (ni == NI_IsSupported_False)
{
assert(sig->numArgs == 0);
return gtNewIconNode(false);
}
if (ni == NI_Throw_PlatformNotSupportedException)
{
return impUnsupportedNamedIntrinsic(CORINFO_HELP_THROW_PLATFORM_NOT_SUPPORTED, method, sig, mustExpand);
}
#ifdef FEATURE_HW_INTRINSICS
if ((ni > NI_HW_INTRINSIC_START) && (ni < NI_HW_INTRINSIC_END))
{
GenTree* hwintrinsic = impHWIntrinsic(ni, clsHnd, method, sig, mustExpand);
if (mustExpand && (hwintrinsic == nullptr))
{
return impUnsupportedNamedIntrinsic(CORINFO_HELP_THROW_NOT_IMPLEMENTED, method, sig, mustExpand);
}
return hwintrinsic;
}
if ((ni > NI_SIMD_AS_HWINTRINSIC_START) && (ni < NI_SIMD_AS_HWINTRINSIC_END))
{
// These intrinsics aren't defined recursively and so they will never be mustExpand
// Instead, they provide software fallbacks that will be executed instead.
assert(!mustExpand);
return impSimdAsHWIntrinsic(ni, clsHnd, method, sig, newobjThis);
}
#endif // FEATURE_HW_INTRINSICS
}
*pIntrinsicName = ni;
if (ni == NI_System_StubHelpers_GetStubContext)
{
// must be done regardless of DbgCode and MinOpts
return gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL);
}
if (ni == NI_System_StubHelpers_NextCallReturnAddress)
{
// For now we just avoid inlining anything into these methods since
// this intrinsic is only rarely used. We could do this better if we
// wanted to by trying to match which call is the one we need to get
// the return address of.
info.compHasNextCallRetAddr = true;
return new (this, GT_LABEL) GenTree(GT_LABEL, TYP_I_IMPL);
}
switch (ni)
{
// CreateSpan must be expanded for NativeAOT
case NI_System_Runtime_CompilerServices_RuntimeHelpers_CreateSpan:
case NI_System_Runtime_CompilerServices_RuntimeHelpers_InitializeArray:
mustExpand |= IsTargetAbi(CORINFO_CORERT_ABI);
break;
case NI_System_ByReference_ctor:
case NI_System_ByReference_get_Value:
case NI_System_Activator_AllocatorOf:
case NI_System_Activator_DefaultConstructorOf:
case NI_System_Object_MethodTableOf:
case NI_System_EETypePtr_EETypePtrOf:
mustExpand = true;
break;
default:
break;
}
GenTree* retNode = nullptr;
// Under debug and minopts, only expand what is required.
// NextCallReturnAddress intrinsic returns the return address of the next call.
// If that call is an intrinsic and is expanded, codegen for NextCallReturnAddress will fail.
// To avoid that we conservatively expand only required intrinsics in methods that call
// the NextCallReturnAddress intrinsic.
if (!mustExpand && (opts.OptimizationDisabled() || info.compHasNextCallRetAddr))
{
*pIntrinsicName = NI_Illegal;
return retNode;
}
CorInfoType callJitType = sig->retType;
var_types callType = JITtype2varType(callJitType);
/* First do the intrinsics which are always smaller than a call */
if (ni != NI_Illegal)
{
assert(retNode == nullptr);
switch (ni)
{
case NI_Array_Address:
case NI_Array_Get:
case NI_Array_Set:
retNode = impArrayAccessIntrinsic(clsHnd, sig, memberRef, readonlyCall, ni);
break;
case NI_System_String_Equals:
{
retNode = impStringEqualsOrStartsWith(/*startsWith:*/ false, sig, methodFlags);
break;
}
case NI_System_MemoryExtensions_Equals:
case NI_System_MemoryExtensions_SequenceEqual:
{
retNode = impSpanEqualsOrStartsWith(/*startsWith:*/ false, sig, methodFlags);
break;
}
case NI_System_String_StartsWith:
{
retNode = impStringEqualsOrStartsWith(/*startsWith:*/ true, sig, methodFlags);
break;
}
case NI_System_MemoryExtensions_StartsWith:
{
retNode = impSpanEqualsOrStartsWith(/*startsWith:*/ true, sig, methodFlags);
break;
}
case NI_System_MemoryExtensions_AsSpan:
case NI_System_String_op_Implicit:
{
assert(sig->numArgs == 1);
isSpecial = impStackTop().val->OperIs(GT_CNS_STR);
break;
}
case NI_System_String_get_Chars:
{
GenTree* op2 = impPopStack().val;
GenTree* op1 = impPopStack().val;
retNode = gtNewIndexRef(TYP_USHORT, op1, op2);
retNode->gtFlags |= GTF_INX_STRING_LAYOUT;
break;
}
case NI_System_String_get_Length:
{
GenTree* op1 = impPopStack().val;
if (op1->OperIs(GT_CNS_STR))
{
// Optimize `ldstr + String::get_Length()` to CNS_INT
// e.g. "Hello".Length => 5
GenTreeIntCon* iconNode = gtNewStringLiteralLength(op1->AsStrCon());
if (iconNode != nullptr)
{
retNode = iconNode;
break;
}
}
GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, op1, OFFSETOF__CORINFO_String__stringLen, compCurBB);
op1 = arrLen;
// Getting the length of a null string should throw
op1->gtFlags |= GTF_EXCEPT;
retNode = op1;
break;
}
// Implement ByReference Ctor. This wraps the assignment of the ref into a byref-like field
// in a value type. The canonical example of this is Span<T>. In effect this is just a
// substitution. The parameter byref will be assigned into the newly allocated object.
case NI_System_ByReference_ctor:
{
// Remove call to constructor and directly assign the byref passed
// to the call to the first slot of the ByReference struct.
GenTree* op1 = impPopStack().val;
GenTree* thisptr = newobjThis;
CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0);
GenTree* field = gtNewFieldRef(TYP_BYREF, fldHnd, thisptr, 0);
GenTree* assign = gtNewAssignNode(field, op1);
GenTree* byReferenceStruct = gtCloneExpr(thisptr->gtGetOp1());
assert(byReferenceStruct != nullptr);
impPushOnStack(byReferenceStruct, typeInfo(TI_STRUCT, clsHnd));
retNode = assign;
break;
}
// Implement ptr value getter for ByReference struct.
case NI_System_ByReference_get_Value:
{
GenTree* op1 = impPopStack().val;
CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0);
GenTree* field = gtNewFieldRef(TYP_BYREF, fldHnd, op1, 0);
retNode = field;
break;
}
case NI_System_Runtime_CompilerServices_RuntimeHelpers_CreateSpan:
{
retNode = impCreateSpanIntrinsic(sig);
break;
}
case NI_System_Runtime_CompilerServices_RuntimeHelpers_InitializeArray:
{
retNode = impInitializeArrayIntrinsic(sig);
break;
}
case NI_System_Runtime_CompilerServices_RuntimeHelpers_IsKnownConstant:
{
GenTree* op1 = impPopStack().val;
if (op1->OperIsConst())
{
// op1 is a known constant, replace with 'true'.
retNode = gtNewIconNode(1);
JITDUMP("\nExpanding RuntimeHelpers.IsKnownConstant to true early\n");
// We can also consider FTN_ADDR and typeof(T) here
}
else
{
// op1 is not a known constant, we'll do the expansion in morph
retNode = new (this, GT_INTRINSIC) GenTreeIntrinsic(TYP_INT, op1, ni, method);
JITDUMP("\nConverting RuntimeHelpers.IsKnownConstant to:\n");
DISPTREE(retNode);
}
break;
}
case NI_System_Activator_AllocatorOf:
case NI_System_Activator_DefaultConstructorOf:
case NI_System_Object_MethodTableOf:
case NI_System_EETypePtr_EETypePtrOf:
{
assert(IsTargetAbi(CORINFO_CORERT_ABI)); // Only CoreRT supports it.
CORINFO_RESOLVED_TOKEN resolvedToken;
resolvedToken.tokenContext = impTokenLookupContextHandle;
resolvedToken.tokenScope = info.compScopeHnd;
resolvedToken.token = memberRef;
resolvedToken.tokenType = CORINFO_TOKENKIND_Method;
CORINFO_GENERICHANDLE_RESULT embedInfo;
info.compCompHnd->expandRawHandleIntrinsic(&resolvedToken, &embedInfo);
GenTree* rawHandle = impLookupToTree(&resolvedToken, &embedInfo.lookup, gtTokenToIconFlags(memberRef),
embedInfo.compileTimeHandle);
if (rawHandle == nullptr)
{
return nullptr;
}
noway_assert(genTypeSize(rawHandle->TypeGet()) == genTypeSize(TYP_I_IMPL));
unsigned rawHandleSlot = lvaGrabTemp(true DEBUGARG("rawHandle"));
impAssignTempGen(rawHandleSlot, rawHandle, clsHnd, (unsigned)CHECK_SPILL_NONE);
GenTree* lclVar = gtNewLclvNode(rawHandleSlot, TYP_I_IMPL);
GenTree* lclVarAddr = gtNewOperNode(GT_ADDR, TYP_I_IMPL, lclVar);
var_types resultType = JITtype2varType(sig->retType);
retNode = gtNewOperNode(GT_IND, resultType, lclVarAddr);
break;
}
case NI_System_Span_get_Item:
case NI_System_ReadOnlySpan_get_Item:
{
// Have index, stack pointer-to Span<T> s on the stack. Expand to:
//
// For Span<T>
// Comma
// BoundsCheck(index, s->_length)
// s->_pointer + index * sizeof(T)
//
// For ReadOnlySpan<T> -- same expansion, as it now returns a readonly ref
//
// Signature should show one class type parameter, which
// we need to examine.
assert(sig->sigInst.classInstCount == 1);
assert(sig->numArgs == 1);
CORINFO_CLASS_HANDLE spanElemHnd = sig->sigInst.classInst[0];
const unsigned elemSize = info.compCompHnd->getClassSize(spanElemHnd);
assert(elemSize > 0);
const bool isReadOnly = (ni == NI_System_ReadOnlySpan_get_Item);
JITDUMP("\nimpIntrinsic: Expanding %sSpan<T>.get_Item, T=%s, sizeof(T)=%u\n",
isReadOnly ? "ReadOnly" : "", eeGetClassName(spanElemHnd), elemSize);
GenTree* index = impPopStack().val;
GenTree* ptrToSpan = impPopStack().val;
GenTree* indexClone = nullptr;
GenTree* ptrToSpanClone = nullptr;
assert(genActualType(index) == TYP_INT);
assert(ptrToSpan->TypeGet() == TYP_BYREF);
#if defined(DEBUG)
if (verbose)
{
printf("with ptr-to-span\n");
gtDispTree(ptrToSpan);
printf("and index\n");
gtDispTree(index);
}
#endif // defined(DEBUG)
// We need to use both index and ptr-to-span twice, so clone or spill.
index = impCloneExpr(index, &indexClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Span.get_Item index"));
ptrToSpan = impCloneExpr(ptrToSpan, &ptrToSpanClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Span.get_Item ptrToSpan"));
// Bounds check
CORINFO_FIELD_HANDLE lengthHnd = info.compCompHnd->getFieldInClass(clsHnd, 1);
const unsigned lengthOffset = info.compCompHnd->getFieldOffset(lengthHnd);
GenTree* length = gtNewFieldRef(TYP_INT, lengthHnd, ptrToSpan, lengthOffset);
GenTree* boundsCheck = new (this, GT_BOUNDS_CHECK) GenTreeBoundsChk(index, length, SCK_RNGCHK_FAIL);
// Element access
index = indexClone;
#ifdef TARGET_64BIT
if (index->OperGet() == GT_CNS_INT)
{
index->gtType = TYP_I_IMPL;
}
else
{
index = gtNewCastNode(TYP_I_IMPL, index, true, TYP_I_IMPL);
}
#endif
if (elemSize != 1)
{
GenTree* sizeofNode = gtNewIconNode(static_cast<ssize_t>(elemSize), TYP_I_IMPL);
index = gtNewOperNode(GT_MUL, TYP_I_IMPL, index, sizeofNode);
}
CORINFO_FIELD_HANDLE ptrHnd = info.compCompHnd->getFieldInClass(clsHnd, 0);
const unsigned ptrOffset = info.compCompHnd->getFieldOffset(ptrHnd);
GenTree* data = gtNewFieldRef(TYP_BYREF, ptrHnd, ptrToSpanClone, ptrOffset);
GenTree* result = gtNewOperNode(GT_ADD, TYP_BYREF, data, index);
// Prepare result
var_types resultType = JITtype2varType(sig->retType);
assert(resultType == result->TypeGet());
retNode = gtNewOperNode(GT_COMMA, resultType, boundsCheck, result);
break;
}
case NI_System_RuntimeTypeHandle_GetValueInternal:
{
GenTree* op1 = impStackTop(0).val;
if (op1->gtOper == GT_CALL && (op1->AsCall()->gtCallType == CT_HELPER) &&
gtIsTypeHandleToRuntimeTypeHandleHelper(op1->AsCall()))
{
// Old tree
// Helper-RuntimeTypeHandle -> TreeToGetNativeTypeHandle
//
// New tree
// TreeToGetNativeTypeHandle
// Remove call to helper and return the native TypeHandle pointer that was the parameter
// to that helper.
op1 = impPopStack().val;
// Get native TypeHandle argument to old helper
GenTreeCall::Use* arg = op1->AsCall()->gtCallArgs;
assert(arg->GetNext() == nullptr);
op1 = arg->GetNode();
retNode = op1;
}
// Call the regular function.
break;
}
case NI_System_Type_GetTypeFromHandle:
{
GenTree* op1 = impStackTop(0).val;
CorInfoHelpFunc typeHandleHelper;
if (op1->gtOper == GT_CALL && (op1->AsCall()->gtCallType == CT_HELPER) &&
gtIsTypeHandleToRuntimeTypeHandleHelper(op1->AsCall(), &typeHandleHelper))
{
op1 = impPopStack().val;
// Replace helper with a more specialized helper that returns RuntimeType
if (typeHandleHelper == CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE)
{
typeHandleHelper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE;
}
else
{
assert(typeHandleHelper == CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE_MAYBENULL);
typeHandleHelper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL;
}
assert(op1->AsCall()->gtCallArgs->GetNext() == nullptr);
op1 = gtNewHelperCallNode(typeHandleHelper, TYP_REF, op1->AsCall()->gtCallArgs);
op1->gtType = TYP_REF;
retNode = op1;
}
break;
}
case NI_System_Type_op_Equality:
case NI_System_Type_op_Inequality:
{
JITDUMP("Importing Type.op_*Equality intrinsic\n");
GenTree* op1 = impStackTop(1).val;
GenTree* op2 = impStackTop(0).val;
GenTree* optTree = gtFoldTypeEqualityCall(ni == NI_System_Type_op_Equality, op1, op2);
if (optTree != nullptr)
{
// Success, clean up the evaluation stack.
impPopStack();
impPopStack();
// See if we can optimize even further, to a handle compare.
optTree = gtFoldTypeCompare(optTree);
// See if we can now fold a handle compare to a constant.
optTree = gtFoldExpr(optTree);
retNode = optTree;
}
else
{
// Retry optimizing these later
isSpecial = true;
}
break;
}
case NI_System_Enum_HasFlag:
{
GenTree* thisOp = impStackTop(1).val;
GenTree* flagOp = impStackTop(0).val;
GenTree* optTree = gtOptimizeEnumHasFlag(thisOp, flagOp);
if (optTree != nullptr)
{
// Optimization successful. Pop the stack for real.
impPopStack();
impPopStack();
retNode = optTree;
}
else
{
// Retry optimizing this during morph.
isSpecial = true;
}
break;
}
case NI_System_Type_IsAssignableFrom:
{
GenTree* typeTo = impStackTop(1).val;
GenTree* typeFrom = impStackTop(0).val;
retNode = impTypeIsAssignable(typeTo, typeFrom);
break;
}
case NI_System_Type_IsAssignableTo:
{
GenTree* typeTo = impStackTop(0).val;
GenTree* typeFrom = impStackTop(1).val;
retNode = impTypeIsAssignable(typeTo, typeFrom);
break;
}
case NI_System_Type_get_IsValueType:
{
// Optimize
//
// call Type.GetTypeFromHandle (which is replaced with CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE)
// call Type.IsValueType
//
// to `true` or `false`
// e.g. `typeof(int).IsValueType` => `true`
if (impStackTop().val->IsCall())
{
GenTreeCall* call = impStackTop().val->AsCall();
if (call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE))
{
CORINFO_CLASS_HANDLE hClass = gtGetHelperArgClassHandle(call->gtCallArgs->GetNode());
if (hClass != NO_CLASS_HANDLE)
{
retNode =
gtNewIconNode((eeIsValueClass(hClass) &&
// pointers are not value types (e.g. typeof(int*).IsValueType is false)
info.compCompHnd->asCorInfoType(hClass) != CORINFO_TYPE_PTR)
? 1
: 0);
impPopStack(); // drop CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE call
}
}
}
break;
}
case NI_System_Threading_Thread_get_ManagedThreadId:
{
if (impStackTop().val->OperIs(GT_RET_EXPR))
{
GenTreeCall* call = impStackTop().val->AsRetExpr()->gtInlineCandidate->AsCall();
if (call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC)
{
if (lookupNamedIntrinsic(call->gtCallMethHnd) == NI_System_Threading_Thread_get_CurrentThread)
{
// drop get_CurrentThread() call
impPopStack();
call->ReplaceWith(gtNewNothingNode(), this);
retNode = gtNewHelperCallNode(CORINFO_HELP_GETCURRENTMANAGEDTHREADID, TYP_INT);
}
}
}
break;
}
#ifdef TARGET_ARM64
// Intrinsify Interlocked.Or and Interlocked.And only for arm64-v8.1 (and newer)
// TODO-CQ: Implement for XArch (https://github.com/dotnet/runtime/issues/32239).
case NI_System_Threading_Interlocked_Or:
case NI_System_Threading_Interlocked_And:
{
if (compOpportunisticallyDependsOn(InstructionSet_Atomics))
{
assert(sig->numArgs == 2);
GenTree* op2 = impPopStack().val;
GenTree* op1 = impPopStack().val;
genTreeOps op = (ni == NI_System_Threading_Interlocked_Or) ? GT_XORR : GT_XAND;
retNode = gtNewOperNode(op, genActualType(callType), op1, op2);
retNode->gtFlags |= GTF_GLOB_REF | GTF_ASG;
}
break;
}
#endif // TARGET_ARM64
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
// TODO-ARM-CQ: reenable treating InterlockedCmpXchg32 operation as intrinsic
case NI_System_Threading_Interlocked_CompareExchange:
{
var_types retType = JITtype2varType(sig->retType);
if ((retType == TYP_LONG) && (TARGET_POINTER_SIZE == 4))
{
break;
}
if ((retType != TYP_INT) && (retType != TYP_LONG))
{
break;
}
assert(callType != TYP_STRUCT);
assert(sig->numArgs == 3);
GenTree* op3 = impPopStack().val; // comparand
GenTree* op2 = impPopStack().val; // value
GenTree* op1 = impPopStack().val; // location
GenTree* node = new (this, GT_CMPXCHG) GenTreeCmpXchg(genActualType(callType), op1, op2, op3);
node->AsCmpXchg()->gtOpLocation->gtFlags |= GTF_DONT_CSE;
retNode = node;
break;
}
case NI_System_Threading_Interlocked_Exchange:
case NI_System_Threading_Interlocked_ExchangeAdd:
{
assert(callType != TYP_STRUCT);
assert(sig->numArgs == 2);
var_types retType = JITtype2varType(sig->retType);
if ((retType == TYP_LONG) && (TARGET_POINTER_SIZE == 4))
{
break;
}
if ((retType != TYP_INT) && (retType != TYP_LONG))
{
break;
}
GenTree* op2 = impPopStack().val;
GenTree* op1 = impPopStack().val;
// This creates:
// val
// XAdd
// addr
// field (for example)
//
// In the case where the first argument is the address of a local, we might
// want to make this *not* make the var address-taken -- but atomic instructions
// on a local are probably pretty useless anyway, so we probably don't care.
op1 = gtNewOperNode(ni == NI_System_Threading_Interlocked_ExchangeAdd ? GT_XADD : GT_XCHG,
genActualType(callType), op1, op2);
op1->gtFlags |= GTF_GLOB_REF | GTF_ASG;
retNode = op1;
break;
}
#endif // defined(TARGET_XARCH) || defined(TARGET_ARM64)
case NI_System_Threading_Interlocked_MemoryBarrier:
case NI_System_Threading_Interlocked_ReadMemoryBarrier:
{
assert(sig->numArgs == 0);
GenTree* op1 = new (this, GT_MEMORYBARRIER) GenTree(GT_MEMORYBARRIER, TYP_VOID);
op1->gtFlags |= GTF_GLOB_REF | GTF_ASG;
// On XARCH `NI_System_Threading_Interlocked_ReadMemoryBarrier` fences need not be emitted.
// However, we still need to capture the effect on reordering.
if (ni == NI_System_Threading_Interlocked_ReadMemoryBarrier)
{
op1->gtFlags |= GTF_MEMORYBARRIER_LOAD;
}
retNode = op1;
break;
}
#ifdef FEATURE_HW_INTRINSICS
case NI_System_Math_FusedMultiplyAdd:
{
#ifdef TARGET_XARCH
if (compExactlyDependsOn(InstructionSet_FMA))
{
assert(varTypeIsFloating(callType));
// We are constructing a chain of intrinsics similar to:
// return FMA.MultiplyAddScalar(
// Vector128.CreateScalarUnsafe(x),
// Vector128.CreateScalarUnsafe(y),
// Vector128.CreateScalarUnsafe(z)
// ).ToScalar();
GenTree* op3 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, impPopStack().val,
NI_Vector128_CreateScalarUnsafe, callJitType, 16);
GenTree* op2 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, impPopStack().val,
NI_Vector128_CreateScalarUnsafe, callJitType, 16);
GenTree* op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, impPopStack().val,
NI_Vector128_CreateScalarUnsafe, callJitType, 16);
GenTree* res =
gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, op2, op3, NI_FMA_MultiplyAddScalar, callJitType, 16);
retNode = gtNewSimdHWIntrinsicNode(callType, res, NI_Vector128_ToScalar, callJitType, 16);
break;
}
#elif defined(TARGET_ARM64)
if (compExactlyDependsOn(InstructionSet_AdvSimd))
{
assert(varTypeIsFloating(callType));
// We are constructing a chain of intrinsics similar to:
// return AdvSimd.FusedMultiplyAddScalar(
// Vector64.Create{ScalarUnsafe}(z),
// Vector64.Create{ScalarUnsafe}(y),
// Vector64.Create{ScalarUnsafe}(x)
// ).ToScalar();
NamedIntrinsic createVector64 =
(callType == TYP_DOUBLE) ? NI_Vector64_Create : NI_Vector64_CreateScalarUnsafe;
constexpr unsigned int simdSize = 8;
GenTree* op3 =
gtNewSimdHWIntrinsicNode(TYP_SIMD8, impPopStack().val, createVector64, callJitType, simdSize);
GenTree* op2 =
gtNewSimdHWIntrinsicNode(TYP_SIMD8, impPopStack().val, createVector64, callJitType, simdSize);
GenTree* op1 =
gtNewSimdHWIntrinsicNode(TYP_SIMD8, impPopStack().val, createVector64, callJitType, simdSize);
// Note that AdvSimd.FusedMultiplyAddScalar(op1,op2,op3) corresponds to op1 + op2 * op3
// while Math{F}.FusedMultiplyAddScalar(op1,op2,op3) corresponds to op1 * op2 + op3
retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op3, op2, op1, NI_AdvSimd_FusedMultiplyAddScalar,
callJitType, simdSize);
retNode = gtNewSimdHWIntrinsicNode(callType, retNode, NI_Vector64_ToScalar, callJitType, simdSize);
break;
}
#endif
            // TODO-CQ-XArch: Ideally we would create a GT_INTRINSIC node for FMA; however, that currently
            //                requires more extensive changes to valuenum to support methods with 3 operands.
            // We want to generate a GT_INTRINSIC node in the case the call can't be treated as
            // a target intrinsic so that we can still benefit from CSE and constant folding.
break;
}
#endif // FEATURE_HW_INTRINSICS
case NI_System_Math_Abs:
case NI_System_Math_Acos:
case NI_System_Math_Acosh:
case NI_System_Math_Asin:
case NI_System_Math_Asinh:
case NI_System_Math_Atan:
case NI_System_Math_Atanh:
case NI_System_Math_Atan2:
case NI_System_Math_Cbrt:
case NI_System_Math_Ceiling:
case NI_System_Math_Cos:
case NI_System_Math_Cosh:
case NI_System_Math_Exp:
case NI_System_Math_Floor:
case NI_System_Math_FMod:
case NI_System_Math_ILogB:
case NI_System_Math_Log:
case NI_System_Math_Log2:
case NI_System_Math_Log10:
#ifdef TARGET_ARM64
// ARM64 has fmax/fmin which are IEEE754:2019 minimum/maximum compatible
// TODO-XARCH-CQ: Enable this for XARCH when one of the arguments is a constant
// so we can then emit maxss/minss and avoid NaN/-0.0 handling
case NI_System_Math_Max:
case NI_System_Math_Min:
#endif
case NI_System_Math_Pow:
case NI_System_Math_Round:
case NI_System_Math_Sin:
case NI_System_Math_Sinh:
case NI_System_Math_Sqrt:
case NI_System_Math_Tan:
case NI_System_Math_Tanh:
case NI_System_Math_Truncate:
{
retNode = impMathIntrinsic(method, sig, callType, ni, tailCall);
break;
}
case NI_System_Array_Clone:
case NI_System_Collections_Generic_Comparer_get_Default:
case NI_System_Collections_Generic_EqualityComparer_get_Default:
case NI_System_Object_MemberwiseClone:
case NI_System_Threading_Thread_get_CurrentThread:
{
// Flag for later handling.
isSpecial = true;
break;
}
case NI_System_Object_GetType:
{
JITDUMP("\n impIntrinsic: call to Object.GetType\n");
GenTree* op1 = impStackTop(0).val;
// If we're calling GetType on a boxed value, just get the type directly.
if (op1->IsBoxedValue())
{
JITDUMP("Attempting to optimize box(...).getType() to direct type construction\n");
// Try and clean up the box. Obtain the handle we
// were going to pass to the newobj.
GenTree* boxTypeHandle = gtTryRemoveBoxUpstreamEffects(op1, BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE);
if (boxTypeHandle != nullptr)
{
// Note we don't need to play the TYP_STRUCT games here like
                        // we do for LDTOKEN, since the return value of this operator is Type,
// not RuntimeTypeHandle.
impPopStack();
GenTreeCall::Use* helperArgs = gtNewCallArgs(boxTypeHandle);
GenTree* runtimeType =
gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE, TYP_REF, helperArgs);
retNode = runtimeType;
}
}
// If we have a constrained callvirt with a "box this" transform
// we know we have a value class and hence an exact type.
//
// If so, instead of boxing and then extracting the type, just
// construct the type directly.
if ((retNode == nullptr) && (pConstrainedResolvedToken != nullptr) &&
(constraintCallThisTransform == CORINFO_BOX_THIS))
{
                    // Ensure this is one of the simple box cases (in particular, rule out nullables).
const CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pConstrainedResolvedToken->hClass);
const bool isSafeToOptimize = (boxHelper == CORINFO_HELP_BOX);
if (isSafeToOptimize)
{
JITDUMP("Optimizing constrained box-this obj.getType() to direct type construction\n");
impPopStack();
GenTree* typeHandleOp =
impTokenToHandle(pConstrainedResolvedToken, nullptr, true /* mustRestoreHandle */);
if (typeHandleOp == nullptr)
{
assert(compDonotInline());
return nullptr;
}
GenTreeCall::Use* helperArgs = gtNewCallArgs(typeHandleOp);
GenTree* runtimeType =
gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE, TYP_REF, helperArgs);
retNode = runtimeType;
}
}
#ifdef DEBUG
if (retNode != nullptr)
{
JITDUMP("Optimized result for call to GetType is\n");
if (verbose)
{
gtDispTree(retNode);
}
}
#endif
// Else expand as an intrinsic, unless the call is constrained,
                // in which case we defer expansion to allow impImportCall to do the
// special constraint processing.
if ((retNode == nullptr) && (pConstrainedResolvedToken == nullptr))
{
JITDUMP("Expanding as special intrinsic\n");
impPopStack();
op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, ni, method);
// Set the CALL flag to indicate that the operator is implemented by a call.
// Set also the EXCEPTION flag because the native implementation of
// NI_System_Object_GetType intrinsic can throw NullReferenceException.
op1->gtFlags |= (GTF_CALL | GTF_EXCEPT);
retNode = op1;
// Might be further optimizable, so arrange to leave a mark behind
isSpecial = true;
}
if (retNode == nullptr)
{
JITDUMP("Leaving as normal call\n");
// Might be further optimizable, so arrange to leave a mark behind
isSpecial = true;
}
break;
}
case NI_System_Array_GetLength:
case NI_System_Array_GetLowerBound:
case NI_System_Array_GetUpperBound:
{
// System.Array.GetLength(Int32) method:
// public int GetLength(int dimension)
// System.Array.GetLowerBound(Int32) method:
// public int GetLowerBound(int dimension)
// System.Array.GetUpperBound(Int32) method:
// public int GetUpperBound(int dimension)
//
// Only implement these as intrinsics for multi-dimensional arrays.
// Only handle constant dimension arguments.
GenTree* gtDim = impStackTop().val;
GenTree* gtArr = impStackTop(1).val;
if (gtDim->IsIntegralConst())
{
bool isExact = false;
bool isNonNull = false;
CORINFO_CLASS_HANDLE arrCls = gtGetClassHandle(gtArr, &isExact, &isNonNull);
if (arrCls != NO_CLASS_HANDLE)
{
unsigned rank = info.compCompHnd->getArrayRank(arrCls);
if ((rank > 1) && !info.compCompHnd->isSDArray(arrCls))
{
// `rank` is guaranteed to be <=32 (see MAX_RANK in vm\array.h). Any constant argument
// is `int` sized.
INT64 dimValue = gtDim->AsIntConCommon()->IntegralValue();
assert((unsigned int)dimValue == dimValue);
unsigned dim = (unsigned int)dimValue;
if (dim < rank)
{
// This is now known to be a multi-dimension array with a constant dimension
// that is in range; we can expand it as an intrinsic.
impPopStack().val; // Pop the dim and array object; we already have a pointer to them.
impPopStack().val;
// Make sure there are no global effects in the array (such as it being a function
// call), so we can mark the generated indirection with GTF_IND_INVARIANT. In the
// GetUpperBound case we need the cloned object, since we refer to the array
// object twice. In the other cases, we don't need to clone.
GenTree* gtArrClone = nullptr;
if (((gtArr->gtFlags & GTF_GLOB_EFFECT) != 0) || (ni == NI_System_Array_GetUpperBound))
{
gtArr = impCloneExpr(gtArr, >ArrClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("MD intrinsics array"));
}
switch (ni)
{
case NI_System_Array_GetLength:
{
// Generate *(array + offset-to-length-array + sizeof(int) * dim)
unsigned offs = eeGetMDArrayLengthOffset(rank, dim);
GenTree* gtOffs = gtNewIconNode(offs, TYP_I_IMPL);
GenTree* gtAddr = gtNewOperNode(GT_ADD, TYP_BYREF, gtArr, gtOffs);
retNode = gtNewIndir(TYP_INT, gtAddr);
retNode->gtFlags |= GTF_IND_INVARIANT;
break;
}
case NI_System_Array_GetLowerBound:
{
// Generate *(array + offset-to-bounds-array + sizeof(int) * dim)
unsigned offs = eeGetMDArrayLowerBoundOffset(rank, dim);
GenTree* gtOffs = gtNewIconNode(offs, TYP_I_IMPL);
GenTree* gtAddr = gtNewOperNode(GT_ADD, TYP_BYREF, gtArr, gtOffs);
retNode = gtNewIndir(TYP_INT, gtAddr);
retNode->gtFlags |= GTF_IND_INVARIANT;
break;
}
case NI_System_Array_GetUpperBound:
{
assert(gtArrClone != nullptr);
// Generate:
// *(array + offset-to-length-array + sizeof(int) * dim) +
// *(array + offset-to-bounds-array + sizeof(int) * dim) - 1
unsigned offs = eeGetMDArrayLowerBoundOffset(rank, dim);
GenTree* gtOffs = gtNewIconNode(offs, TYP_I_IMPL);
GenTree* gtAddr = gtNewOperNode(GT_ADD, TYP_BYREF, gtArr, gtOffs);
GenTree* gtLowerBound = gtNewIndir(TYP_INT, gtAddr);
gtLowerBound->gtFlags |= GTF_IND_INVARIANT;
offs = eeGetMDArrayLengthOffset(rank, dim);
gtOffs = gtNewIconNode(offs, TYP_I_IMPL);
gtAddr = gtNewOperNode(GT_ADD, TYP_BYREF, gtArrClone, gtOffs);
GenTree* gtLength = gtNewIndir(TYP_INT, gtAddr);
gtLength->gtFlags |= GTF_IND_INVARIANT;
GenTree* gtSum = gtNewOperNode(GT_ADD, TYP_INT, gtLowerBound, gtLength);
GenTree* gtOne = gtNewIconNode(1, TYP_INT);
retNode = gtNewOperNode(GT_SUB, TYP_INT, gtSum, gtOne);
break;
}
default:
unreached();
}
}
}
}
}
break;
}
case NI_System_Buffers_Binary_BinaryPrimitives_ReverseEndianness:
{
assert(sig->numArgs == 1);
// We expect the return type of the ReverseEndianness routine to match the type of the
// one and only argument to the method. We use a special instruction for 16-bit
// BSWAPs since on x86 processors this is implemented as ROR <16-bit reg>, 8. Additionally,
// we only emit 64-bit BSWAP instructions on 64-bit archs; if we're asked to perform a
                // 64-bit byte swap on a 32-bit arch, we'll fall through to the default case in the switch block below.
switch (sig->retType)
{
case CorInfoType::CORINFO_TYPE_SHORT:
case CorInfoType::CORINFO_TYPE_USHORT:
retNode = gtNewCastNode(TYP_INT, gtNewOperNode(GT_BSWAP16, TYP_INT, impPopStack().val), false,
callType);
break;
case CorInfoType::CORINFO_TYPE_INT:
case CorInfoType::CORINFO_TYPE_UINT:
#ifdef TARGET_64BIT
case CorInfoType::CORINFO_TYPE_LONG:
case CorInfoType::CORINFO_TYPE_ULONG:
#endif // TARGET_64BIT
retNode = gtNewOperNode(GT_BSWAP, callType, impPopStack().val);
break;
default:
// This default case gets hit on 32-bit archs when a call to a 64-bit overload
// of ReverseEndianness is encountered. In that case we'll let JIT treat this as a standard
// method call, where the implementation decomposes the operation into two 32-bit
// bswap routines. If the input to the 64-bit function is a constant, then we rely
// on inlining + constant folding of 32-bit bswaps to effectively constant fold
// the 64-bit call site.
break;
}
break;
}
// Fold PopCount for constant input
case NI_System_Numerics_BitOperations_PopCount:
{
assert(sig->numArgs == 1);
if (impStackTop().val->IsIntegralConst())
{
typeInfo argType = verParseArgSigToTypeInfo(sig, sig->args).NormaliseForStack();
INT64 cns = impPopStack().val->AsIntConCommon()->IntegralValue();
if (argType.IsType(TI_LONG))
{
retNode = gtNewIconNode(genCountBits(cns), callType);
}
else
{
assert(argType.IsType(TI_INT));
retNode = gtNewIconNode(genCountBits(static_cast<unsigned>(cns)), callType);
}
}
break;
}
case NI_System_GC_KeepAlive:
{
retNode = impKeepAliveIntrinsic(impPopStack().val);
break;
}
default:
break;
}
}
if (mustExpand && (retNode == nullptr))
{
assert(!"Unhandled must expand intrinsic, throwing PlatformNotSupportedException");
return impUnsupportedNamedIntrinsic(CORINFO_HELP_THROW_PLATFORM_NOT_SUPPORTED, method, sig, mustExpand);
}
// Optionally report if this intrinsic is special
// (that is, potentially re-optimizable during morph).
if (isSpecialIntrinsic != nullptr)
{
*isSpecialIntrinsic = isSpecial;
}
return retNode;
}
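//------------------------------------------------------------------------
// impTypeIsAssignable: try to fold Type.IsAssignableFrom/IsAssignableTo to a constant
//
// Arguments:
//    typeTo   -- tree for the destination System.Type
//    typeFrom -- tree for the source System.Type
//
// Return Value:
//    An integer constant node (1/0) when both operands are `typeof()` helper calls
//    with known class handles and the answer is statically known; otherwise nullptr,
//    in which case the call is imported normally.
//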
GenTree* Compiler::impTypeIsAssignable(GenTree* typeTo, GenTree* typeFrom)
{
// Optimize patterns like:
//
// typeof(TTo).IsAssignableFrom(typeof(TTFrom))
// valueTypeVar.GetType().IsAssignableFrom(typeof(TTFrom))
// typeof(TTFrom).IsAssignableTo(typeof(TTo))
// typeof(TTFrom).IsAssignableTo(valueTypeVar.GetType())
//
// to true/false
if (typeTo->IsCall() && typeFrom->IsCall())
{
// make sure both arguments are `typeof()`
CORINFO_METHOD_HANDLE hTypeof = eeFindHelper(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE);
if ((typeTo->AsCall()->gtCallMethHnd == hTypeof) && (typeFrom->AsCall()->gtCallMethHnd == hTypeof))
{
CORINFO_CLASS_HANDLE hClassTo = gtGetHelperArgClassHandle(typeTo->AsCall()->gtCallArgs->GetNode());
CORINFO_CLASS_HANDLE hClassFrom = gtGetHelperArgClassHandle(typeFrom->AsCall()->gtCallArgs->GetNode());
if (hClassTo == NO_CLASS_HANDLE || hClassFrom == NO_CLASS_HANDLE)
{
return nullptr;
}
TypeCompareState castResult = info.compCompHnd->compareTypesForCast(hClassFrom, hClassTo);
if (castResult == TypeCompareState::May)
{
// requires runtime check
// e.g. __Canon, COMObjects, Nullable
return nullptr;
}
GenTreeIntCon* retNode = gtNewIconNode((castResult == TypeCompareState::Must) ? 1 : 0);
impPopStack(); // drop both CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE calls
impPopStack();
return retNode;
}
}
return nullptr;
}
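//------------------------------------------------------------------------
// impMathIntrinsic: import a Math/MathF intrinsic call as a GT_INTRINSIC node
//
// Arguments:
//    method        -- method handle of the intrinsic function
//    sig           -- signature of the intrinsic call
//    callType      -- return type of the call
//    intrinsicName -- id of the math intrinsic
//    tailCall      -- true if the call was prefixed with "tail."
//
// Return Value:
//    The imported GT_INTRINSIC node, or nullptr if the call should instead be
//    imported as an ordinary call (e.g. user-call implemented intrinsics on tail calls).
//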
GenTree* Compiler::impMathIntrinsic(CORINFO_METHOD_HANDLE method,
CORINFO_SIG_INFO* sig,
var_types callType,
NamedIntrinsic intrinsicName,
bool tailCall)
{
GenTree* op1;
GenTree* op2;
assert(callType != TYP_STRUCT);
assert(IsMathIntrinsic(intrinsicName));
op1 = nullptr;
#if !defined(TARGET_X86)
// Intrinsics that are not implemented directly by target instructions will
// be re-materialized as users calls in rationalizer. For prefixed tail calls,
// don't do this optimization, because
// a) For back compatibility reasons on desktop .NET Framework 4.6 / 4.6.1
    //    b) It will be a non-trivial task or too late to re-materialize a surviving
// tail prefixed GT_INTRINSIC as tail call in rationalizer.
if (!IsIntrinsicImplementedByUserCall(intrinsicName) || !tailCall)
#else
// On x86 RyuJIT, importing intrinsics that are implemented as user calls can cause incorrect calculation
// of the depth of the stack if these intrinsics are used as arguments to another call. This causes bad
// code generation for certain EH constructs.
if (!IsIntrinsicImplementedByUserCall(intrinsicName))
#endif
{
CORINFO_CLASS_HANDLE tmpClass;
CORINFO_ARG_LIST_HANDLE arg;
var_types op1Type;
var_types op2Type;
switch (sig->numArgs)
{
case 1:
op1 = impPopStack().val;
arg = sig->args;
op1Type = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg, &tmpClass)));
if (op1->TypeGet() != genActualType(op1Type))
{
assert(varTypeIsFloating(op1));
op1 = gtNewCastNode(callType, op1, false, callType);
}
op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, intrinsicName, method);
break;
case 2:
op2 = impPopStack().val;
op1 = impPopStack().val;
arg = sig->args;
op1Type = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg, &tmpClass)));
if (op1->TypeGet() != genActualType(op1Type))
{
assert(varTypeIsFloating(op1));
op1 = gtNewCastNode(callType, op1, false, callType);
}
arg = info.compCompHnd->getArgNext(arg);
op2Type = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg, &tmpClass)));
if (op2->TypeGet() != genActualType(op2Type))
{
assert(varTypeIsFloating(op2));
op2 = gtNewCastNode(callType, op2, false, callType);
}
op1 =
new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, op2, intrinsicName, method);
break;
default:
NO_WAY("Unsupported number of args for Math Intrinsic");
}
if (IsIntrinsicImplementedByUserCall(intrinsicName))
{
op1->gtFlags |= GTF_CALL;
}
}
return op1;
}
//------------------------------------------------------------------------
// lookupNamedIntrinsic: map method to jit named intrinsic value
//
// Arguments:
// method -- method handle for method
//
// Return Value:
// Id for the named intrinsic, or Illegal if none.
//
// Notes:
// method should have CORINFO_FLG_INTRINSIC set in its attributes,
// otherwise it is not a named jit intrinsic.
//
NamedIntrinsic Compiler::lookupNamedIntrinsic(CORINFO_METHOD_HANDLE method)
{
const char* className = nullptr;
const char* namespaceName = nullptr;
const char* enclosingClassName = nullptr;
const char* methodName =
info.compCompHnd->getMethodNameFromMetadata(method, &className, &namespaceName, &enclosingClassName);
JITDUMP("Named Intrinsic ");
if (namespaceName != nullptr)
{
JITDUMP("%s.", namespaceName);
}
if (enclosingClassName != nullptr)
{
JITDUMP("%s.", enclosingClassName);
}
if (className != nullptr)
{
JITDUMP("%s.", className);
}
if (methodName != nullptr)
{
JITDUMP("%s", methodName);
}
if ((namespaceName == nullptr) || (className == nullptr) || (methodName == nullptr))
{
// Check if we are dealing with an MD array's known runtime method
CorInfoArrayIntrinsic arrayFuncIndex = info.compCompHnd->getArrayIntrinsicID(method);
switch (arrayFuncIndex)
{
case CorInfoArrayIntrinsic::GET:
JITDUMP("ARRAY_FUNC_GET: Recognized\n");
return NI_Array_Get;
case CorInfoArrayIntrinsic::SET:
JITDUMP("ARRAY_FUNC_SET: Recognized\n");
return NI_Array_Set;
case CorInfoArrayIntrinsic::ADDRESS:
JITDUMP("ARRAY_FUNC_ADDRESS: Recognized\n");
return NI_Array_Address;
default:
break;
}
JITDUMP(": Not recognized, not enough metadata\n");
return NI_Illegal;
}
JITDUMP(": ");
NamedIntrinsic result = NI_Illegal;
if (strcmp(namespaceName, "System") == 0)
{
if ((strcmp(className, "Enum") == 0) && (strcmp(methodName, "HasFlag") == 0))
{
result = NI_System_Enum_HasFlag;
}
else if (strcmp(className, "Activator") == 0)
{
if (strcmp(methodName, "AllocatorOf") == 0)
{
result = NI_System_Activator_AllocatorOf;
}
else if (strcmp(methodName, "DefaultConstructorOf") == 0)
{
result = NI_System_Activator_DefaultConstructorOf;
}
}
else if (strcmp(className, "ByReference`1") == 0)
{
if (strcmp(methodName, ".ctor") == 0)
{
result = NI_System_ByReference_ctor;
}
else if (strcmp(methodName, "get_Value") == 0)
{
result = NI_System_ByReference_get_Value;
}
}
else if (strcmp(className, "Math") == 0 || strcmp(className, "MathF") == 0)
{
if (strcmp(methodName, "Abs") == 0)
{
result = NI_System_Math_Abs;
}
else if (strcmp(methodName, "Acos") == 0)
{
result = NI_System_Math_Acos;
}
else if (strcmp(methodName, "Acosh") == 0)
{
result = NI_System_Math_Acosh;
}
else if (strcmp(methodName, "Asin") == 0)
{
result = NI_System_Math_Asin;
}
else if (strcmp(methodName, "Asinh") == 0)
{
result = NI_System_Math_Asinh;
}
else if (strcmp(methodName, "Atan") == 0)
{
result = NI_System_Math_Atan;
}
else if (strcmp(methodName, "Atanh") == 0)
{
result = NI_System_Math_Atanh;
}
else if (strcmp(methodName, "Atan2") == 0)
{
result = NI_System_Math_Atan2;
}
else if (strcmp(methodName, "Cbrt") == 0)
{
result = NI_System_Math_Cbrt;
}
else if (strcmp(methodName, "Ceiling") == 0)
{
result = NI_System_Math_Ceiling;
}
else if (strcmp(methodName, "Cos") == 0)
{
result = NI_System_Math_Cos;
}
else if (strcmp(methodName, "Cosh") == 0)
{
result = NI_System_Math_Cosh;
}
else if (strcmp(methodName, "Exp") == 0)
{
result = NI_System_Math_Exp;
}
else if (strcmp(methodName, "Floor") == 0)
{
result = NI_System_Math_Floor;
}
else if (strcmp(methodName, "FMod") == 0)
{
result = NI_System_Math_FMod;
}
else if (strcmp(methodName, "FusedMultiplyAdd") == 0)
{
result = NI_System_Math_FusedMultiplyAdd;
}
else if (strcmp(methodName, "ILogB") == 0)
{
result = NI_System_Math_ILogB;
}
else if (strcmp(methodName, "Log") == 0)
{
result = NI_System_Math_Log;
}
else if (strcmp(methodName, "Log2") == 0)
{
result = NI_System_Math_Log2;
}
else if (strcmp(methodName, "Log10") == 0)
{
result = NI_System_Math_Log10;
}
else if (strcmp(methodName, "Max") == 0)
{
result = NI_System_Math_Max;
}
else if (strcmp(methodName, "Min") == 0)
{
result = NI_System_Math_Min;
}
else if (strcmp(methodName, "Pow") == 0)
{
result = NI_System_Math_Pow;
}
else if (strcmp(methodName, "Round") == 0)
{
result = NI_System_Math_Round;
}
else if (strcmp(methodName, "Sin") == 0)
{
result = NI_System_Math_Sin;
}
else if (strcmp(methodName, "Sinh") == 0)
{
result = NI_System_Math_Sinh;
}
else if (strcmp(methodName, "Sqrt") == 0)
{
result = NI_System_Math_Sqrt;
}
else if (strcmp(methodName, "Tan") == 0)
{
result = NI_System_Math_Tan;
}
else if (strcmp(methodName, "Tanh") == 0)
{
result = NI_System_Math_Tanh;
}
else if (strcmp(methodName, "Truncate") == 0)
{
result = NI_System_Math_Truncate;
}
}
else if (strcmp(className, "GC") == 0)
{
if (strcmp(methodName, "KeepAlive") == 0)
{
result = NI_System_GC_KeepAlive;
}
}
else if (strcmp(className, "Array") == 0)
{
if (strcmp(methodName, "Clone") == 0)
{
result = NI_System_Array_Clone;
}
else if (strcmp(methodName, "GetLength") == 0)
{
result = NI_System_Array_GetLength;
}
else if (strcmp(methodName, "GetLowerBound") == 0)
{
result = NI_System_Array_GetLowerBound;
}
else if (strcmp(methodName, "GetUpperBound") == 0)
{
result = NI_System_Array_GetUpperBound;
}
}
else if (strcmp(className, "Object") == 0)
{
if (strcmp(methodName, "MemberwiseClone") == 0)
{
result = NI_System_Object_MemberwiseClone;
}
else if (strcmp(methodName, "GetType") == 0)
{
result = NI_System_Object_GetType;
}
else if (strcmp(methodName, "MethodTableOf") == 0)
{
result = NI_System_Object_MethodTableOf;
}
}
else if (strcmp(className, "RuntimeTypeHandle") == 0)
{
if (strcmp(methodName, "GetValueInternal") == 0)
{
result = NI_System_RuntimeTypeHandle_GetValueInternal;
}
}
else if (strcmp(className, "Type") == 0)
{
if (strcmp(methodName, "get_IsValueType") == 0)
{
result = NI_System_Type_get_IsValueType;
}
else if (strcmp(methodName, "IsAssignableFrom") == 0)
{
result = NI_System_Type_IsAssignableFrom;
}
else if (strcmp(methodName, "IsAssignableTo") == 0)
{
result = NI_System_Type_IsAssignableTo;
}
else if (strcmp(methodName, "op_Equality") == 0)
{
result = NI_System_Type_op_Equality;
}
else if (strcmp(methodName, "op_Inequality") == 0)
{
result = NI_System_Type_op_Inequality;
}
else if (strcmp(methodName, "GetTypeFromHandle") == 0)
{
result = NI_System_Type_GetTypeFromHandle;
}
}
else if (strcmp(className, "String") == 0)
{
if (strcmp(methodName, "Equals") == 0)
{
result = NI_System_String_Equals;
}
else if (strcmp(methodName, "get_Chars") == 0)
{
result = NI_System_String_get_Chars;
}
else if (strcmp(methodName, "get_Length") == 0)
{
result = NI_System_String_get_Length;
}
else if (strcmp(methodName, "op_Implicit") == 0)
{
result = NI_System_String_op_Implicit;
}
else if (strcmp(methodName, "StartsWith") == 0)
{
result = NI_System_String_StartsWith;
}
}
else if (strcmp(className, "MemoryExtensions") == 0)
{
if (strcmp(methodName, "AsSpan") == 0)
{
result = NI_System_MemoryExtensions_AsSpan;
}
if (strcmp(methodName, "SequenceEqual") == 0)
{
result = NI_System_MemoryExtensions_SequenceEqual;
}
else if (strcmp(methodName, "Equals") == 0)
{
result = NI_System_MemoryExtensions_Equals;
}
else if (strcmp(methodName, "StartsWith") == 0)
{
result = NI_System_MemoryExtensions_StartsWith;
}
}
else if (strcmp(className, "Span`1") == 0)
{
if (strcmp(methodName, "get_Item") == 0)
{
result = NI_System_Span_get_Item;
}
}
else if (strcmp(className, "ReadOnlySpan`1") == 0)
{
if (strcmp(methodName, "get_Item") == 0)
{
result = NI_System_ReadOnlySpan_get_Item;
}
}
else if (strcmp(className, "EETypePtr") == 0)
{
if (strcmp(methodName, "EETypePtrOf") == 0)
{
result = NI_System_EETypePtr_EETypePtrOf;
}
}
}
else if (strcmp(namespaceName, "System.Threading") == 0)
{
if (strcmp(className, "Thread") == 0)
{
if (strcmp(methodName, "get_CurrentThread") == 0)
{
result = NI_System_Threading_Thread_get_CurrentThread;
}
else if (strcmp(methodName, "get_ManagedThreadId") == 0)
{
result = NI_System_Threading_Thread_get_ManagedThreadId;
}
}
else if (strcmp(className, "Interlocked") == 0)
{
#ifndef TARGET_ARM64
// TODO-CQ: Implement for XArch (https://github.com/dotnet/runtime/issues/32239).
if (strcmp(methodName, "And") == 0)
{
result = NI_System_Threading_Interlocked_And;
}
else if (strcmp(methodName, "Or") == 0)
{
result = NI_System_Threading_Interlocked_Or;
}
#endif
if (strcmp(methodName, "CompareExchange") == 0)
{
result = NI_System_Threading_Interlocked_CompareExchange;
}
else if (strcmp(methodName, "Exchange") == 0)
{
result = NI_System_Threading_Interlocked_Exchange;
}
else if (strcmp(methodName, "ExchangeAdd") == 0)
{
result = NI_System_Threading_Interlocked_ExchangeAdd;
}
else if (strcmp(methodName, "MemoryBarrier") == 0)
{
result = NI_System_Threading_Interlocked_MemoryBarrier;
}
else if (strcmp(methodName, "ReadMemoryBarrier") == 0)
{
result = NI_System_Threading_Interlocked_ReadMemoryBarrier;
}
}
}
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
else if (strcmp(namespaceName, "System.Buffers.Binary") == 0)
{
if ((strcmp(className, "BinaryPrimitives") == 0) && (strcmp(methodName, "ReverseEndianness") == 0))
{
result = NI_System_Buffers_Binary_BinaryPrimitives_ReverseEndianness;
}
}
#endif // defined(TARGET_XARCH) || defined(TARGET_ARM64)
else if (strcmp(namespaceName, "System.Collections.Generic") == 0)
{
if ((strcmp(className, "EqualityComparer`1") == 0) && (strcmp(methodName, "get_Default") == 0))
{
result = NI_System_Collections_Generic_EqualityComparer_get_Default;
}
else if ((strcmp(className, "Comparer`1") == 0) && (strcmp(methodName, "get_Default") == 0))
{
result = NI_System_Collections_Generic_Comparer_get_Default;
}
}
else if ((strcmp(namespaceName, "System.Numerics") == 0) && (strcmp(className, "BitOperations") == 0))
{
if (strcmp(methodName, "PopCount") == 0)
{
result = NI_System_Numerics_BitOperations_PopCount;
}
}
#ifdef FEATURE_HW_INTRINSICS
else if (strcmp(namespaceName, "System.Numerics") == 0)
{
CORINFO_SIG_INFO sig;
info.compCompHnd->getMethodSig(method, &sig);
int sizeOfVectorT = getSIMDVectorRegisterByteLength();
result = SimdAsHWIntrinsicInfo::lookupId(&sig, className, methodName, enclosingClassName, sizeOfVectorT);
}
#endif // FEATURE_HW_INTRINSICS
else if ((strcmp(namespaceName, "System.Runtime.CompilerServices") == 0) &&
(strcmp(className, "RuntimeHelpers") == 0))
{
if (strcmp(methodName, "CreateSpan") == 0)
{
result = NI_System_Runtime_CompilerServices_RuntimeHelpers_CreateSpan;
}
else if (strcmp(methodName, "InitializeArray") == 0)
{
result = NI_System_Runtime_CompilerServices_RuntimeHelpers_InitializeArray;
}
else if (strcmp(methodName, "IsKnownConstant") == 0)
{
result = NI_System_Runtime_CompilerServices_RuntimeHelpers_IsKnownConstant;
}
}
else if (strncmp(namespaceName, "System.Runtime.Intrinsics", 25) == 0)
{
// We go down this path even when FEATURE_HW_INTRINSICS isn't enabled
// so we can specially handle IsSupported and recursive calls.
// This is required to appropriately handle the intrinsics on platforms
// which don't support them. On such a platform methods like Vector64.Create
// will be seen as `Intrinsic` and `mustExpand` due to having a code path
// which is recursive. When such a path is hit we expect it to be handled by
        // the importer, and we fire an assert if it wasn't. Previous versions of the
        // JIT would fail fast here; this was changed to throw a PNSE instead, but we
        // still assert since most intrinsics should have been recognized/handled.
// In order to avoid the assert, we specially handle the IsSupported checks
// (to better allow dead-code optimizations) and we explicitly throw a PNSE
// as we know that is the desired behavior for the HWIntrinsics when not
// supported. For cases like Vector64.Create, this is fine because it will
// be behind a relevant IsSupported check and will never be hit and the
// software fallback will be executed instead.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef FEATURE_HW_INTRINSICS
namespaceName += 25;
const char* platformNamespaceName;
#if defined(TARGET_XARCH)
platformNamespaceName = ".X86";
#elif defined(TARGET_ARM64)
platformNamespaceName = ".Arm";
#else
#error Unsupported platform
#endif
if ((namespaceName[0] == '\0') || (strcmp(namespaceName, platformNamespaceName) == 0))
{
CORINFO_SIG_INFO sig;
info.compCompHnd->getMethodSig(method, &sig);
result = HWIntrinsicInfo::lookupId(this, &sig, className, methodName, enclosingClassName);
}
#endif // FEATURE_HW_INTRINSICS
if (result == NI_Illegal)
{
if ((strcmp(methodName, "get_IsSupported") == 0) || (strcmp(methodName, "get_IsHardwareAccelerated") == 0))
{
// This allows the relevant code paths to be dropped as dead code even
// on platforms where FEATURE_HW_INTRINSICS is not supported.
result = NI_IsSupported_False;
}
else if (gtIsRecursiveCall(method))
{
// For the framework itself, any recursive intrinsics will either be
// only supported on a single platform or will be guarded by a relevant
// IsSupported check so the throw PNSE will be valid or dropped.
result = NI_Throw_PlatformNotSupportedException;
}
}
}
else if (strcmp(namespaceName, "System.StubHelpers") == 0)
{
if (strcmp(className, "StubHelpers") == 0)
{
if (strcmp(methodName, "GetStubContext") == 0)
{
result = NI_System_StubHelpers_GetStubContext;
}
else if (strcmp(methodName, "NextCallReturnAddress") == 0)
{
result = NI_System_StubHelpers_NextCallReturnAddress;
}
}
}
if (result == NI_Illegal)
{
JITDUMP("Not recognized\n");
}
else if (result == NI_IsSupported_False)
{
JITDUMP("Unsupported - return false");
}
else if (result == NI_Throw_PlatformNotSupportedException)
{
JITDUMP("Unsupported - throw PlatformNotSupportedException");
}
else
{
JITDUMP("Recognized\n");
}
return result;
}
//------------------------------------------------------------------------
// impUnsupportedNamedIntrinsic: Throws an exception for an unsupported named intrinsic
//
// Arguments:
// helper - JIT helper ID for the exception to be thrown
// method - method handle of the intrinsic function.
// sig - signature of the intrinsic call
// mustExpand - true if the intrinsic must return a GenTree*; otherwise, false
//
// Return Value:
// a gtNewMustThrowException if mustExpand is true; otherwise, nullptr
//
GenTree* Compiler::impUnsupportedNamedIntrinsic(unsigned helper,
CORINFO_METHOD_HANDLE method,
CORINFO_SIG_INFO* sig,
bool mustExpand)
{
// We've hit some error case and may need to return a node for the given error.
//
// When `mustExpand=false`, we are attempting to inline the intrinsic directly into another method. In this
// scenario, we need to return `nullptr` so that a GT_CALL to the intrinsic is emitted instead. This is to
// ensure that everything continues to behave correctly when optimizations are enabled (e.g. things like the
// inliner may expect the node we return to have a certain signature, and the `MustThrowException` node won't
// match that).
//
// When `mustExpand=true`, we are in a GT_CALL to the intrinsic and are attempting to JIT it. This will generally
// be in response to an indirect call (e.g. done via reflection) or in response to an earlier attempt returning
// `nullptr` (under `mustExpand=false`). In that scenario, we are safe to return the `MustThrowException` node.
if (mustExpand)
{
for (unsigned i = 0; i < sig->numArgs; i++)
{
impPopStack();
}
return gtNewMustThrowException(helper, JITtype2varType(sig->retType), sig->retTypeClass);
}
else
{
return nullptr;
}
}
/*****************************************************************************/
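// impArrayAccessIntrinsic: import the MD-array Get/Set/Address runtime methods as a
// GT_ARR_ELEM based tree instead of a call. Returns nullptr when the expansion is not
// profitable or not legal (SMALL_CODE, rank 1 or rank > GT_ARR_MAX_RANK, oversized
// elements, struct sets, or non-final GC element types on Set/Address), in which case
// a normal call is used instead.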
GenTree* Compiler::impArrayAccessIntrinsic(
CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, int memberRef, bool readonlyCall, NamedIntrinsic intrinsicName)
{
/* If we are generating SMALL_CODE, we don't want to use intrinsics for
the following, as it generates fatter code.
*/
if (compCodeOpt() == SMALL_CODE)
{
return nullptr;
}
/* These intrinsics generate fatter (but faster) code and are only
done if we don't need SMALL_CODE */
unsigned rank = (intrinsicName == NI_Array_Set) ? (sig->numArgs - 1) : sig->numArgs;
    // The rank 1 case is special because it has to handle two array formats;
    // we simply do not expand that case.
if (rank > GT_ARR_MAX_RANK || rank <= 1)
{
return nullptr;
}
CORINFO_CLASS_HANDLE arrElemClsHnd = nullptr;
var_types elemType = JITtype2varType(info.compCompHnd->getChildType(clsHnd, &arrElemClsHnd));
    // For the ref case, we will only be able to inline if the types match
    // (the verifier checks for this; we don't care for the nonverified case) and the
    // type is final (so we don't need to do the cast).
if ((intrinsicName != NI_Array_Get) && !readonlyCall && varTypeIsGC(elemType))
{
// Get the call site signature
CORINFO_SIG_INFO LocalSig;
eeGetCallSiteSig(memberRef, info.compScopeHnd, impTokenLookupContextHandle, &LocalSig);
assert(LocalSig.hasThis());
CORINFO_CLASS_HANDLE actualElemClsHnd;
if (intrinsicName == NI_Array_Set)
{
// Fetch the last argument, the one that indicates the type we are setting.
CORINFO_ARG_LIST_HANDLE argType = LocalSig.args;
for (unsigned r = 0; r < rank; r++)
{
argType = info.compCompHnd->getArgNext(argType);
}
typeInfo argInfo = verParseArgSigToTypeInfo(&LocalSig, argType);
actualElemClsHnd = argInfo.GetClassHandle();
}
else
{
assert(intrinsicName == NI_Array_Address);
// Fetch the return type
typeInfo retInfo = verMakeTypeInfo(LocalSig.retType, LocalSig.retTypeClass);
assert(retInfo.IsByRef());
actualElemClsHnd = retInfo.GetClassHandle();
}
// if it's not final, we can't do the optimization
if (!(info.compCompHnd->getClassAttribs(actualElemClsHnd) & CORINFO_FLG_FINAL))
{
return nullptr;
}
}
unsigned arrayElemSize;
if (elemType == TYP_STRUCT)
{
assert(arrElemClsHnd);
arrayElemSize = info.compCompHnd->getClassSize(arrElemClsHnd);
}
else
{
arrayElemSize = genTypeSize(elemType);
}
if ((unsigned char)arrayElemSize != arrayElemSize)
{
// arrayElemSize would be truncated as an unsigned char.
// This means the array element is too large. Don't do the optimization.
return nullptr;
}
GenTree* val = nullptr;
if (intrinsicName == NI_Array_Set)
{
// Assignment of a struct is more work, and there are more gets than sets.
if (elemType == TYP_STRUCT)
{
return nullptr;
}
val = impPopStack().val;
assert(genActualType(elemType) == genActualType(val->gtType) ||
(elemType == TYP_FLOAT && val->gtType == TYP_DOUBLE) ||
(elemType == TYP_INT && val->gtType == TYP_BYREF) ||
(elemType == TYP_DOUBLE && val->gtType == TYP_FLOAT));
}
noway_assert((unsigned char)GT_ARR_MAX_RANK == GT_ARR_MAX_RANK);
GenTree* inds[GT_ARR_MAX_RANK];
for (unsigned k = rank; k > 0; k--)
{
inds[k - 1] = impPopStack().val;
}
GenTree* arr = impPopStack().val;
assert(arr->gtType == TYP_REF);
GenTree* arrElem =
new (this, GT_ARR_ELEM) GenTreeArrElem(TYP_BYREF, arr, static_cast<unsigned char>(rank),
static_cast<unsigned char>(arrayElemSize), elemType, &inds[0]);
if (intrinsicName != NI_Array_Address)
{
if (varTypeIsStruct(elemType))
{
arrElem = gtNewObjNode(sig->retTypeClass, arrElem);
}
else
{
arrElem = gtNewOperNode(GT_IND, elemType, arrElem);
}
}
if (intrinsicName == NI_Array_Set)
{
assert(val != nullptr);
return gtNewAssignNode(arrElem, val);
}
else
{
return arrElem;
}
}
//------------------------------------------------------------------------
// impKeepAliveIntrinsic: Import the GC.KeepAlive intrinsic call
//
// Imports the intrinsic as a GT_KEEPALIVE node, and, as an optimization,
// if the object to keep alive is a GT_BOX, removes its side effects and
// uses the address of a local (copied from the box's source if needed)
// as the operand for GT_KEEPALIVE. For the BOX optimization, if the class
// of the box has no GC fields, a GT_NOP is returned.
//
// Arguments:
// objToKeepAlive - the intrinisic call's argument
//
// Return Value:
// The imported GT_KEEPALIVE or GT_NOP - see description.
//
GenTree* Compiler::impKeepAliveIntrinsic(GenTree* objToKeepAlive)
{
assert(objToKeepAlive->TypeIs(TYP_REF));
if (opts.OptimizationEnabled() && objToKeepAlive->IsBoxedValue())
{
CORINFO_CLASS_HANDLE boxedClass = lvaGetDesc(objToKeepAlive->AsBox()->BoxOp()->AsLclVar())->lvClassHnd;
ClassLayout* layout = typGetObjLayout(boxedClass);
if (!layout->HasGCPtr())
{
gtTryRemoveBoxUpstreamEffects(objToKeepAlive, BR_REMOVE_AND_NARROW);
JITDUMP("\nBOX class has no GC fields, KEEPALIVE is a NOP");
return gtNewNothingNode();
}
GenTree* boxSrc = gtTryRemoveBoxUpstreamEffects(objToKeepAlive, BR_REMOVE_BUT_NOT_NARROW);
if (boxSrc != nullptr)
{
unsigned boxTempNum;
if (boxSrc->OperIs(GT_LCL_VAR))
{
boxTempNum = boxSrc->AsLclVarCommon()->GetLclNum();
}
else
{
boxTempNum = lvaGrabTemp(true DEBUGARG("Temp for the box source"));
GenTree* boxTempAsg = gtNewTempAssign(boxTempNum, boxSrc);
Statement* boxAsgStmt = objToKeepAlive->AsBox()->gtCopyStmtWhenInlinedBoxValue;
boxAsgStmt->SetRootNode(boxTempAsg);
}
JITDUMP("\nImporting KEEPALIVE(BOX) as KEEPALIVE(ADDR(LCL_VAR V%02u))", boxTempNum);
GenTree* boxTemp = gtNewLclvNode(boxTempNum, boxSrc->TypeGet());
GenTree* boxTempAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, boxTemp);
return gtNewKeepAliveNode(boxTempAddr);
}
}
return gtNewKeepAliveNode(objToKeepAlive);
}
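//------------------------------------------------------------------------
// verMergeEntryStates: merge the current verification state into a successor block's entry state
//
// Arguments:
//    block   -- the successor block
//    changed -- [out] set to true if the block's entry state was updated by the merge
//
// Return Value:
//    true if the states are compatible (same stack depth and mergeable stack types);
//    false if the merge fails.
//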
bool Compiler::verMergeEntryStates(BasicBlock* block, bool* changed)
{
unsigned i;
// do some basic checks first
if (block->bbStackDepthOnEntry() != verCurrentState.esStackDepth)
{
return false;
}
if (verCurrentState.esStackDepth > 0)
{
// merge stack types
StackEntry* parentStack = block->bbStackOnEntry();
StackEntry* childStack = verCurrentState.esStack;
for (i = 0; i < verCurrentState.esStackDepth; i++, parentStack++, childStack++)
{
if (tiMergeToCommonParent(&parentStack->seTypeInfo, &childStack->seTypeInfo, changed) == false)
{
return false;
}
}
}
// merge initialization status of this ptr
if (verTrackObjCtorInitState)
{
// If we're tracking the CtorInitState, then it must not be unknown in the current state.
assert(verCurrentState.thisInitialized != TIS_Bottom);
// If the successor block's thisInit state is unknown, copy it from the current state.
if (block->bbThisOnEntry() == TIS_Bottom)
{
*changed = true;
verSetThisInit(block, verCurrentState.thisInitialized);
}
else if (verCurrentState.thisInitialized != block->bbThisOnEntry())
{
if (block->bbThisOnEntry() != TIS_Top)
{
*changed = true;
verSetThisInit(block, TIS_Top);
if (block->bbFlags & BBF_FAILED_VERIFICATION)
{
// The block is bad. Control can flow through the block to any handler that catches the
// verification exception, but the importer ignores bad blocks and therefore won't model
// this flow in the normal way. To complete the merge into the bad block, the new state
// needs to be manually pushed to the handlers that may be reached after the verification
// exception occurs.
//
// Usually, the new state was already propagated to the relevant handlers while processing
// the predecessors of the bad block. The exception is when the bad block is at the start
// of a try region, meaning it is protected by additional handlers that do not protect its
// predecessors.
//
if (block->hasTryIndex() && ((block->bbFlags & BBF_TRY_BEG) != 0))
{
// Push TIS_Top to the handlers that protect the bad block. Note that this can cause
// recursive calls back into this code path (if successors of the current bad block are
// also bad blocks).
//
ThisInitState origTIS = verCurrentState.thisInitialized;
verCurrentState.thisInitialized = TIS_Top;
impVerifyEHBlock(block, true);
verCurrentState.thisInitialized = origTIS;
}
}
}
}
}
else
{
assert(verCurrentState.thisInitialized == TIS_Bottom && block->bbThisOnEntry() == TIS_Bottom);
}
return true;
}
/*****************************************************************************
 * 'logMsg' is true if a log message needs to be logged; false if the caller has
 * already logged it (presumably in a more detailed fashion than done here).
*/
void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg))
{
block->bbJumpKind = BBJ_THROW;
block->bbFlags |= BBF_FAILED_VERIFICATION;
block->bbFlags &= ~BBF_IMPORTED;
impCurStmtOffsSet(block->bbCodeOffs);
// Clear the statement list as it exists so far; we're only going to have a verification exception.
impStmtList = impLastStmt = nullptr;
#ifdef DEBUG
if (logMsg)
{
JITLOG((LL_ERROR, "Verification failure: while compiling %s near IL offset %x..%xh \n", info.compFullName,
block->bbCodeOffs, block->bbCodeOffsEnd));
if (verbose)
{
printf("\n\nVerification failure: %s near IL %xh \n", info.compFullName, block->bbCodeOffs);
}
}
if (JitConfig.DebugBreakOnVerificationFailure())
{
DebugBreak();
}
#endif
impBeginTreeList();
// if the stack is non-empty evaluate all the side-effects
if (verCurrentState.esStackDepth > 0)
{
impEvalSideEffects();
}
assert(verCurrentState.esStackDepth == 0);
GenTree* op1 =
gtNewHelperCallNode(CORINFO_HELP_VERIFICATION, TYP_VOID, gtNewCallArgs(gtNewIconNode(block->bbCodeOffs)));
// verCurrentState.esStackDepth = 0;
impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);
// The inliner is not able to handle methods that require throw block, so
// make sure this methods never gets inlined.
info.compCompHnd->setMethodAttribs(info.compMethodHnd, CORINFO_FLG_BAD_INLINEE);
}
/*****************************************************************************
*
*/
void Compiler::verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg))
{
verResetCurrentState(block, &verCurrentState);
verConvertBBToThrowVerificationException(block DEBUGARG(logMsg));
#ifdef DEBUG
impNoteLastILoffs(); // Remember at which BC offset the tree was finished
#endif // DEBUG
}
/******************************************************************************/
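// verMakeTypeInfo: build a verification typeInfo from a CorInfoType and (optional) class handle,
// returning the error typeInfo() when the type is unsupported (e.g. pointers, void) or inconsistent.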
typeInfo Compiler::verMakeTypeInfo(CorInfoType ciType, CORINFO_CLASS_HANDLE clsHnd)
{
assert(ciType < CORINFO_TYPE_COUNT);
typeInfo tiResult;
switch (ciType)
{
case CORINFO_TYPE_STRING:
case CORINFO_TYPE_CLASS:
tiResult = verMakeTypeInfo(clsHnd);
if (!tiResult.IsType(TI_REF))
{ // type must be consistent with element type
return typeInfo();
}
break;
#ifdef TARGET_64BIT
case CORINFO_TYPE_NATIVEINT:
case CORINFO_TYPE_NATIVEUINT:
if (clsHnd)
{
// If we have more precise information, use it
return verMakeTypeInfo(clsHnd);
}
else
{
return typeInfo::nativeInt();
}
break;
#endif // TARGET_64BIT
case CORINFO_TYPE_VALUECLASS:
case CORINFO_TYPE_REFANY:
tiResult = verMakeTypeInfo(clsHnd);
            // type must be consistent with element type
if (!tiResult.IsValueClass())
{
return typeInfo();
}
break;
case CORINFO_TYPE_VAR:
return verMakeTypeInfo(clsHnd);
case CORINFO_TYPE_PTR: // for now, pointers are treated as an error
case CORINFO_TYPE_VOID:
return typeInfo();
break;
case CORINFO_TYPE_BYREF:
{
CORINFO_CLASS_HANDLE childClassHandle;
CorInfoType childType = info.compCompHnd->getChildType(clsHnd, &childClassHandle);
return ByRef(verMakeTypeInfo(childType, childClassHandle));
}
break;
default:
if (clsHnd)
{ // If we have more precise information, use it
return typeInfo(TI_STRUCT, clsHnd);
}
else
{
return typeInfo(JITtype2tiType(ciType));
}
}
return tiResult;
}
/******************************************************************************/
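// verMakeTypeInfo: build a verification typeInfo from a class handle. When bashStructToRef is
// true, non-primitive value classes are reported as TI_REF instead of TI_STRUCT.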
typeInfo Compiler::verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd, bool bashStructToRef /* = false */)
{
if (clsHnd == nullptr)
{
return typeInfo();
}
// Byrefs should only occur in method and local signatures, which are accessed
// using ICorClassInfo and ICorClassInfo.getChildType.
// So findClass() and getClassAttribs() should not be called for byrefs
if (JITtype2varType(info.compCompHnd->asCorInfoType(clsHnd)) == TYP_BYREF)
{
assert(!"Did findClass() return a Byref?");
return typeInfo();
}
unsigned attribs = info.compCompHnd->getClassAttribs(clsHnd);
if (attribs & CORINFO_FLG_VALUECLASS)
{
CorInfoType t = info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd);
        // Meta-data validation should ensure that CORINFO_TYPE_BYREF does not
        // occur here, so we may want to change this to an assert instead.
if (t == CORINFO_TYPE_VOID || t == CORINFO_TYPE_BYREF || t == CORINFO_TYPE_PTR)
{
return typeInfo();
}
#ifdef TARGET_64BIT
if (t == CORINFO_TYPE_NATIVEINT || t == CORINFO_TYPE_NATIVEUINT)
{
return typeInfo::nativeInt();
}
#endif // TARGET_64BIT
if (t != CORINFO_TYPE_UNDEF)
{
return (typeInfo(JITtype2tiType(t)));
}
else if (bashStructToRef)
{
return (typeInfo(TI_REF, clsHnd));
}
else
{
return (typeInfo(TI_STRUCT, clsHnd));
}
}
else if (attribs & CORINFO_FLG_GENERIC_TYPE_VARIABLE)
{
// See comment in _typeInfo.h for why we do it this way.
return (typeInfo(TI_REF, clsHnd, true));
}
else
{
return (typeInfo(TI_REF, clsHnd));
}
}
/******************************************************************************/
bool Compiler::verIsSDArray(const typeInfo& ti)
{
if (ti.IsNullObjRef())
{ // nulls are SD arrays
return true;
}
if (!ti.IsType(TI_REF))
{
return false;
}
if (!info.compCompHnd->isSDArray(ti.GetClassHandleForObjRef()))
{
return false;
}
return true;
}
/******************************************************************************/
/* Given 'arrayObjectType' which is an array type, fetch the element type. */
/* Returns an error type if anything goes wrong */
typeInfo Compiler::verGetArrayElemType(const typeInfo& arrayObjectType)
{
assert(!arrayObjectType.IsNullObjRef()); // you need to check for null explicitly since that is a success case
if (!verIsSDArray(arrayObjectType))
{
return typeInfo();
}
CORINFO_CLASS_HANDLE childClassHandle = nullptr;
CorInfoType ciType = info.compCompHnd->getChildType(arrayObjectType.GetClassHandleForObjRef(), &childClassHandle);
return verMakeTypeInfo(ciType, childClassHandle);
}
/*****************************************************************************
*/
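// verParseArgSigToTypeInfo: build the verification typeInfo for one argument in a signature,
// querying the class handle explicitly for GC types since getArgType only fills it in for
// value types.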
typeInfo Compiler::verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args)
{
CORINFO_CLASS_HANDLE classHandle;
CorInfoType ciType = strip(info.compCompHnd->getArgType(sig, args, &classHandle));
var_types type = JITtype2varType(ciType);
if (varTypeIsGC(type))
{
// For efficiency, getArgType only returns something in classHandle for
        // value types. For other types that have additional type info, you
        // have to call back explicitly.
classHandle = info.compCompHnd->getArgClass(sig, args);
if (!classHandle)
{
NO_WAY("Could not figure out Class specified in argument or local signature");
}
}
return verMakeTypeInfo(ciType, classHandle);
}
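// verIsByRefLike: returns true if the type is a byref or a byref-like (ref struct) value class.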
bool Compiler::verIsByRefLike(const typeInfo& ti)
{
if (ti.IsByRef())
{
return true;
}
if (!ti.IsType(TI_STRUCT))
{
return false;
}
return info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_BYREF_LIKE;
}
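// verIsSafeToReturnByRef: a byref may only be returned if it refers to a permanent home
// (e.g. heap or static storage), not a slot on the current frame.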
bool Compiler::verIsSafeToReturnByRef(const typeInfo& ti)
{
if (ti.IsPermanentHomeByRef())
{
return true;
}
else
{
return false;
}
}
bool Compiler::verIsBoxable(const typeInfo& ti)
{
return (ti.IsPrimitiveType() || ti.IsObjRef() // includes boxed generic type variables
|| ti.IsUnboxedGenericTypeVar() ||
(ti.IsType(TI_STRUCT) &&
// exclude byreflike structs
!(info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_BYREF_LIKE)));
}
// Is it a boxed value type?
bool Compiler::verIsBoxedValueType(const typeInfo& ti)
{
if (ti.GetType() == TI_REF)
{
CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandleForObjRef();
return !!eeIsValueClass(clsHnd);
}
else
{
return false;
}
}
/*****************************************************************************
*
* Check if a TailCall is legal.
*/
bool Compiler::verCheckTailCallConstraint(
OPCODE opcode,
CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a type parameter?
    bool speculative // If true, won't throw if verification fails. Instead it will
// return false to the caller.
// If false, it will throw.
)
{
DWORD mflags;
CORINFO_SIG_INFO sig;
unsigned int popCount = 0; // we can't pop the stack since impImportCall needs it, so
// this counter is used to keep track of how many items have been
// virtually popped
CORINFO_METHOD_HANDLE methodHnd = nullptr;
CORINFO_CLASS_HANDLE methodClassHnd = nullptr;
unsigned methodClassFlgs = 0;
assert(impOpcodeIsCallOpcode(opcode));
if (compIsForInlining())
{
return false;
}
// for calli, VerifyOrReturn that this is not a virtual method
if (opcode == CEE_CALLI)
{
/* Get the call sig */
eeGetSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &sig);
// We don't know the target method, so we have to infer the flags, or
// assume the worst-case.
mflags = (sig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
}
else
{
methodHnd = pResolvedToken->hMethod;
mflags = info.compCompHnd->getMethodAttribs(methodHnd);
// When verifying generic code we pair the method handle with its
// owning class to get the exact method signature.
methodClassHnd = pResolvedToken->hClass;
assert(methodClassHnd);
eeGetMethodSig(methodHnd, &sig, methodClassHnd);
// opcode specific check
methodClassFlgs = info.compCompHnd->getClassAttribs(methodClassHnd);
}
// We must have got the methodClassHnd if opcode is not CEE_CALLI
assert((methodHnd != nullptr && methodClassHnd != nullptr) || opcode == CEE_CALLI);
if ((sig.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
{
eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &sig);
}
// check compatibility of the arguments
unsigned int argCount;
argCount = sig.numArgs;
CORINFO_ARG_LIST_HANDLE args;
args = sig.args;
while (argCount--)
{
typeInfo tiDeclared = verParseArgSigToTypeInfo(&sig, args).NormaliseForStack();
// check that the argument is not a byref for tailcalls
VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclared), "tailcall on byrefs", speculative);
// For unsafe code, we might have parameters containing pointer to the stack location.
// Disallow the tailcall for this kind.
CORINFO_CLASS_HANDLE classHandle;
CorInfoType ciType = strip(info.compCompHnd->getArgType(&sig, args, &classHandle));
VerifyOrReturnSpeculative(ciType != CORINFO_TYPE_PTR, "tailcall on CORINFO_TYPE_PTR", speculative);
args = info.compCompHnd->getArgNext(args);
}
// update popCount
popCount += sig.numArgs;
// check for 'this' which is on non-static methods, not called via NEWOBJ
if (!(mflags & CORINFO_FLG_STATIC))
{
// Always update the popCount.
// This is crucial for the stack calculation to be correct.
typeInfo tiThis = impStackTop(popCount).seTypeInfo;
popCount++;
if (opcode == CEE_CALLI)
{
// For CALLI, we don't know the methodClassHnd. Therefore, let's check the "this" object
// on the stack.
if (tiThis.IsValueClass())
{
tiThis.MakeByRef();
}
VerifyOrReturnSpeculative(!verIsByRefLike(tiThis), "byref in tailcall", speculative);
}
else
{
// Check type compatibility of the this argument
typeInfo tiDeclaredThis = verMakeTypeInfo(methodClassHnd);
if (tiDeclaredThis.IsValueClass())
{
tiDeclaredThis.MakeByRef();
}
VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclaredThis), "byref in tailcall", speculative);
}
}
// Tail calls on constrained calls should be illegal too:
// when instantiated at a value type, a constrained call may pass the address of a stack allocated value
VerifyOrReturnSpeculative(!pConstrainedResolvedToken, "byref in constrained tailcall", speculative);
// Get the exact view of the signature for an array method
if (sig.retType != CORINFO_TYPE_VOID)
{
if (methodClassFlgs & CORINFO_FLG_ARRAY)
{
assert(opcode != CEE_CALLI);
eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &sig);
}
}
typeInfo tiCalleeRetType = verMakeTypeInfo(sig.retType, sig.retTypeClass);
typeInfo tiCallerRetType =
verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
// void return type gets morphed into the error type, so we have to treat them specially here
if (sig.retType == CORINFO_TYPE_VOID)
{
VerifyOrReturnSpeculative(info.compMethodInfo->args.retType == CORINFO_TYPE_VOID, "tailcall return mismatch",
speculative);
}
else
{
VerifyOrReturnSpeculative(tiCompatibleWith(NormaliseForStack(tiCalleeRetType),
NormaliseForStack(tiCallerRetType), true),
"tailcall return mismatch", speculative);
}
// for tailcall, stack must be empty
VerifyOrReturnSpeculative(verCurrentState.esStackDepth == popCount, "stack non-empty on tailcall", speculative);
return true; // Yes, tailcall is legal
}
/*****************************************************************************
*
* Checks the IL verification rules for the call
*/
void Compiler::verVerifyCall(OPCODE opcode,
CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
bool tailCall,
bool readonlyCall,
const BYTE* delegateCreateStart,
const BYTE* codeAddr,
CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName))
{
DWORD mflags;
CORINFO_SIG_INFO* sig = nullptr;
unsigned int popCount = 0; // we can't pop the stack since impImportCall needs it, so
// this counter is used to keep track of how many items have been
// virtually popped
// for calli, VerifyOrReturn that this is not a virtual method
if (opcode == CEE_CALLI)
{
Verify(false, "Calli not verifiable");
return;
}
//<NICE> It would be nice to cache the rest of it, but eeFindMethod is the big ticket item.
mflags = callInfo->verMethodFlags;
sig = &callInfo->verSig;
if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
{
eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
}
// opcode specific check
unsigned methodClassFlgs = callInfo->classFlags;
switch (opcode)
{
case CEE_CALLVIRT:
// cannot do callvirt on valuetypes
VerifyOrReturn(!(methodClassFlgs & CORINFO_FLG_VALUECLASS), "callVirt on value class");
VerifyOrReturn(sig->hasThis(), "CallVirt on static method");
break;
case CEE_NEWOBJ:
{
assert(!tailCall); // Importer should not allow this
VerifyOrReturn((mflags & CORINFO_FLG_CONSTRUCTOR) && !(mflags & CORINFO_FLG_STATIC),
"newobj must be on instance");
if (methodClassFlgs & CORINFO_FLG_DELEGATE)
{
VerifyOrReturn(sig->numArgs == 2, "wrong number args to delegate ctor");
typeInfo tiDeclaredObj = verParseArgSigToTypeInfo(sig, sig->args).NormaliseForStack();
typeInfo tiDeclaredFtn =
verParseArgSigToTypeInfo(sig, info.compCompHnd->getArgNext(sig->args)).NormaliseForStack();
VerifyOrReturn(tiDeclaredFtn.IsNativeIntType(), "ftn arg needs to be a native int type");
assert(popCount == 0);
typeInfo tiActualObj = impStackTop(1).seTypeInfo;
typeInfo tiActualFtn = impStackTop(0).seTypeInfo;
VerifyOrReturn(tiActualFtn.IsMethod(), "delegate needs method as first arg");
VerifyOrReturn(tiCompatibleWith(tiActualObj, tiDeclaredObj, true), "delegate object type mismatch");
VerifyOrReturn(tiActualObj.IsNullObjRef() || tiActualObj.IsType(TI_REF),
"delegate object type mismatch");
CORINFO_CLASS_HANDLE objTypeHandle =
tiActualObj.IsNullObjRef() ? nullptr : tiActualObj.GetClassHandleForObjRef();
// the method signature must be compatible with the delegate's invoke method
// check that for virtual functions, the type of the object used to get the
// ftn ptr is the same as the type of the object passed to the delegate ctor.
// since this is a bit of work to determine in general, we pattern match stylized
// code sequences
// the delegate creation code check, which used to be done later, is now done here
// so we can read delegateMethodRef directly
// from the preceding LDFTN or CEE_LDVIRTFN instruction sequence;
// we then use it in our call to isCompatibleDelegate().
mdMemberRef delegateMethodRef = mdMemberRefNil;
VerifyOrReturn(verCheckDelegateCreation(delegateCreateStart, codeAddr, delegateMethodRef),
"must create delegates with certain IL");
CORINFO_RESOLVED_TOKEN delegateResolvedToken;
delegateResolvedToken.tokenContext = impTokenLookupContextHandle;
delegateResolvedToken.tokenScope = info.compScopeHnd;
delegateResolvedToken.token = delegateMethodRef;
delegateResolvedToken.tokenType = CORINFO_TOKENKIND_Method;
info.compCompHnd->resolveToken(&delegateResolvedToken);
CORINFO_CALL_INFO delegateCallInfo;
eeGetCallInfo(&delegateResolvedToken, nullptr /* constraint typeRef */, CORINFO_CALLINFO_SECURITYCHECKS,
&delegateCallInfo);
bool isOpenDelegate = false;
VerifyOrReturn(info.compCompHnd->isCompatibleDelegate(objTypeHandle, delegateResolvedToken.hClass,
tiActualFtn.GetMethod(), pResolvedToken->hClass,
&isOpenDelegate),
"function incompatible with delegate");
// check the constraints on the target method
VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(delegateResolvedToken.hClass),
"delegate target has unsatisfied class constraints");
VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(delegateResolvedToken.hClass,
tiActualFtn.GetMethod()),
"delegate target has unsatisfied method constraints");
// See ECMA spec section 1.8.1.5.2 (Delegating via instance dispatch)
// for additional verification rules for delegates
CORINFO_METHOD_HANDLE actualMethodHandle = tiActualFtn.GetMethod();
DWORD actualMethodAttribs = info.compCompHnd->getMethodAttribs(actualMethodHandle);
if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
{
if ((actualMethodAttribs & CORINFO_FLG_VIRTUAL) && ((actualMethodAttribs & CORINFO_FLG_FINAL) == 0))
{
VerifyOrReturn((tiActualObj.IsThisPtr() && lvaIsOriginalThisReadOnly()) ||
verIsBoxedValueType(tiActualObj),
"The 'this' parameter to the call must be either the calling method's "
"'this' parameter or "
"a boxed value type.");
}
}
if (actualMethodAttribs & CORINFO_FLG_PROTECTED)
{
bool targetIsStatic = actualMethodAttribs & CORINFO_FLG_STATIC;
Verify(targetIsStatic || !isOpenDelegate,
"Unverifiable creation of an open instance delegate for a protected member.");
CORINFO_CLASS_HANDLE instanceClassHnd = (tiActualObj.IsNullObjRef() || targetIsStatic)
? info.compClassHnd
: tiActualObj.GetClassHandleForObjRef();
// In the case of protected methods, it is a requirement that the 'this'
// pointer be a subclass of the current context. Perform this check.
Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
"Accessing protected method through wrong type.");
}
goto DONE_ARGS;
}
}
// fall thru to default checks
FALLTHROUGH;
default:
VerifyOrReturn(!(mflags & CORINFO_FLG_ABSTRACT), "method abstract");
}
VerifyOrReturn(!((mflags & CORINFO_FLG_CONSTRUCTOR) && (methodClassFlgs & CORINFO_FLG_DELEGATE)),
"can only newobj a delegate constructor");
// check compatibility of the arguments
unsigned int argCount;
argCount = sig->numArgs;
CORINFO_ARG_LIST_HANDLE args;
args = sig->args;
while (argCount--)
{
typeInfo tiActual = impStackTop(popCount + argCount).seTypeInfo;
typeInfo tiDeclared = verParseArgSigToTypeInfo(sig, args).NormaliseForStack();
VerifyOrReturn(tiCompatibleWith(tiActual, tiDeclared, true), "type mismatch");
args = info.compCompHnd->getArgNext(args);
}
DONE_ARGS:
// update popCount
popCount += sig->numArgs;
// check the 'this' pointer for non-static methods that are not called via NEWOBJ
CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
if (!(mflags & CORINFO_FLG_STATIC) && (opcode != CEE_NEWOBJ))
{
typeInfo tiThis = impStackTop(popCount).seTypeInfo;
popCount++;
// If it is null, we assume we can access it (since it will AV shortly)
// If it is anything but a reference class, there is no hierarchy, so
// again, we don't need the precise instance class to compute 'protected' access
if (tiThis.IsType(TI_REF))
{
instanceClassHnd = tiThis.GetClassHandleForObjRef();
}
// Check type compatibility of the this argument
typeInfo tiDeclaredThis = verMakeTypeInfo(pResolvedToken->hClass);
if (tiDeclaredThis.IsValueClass())
{
tiDeclaredThis.MakeByRef();
}
// If this is a call to the base class .ctor, set thisPtr Init for
// this block.
if (mflags & CORINFO_FLG_CONSTRUCTOR)
{
if (verTrackObjCtorInitState && tiThis.IsThisPtr() &&
verIsCallToInitThisPtr(info.compClassHnd, pResolvedToken->hClass))
{
assert(verCurrentState.thisInitialized !=
TIS_Bottom); // This should never be the case just from the logic of the verifier.
VerifyOrReturn(verCurrentState.thisInitialized == TIS_Uninit,
"Call to base class constructor when 'this' is possibly initialized");
// Otherwise, 'this' is now initialized.
verCurrentState.thisInitialized = TIS_Init;
tiThis.SetInitialisedObjRef();
}
else
{
// We allow direct calls to value type constructors
// NB: we have to check that the contents of tiThis is a value type, otherwise we could use a
// constrained callvirt to illegally re-enter a .ctor on a value of reference type.
VerifyOrReturn(tiThis.IsByRef() && DereferenceByRef(tiThis).IsValueClass(),
"Bad call to a constructor");
}
}
if (pConstrainedResolvedToken != nullptr)
{
VerifyOrReturn(tiThis.IsByRef(), "non-byref this type in constrained call");
typeInfo tiConstraint = verMakeTypeInfo(pConstrainedResolvedToken->hClass);
// We just dereference this and test for equality
tiThis.DereferenceByRef();
VerifyOrReturn(typeInfo::AreEquivalent(tiThis, tiConstraint),
"this type mismatch with constrained type operand");
// Now pretend the this type is the boxed constrained type, for the sake of subsequent checks
tiThis = typeInfo(TI_REF, pConstrainedResolvedToken->hClass);
}
// To support direct calls on readonly byrefs, just pretend tiDeclaredThis is readonly too
if (tiDeclaredThis.IsByRef() && tiThis.IsReadonlyByRef())
{
tiDeclaredThis.SetIsReadonlyByRef();
}
VerifyOrReturn(tiCompatibleWith(tiThis, tiDeclaredThis, true), "this type mismatch");
if (tiThis.IsByRef())
{
// Find the actual type where the method exists (as opposed to what is declared
// in the metadata). This is to prevent passing a byref as the "this" argument
// while calling methods like System.ValueType.GetHashCode() which expect boxed objects.
CORINFO_CLASS_HANDLE actualClassHnd = info.compCompHnd->getMethodClass(pResolvedToken->hMethod);
VerifyOrReturn(eeIsValueClass(actualClassHnd),
"Call to base type of valuetype (which is never a valuetype)");
}
// Rules for non-virtual call to a non-final virtual method:
// Define:
// The "this" pointer is considered to be "possibly written" if
// 1. Its address has been taken (LDARGA 0) anywhere in the method.
// (or)
// 2. It has been stored to (STARG.0) anywhere in the method.
// A non-virtual call to a non-final virtual method is only allowed if
// 1. The this pointer passed to the callee is an instance of a boxed value type.
// (or)
// 2. The this pointer passed to the callee is the current method's this pointer.
// (and) The current method's this pointer is not "possibly written".
// Thus the rule is that if you assign to 'this' ANYWHERE you can't make "base" calls to
// virtual methods. (Luckily this doesn't affect .ctors, since they are not virtual).
// This is stronger than is strictly needed, but implementing a laxer rule is significantly
// harder and more error prone.
if (opcode == CEE_CALL && (mflags & CORINFO_FLG_VIRTUAL) && ((mflags & CORINFO_FLG_FINAL) == 0))
{
VerifyOrReturn((tiThis.IsThisPtr() && lvaIsOriginalThisReadOnly()) || verIsBoxedValueType(tiThis),
"The 'this' parameter to the call must be either the calling method's 'this' parameter or "
"a boxed value type.");
}
}
// check any constraints on the callee's class and type parameters
VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(pResolvedToken->hClass),
"method has unsatisfied class constraints");
VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(pResolvedToken->hClass, pResolvedToken->hMethod),
"method has unsatisfied method constraints");
if (mflags & CORINFO_FLG_PROTECTED)
{
VerifyOrReturn(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
"Can't access protected method");
}
// Get the exact view of the signature for an array method
if (sig->retType != CORINFO_TYPE_VOID)
{
eeGetMethodSig(pResolvedToken->hMethod, sig, pResolvedToken->hClass);
}
// "readonly." prefixed calls are only allowed for the Address operation on arrays.
// The methods supported by array types are under the control of the EE
// so we can trust that only the Address operation returns a byref.
if (readonlyCall)
{
typeInfo tiCalleeRetType = verMakeTypeInfo(sig->retType, sig->retTypeClass);
VerifyOrReturn((methodClassFlgs & CORINFO_FLG_ARRAY) && tiCalleeRetType.IsByRef(),
"unexpected use of readonly prefix");
}
// Verify the tailcall
if (tailCall)
{
verCheckTailCallConstraint(opcode, pResolvedToken, pConstrainedResolvedToken, false);
}
}
/*****************************************************************************
* Checks that a delegate creation is done using the following pattern:
* dup
* ldvirtftn targetMemberRef
* OR
* ldftn targetMemberRef
*
* 'delegateCreateStart' points at the last dup or ldftn in this basic block (null if
* not in this basic block)
*
* targetMemberRef is read from the code sequence.
* targetMemberRef is validated iff verificationNeeded.
*/
bool Compiler::verCheckDelegateCreation(const BYTE* delegateCreateStart,
const BYTE* codeAddr,
mdMemberRef& targetMemberRef)
{
if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
{
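// ldftn is a two-byte opcode (0xFE 0x06), so the member token immediately
// follows at offset 2 from the start of the instruction.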
targetMemberRef = getU4LittleEndian(&delegateCreateStart[2]);
return true;
}
else if (impIsDUP_LDVIRTFTN_TOKEN(delegateCreateStart, codeAddr))
{
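// dup (one byte) followed by the two-byte ldvirtftn opcode (0xFE 0x07) puts
// the member token at offset 3 from the start of the sequence.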
targetMemberRef = getU4LittleEndian(&delegateCreateStart[3]);
return true;
}
return false;
}
typeInfo Compiler::verVerifySTIND(const typeInfo& tiTo, const typeInfo& value, const typeInfo& instrType)
{
Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
typeInfo ptrVal = verVerifyLDIND(tiTo, instrType);
typeInfo normPtrVal = typeInfo(ptrVal).NormaliseForStack();
if (!tiCompatibleWith(value, normPtrVal, true))
{
Verify(tiCompatibleWith(value, normPtrVal, true), "type mismatch");
}
return ptrVal;
}
typeInfo Compiler::verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType)
{
assert(!instrType.IsStruct());
typeInfo ptrVal;
if (ptr.IsByRef())
{
ptrVal = DereferenceByRef(ptr);
if (instrType.IsObjRef() && !ptrVal.IsObjRef())
{
Verify(false, "bad pointer");
}
else if (!instrType.IsObjRef() && !typeInfo::AreEquivalent(instrType, ptrVal))
{
Verify(false, "pointer not consistent with instr");
}
}
else
{
Verify(false, "pointer not byref");
}
return ptrVal;
}
// Verify that the field is used properly. 'tiThis' is NULL for statics,
// 'fieldFlags' is the field's attributes, and 'mutator' is true if it is a
// ld*flda or a st*fld.
// 'enclosingClass' is given if we are accessing a field in some specific type.
void Compiler::verVerifyField(CORINFO_RESOLVED_TOKEN* pResolvedToken,
const CORINFO_FIELD_INFO& fieldInfo,
const typeInfo* tiThis,
bool mutator,
bool allowPlainStructAsThis)
{
CORINFO_CLASS_HANDLE enclosingClass = pResolvedToken->hClass;
unsigned fieldFlags = fieldInfo.fieldFlags;
CORINFO_CLASS_HANDLE instanceClass =
info.compClassHnd; // for statics, we imagine the instance is the current class.
bool isStaticField = ((fieldFlags & CORINFO_FLG_FIELD_STATIC) != 0);
if (mutator)
{
Verify(!(fieldFlags & CORINFO_FLG_FIELD_UNMANAGED), "mutating an RVA based static");
if ((fieldFlags & CORINFO_FLG_FIELD_FINAL))
{
Verify((info.compFlags & CORINFO_FLG_CONSTRUCTOR) && enclosingClass == info.compClassHnd &&
info.compIsStatic == isStaticField,
"bad use of initonly field (set or address taken)");
}
}
if (tiThis == nullptr)
{
Verify(isStaticField, "used static opcode with non-static field");
}
else
{
typeInfo tThis = *tiThis;
if (allowPlainStructAsThis && tThis.IsValueClass())
{
tThis.MakeByRef();
}
// If it is null, we assume we can access it (since it will AV shortly)
// If it is anything but a reference class, there is no hierarchy, so
// again, we don't need the precise instance class to compute 'protected' access
if (tiThis->IsType(TI_REF))
{
instanceClass = tiThis->GetClassHandleForObjRef();
}
// Note that even if the field is static, we require that the this pointer
// satisfy the same constraints as a non-static field. This happens to
// be simpler and seems reasonable
typeInfo tiDeclaredThis = verMakeTypeInfo(enclosingClass);
if (tiDeclaredThis.IsValueClass())
{
tiDeclaredThis.MakeByRef();
// we allow read-only tThis, on any field access (even stores!), because if the
// class implementor wants to prohibit stores he should make the field private.
// we do this by setting the read-only bit on the type we compare tThis to.
tiDeclaredThis.SetIsReadonlyByRef();
}
else if (verTrackObjCtorInitState && tThis.IsThisPtr())
{
// Any field access is legal on "uninitialized" this pointers.
// The easiest way to implement this is to simply set the
// initialized bit for the duration of the type check on the
// field access only. It does not change the state of the "this"
// for the function as a whole. Note that the "tThis" is a copy
// of the original "this" type (*tiThis) passed in.
tThis.SetInitialisedObjRef();
}
Verify(tiCompatibleWith(tThis, tiDeclaredThis, true), "this type mismatch");
}
// Presently the JIT does not check that we don't store or take the address of init-only fields
// since we cannot guarantee their immutability and it is not a security issue.
// check any constraints on the field's class --- accessing the field might cause a class constructor to run.
VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(enclosingClass),
"field has unsatisfied class constraints");
if (fieldFlags & CORINFO_FLG_FIELD_PROTECTED)
{
Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClass),
"Accessing protected method through wrong type.");
}
}
void Compiler::verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode)
{
if (tiOp1.IsNumberType())
{
#ifdef TARGET_64BIT
Verify(tiCompatibleWith(tiOp1, tiOp2, true), "Cond type mismatch");
#else // TARGET_64BIT
// [10/17/2013] Consider changing this: to put on my verification lawyer hat,
// this is non-conforming to the ECMA Spec: types don't have to be equivalent,
// but compatible, since we can coalesce native int with int32 (see section III.1.5).
Verify(typeInfo::AreEquivalent(tiOp1, tiOp2), "Cond type mismatch");
#endif // !TARGET_64BIT
}
else if (tiOp1.IsObjRef())
{
switch (opcode)
{
case CEE_BEQ_S:
case CEE_BEQ:
case CEE_BNE_UN_S:
case CEE_BNE_UN:
case CEE_CEQ:
case CEE_CGT_UN:
break;
default:
Verify(false, "Cond not allowed on object types");
}
Verify(tiOp2.IsObjRef(), "Cond type mismatch");
}
else if (tiOp1.IsByRef())
{
Verify(tiOp2.IsByRef(), "Cond type mismatch");
}
else
{
Verify(tiOp1.IsMethod() && tiOp2.IsMethod(), "Cond type mismatch");
}
}
void Compiler::verVerifyThisPtrInitialised()
{
if (verTrackObjCtorInitState)
{
Verify(verCurrentState.thisInitialized == TIS_Init, "this ptr is not initialized");
}
}
bool Compiler::verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target)
{
// Either target == context, in this case calling an alternate .ctor
// Or target is the immediate parent of context
return ((target == context) || (target == info.compCompHnd->getParentType(context)));
}
GenTree* Compiler::impImportLdvirtftn(GenTree* thisPtr,
CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_CALL_INFO* pCallInfo)
{
if ((pCallInfo->methodFlags & CORINFO_FLG_EnC) && !(pCallInfo->classFlags & CORINFO_FLG_INTERFACE))
{
NO_WAY("Virtual call to a function added via EnC is not supported");
}
// CoreRT generic virtual method
if ((pCallInfo->sig.sigInst.methInstCount != 0) && IsTargetAbi(CORINFO_CORERT_ABI))
{
GenTree* runtimeMethodHandle =
impLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, GTF_ICON_METHOD_HDL, pCallInfo->hMethod);
return gtNewHelperCallNode(CORINFO_HELP_GVMLOOKUP_FOR_SLOT, TYP_I_IMPL,
gtNewCallArgs(thisPtr, runtimeMethodHandle));
}
#ifdef FEATURE_READYTORUN
if (opts.IsReadyToRun())
{
if (!pCallInfo->exactContextNeedsRuntimeLookup)
{
GenTreeCall* call =
gtNewHelperCallNode(CORINFO_HELP_READYTORUN_VIRTUAL_FUNC_PTR, TYP_I_IMPL, gtNewCallArgs(thisPtr));
call->setEntryPoint(pCallInfo->codePointerLookup.constLookup);
return call;
}
// We need a runtime lookup. CoreRT has a ReadyToRun helper for that too.
if (IsTargetAbi(CORINFO_CORERT_ABI))
{
GenTree* ctxTree = getRuntimeContextTree(pCallInfo->codePointerLookup.lookupKind.runtimeLookupKind);
return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
gtNewCallArgs(ctxTree), &pCallInfo->codePointerLookup.lookupKind);
}
}
#endif
// Get the exact descriptor for the static callsite
GenTree* exactTypeDesc = impParentClassTokenToHandle(pResolvedToken);
if (exactTypeDesc == nullptr)
{ // compDonotInline()
return nullptr;
}
GenTree* exactMethodDesc = impTokenToHandle(pResolvedToken);
if (exactMethodDesc == nullptr)
{ // compDonotInline()
return nullptr;
}
GenTreeCall::Use* helpArgs = gtNewCallArgs(exactMethodDesc);
helpArgs = gtPrependNewCallArg(exactTypeDesc, helpArgs);
helpArgs = gtPrependNewCallArg(thisPtr, helpArgs);
// Call helper function. This gets the target address of the final destination callsite.
return gtNewHelperCallNode(CORINFO_HELP_VIRTUAL_FUNC_PTR, TYP_I_IMPL, helpArgs);
}
//------------------------------------------------------------------------
// impBoxPatternMatch: match and import common box idioms
//
// Arguments:
// pResolvedToken - resolved token from the box operation
// codeAddr - position in IL stream after the box instruction
// codeEndp - end of IL stream
//
// Return Value:
// Number of IL bytes matched and imported, -1 otherwise
//
// Notes:
// pResolvedToken is known to be a value type; ref type boxing
// is handled in the CEE_BOX clause.
int Compiler::impBoxPatternMatch(CORINFO_RESOLVED_TOKEN* pResolvedToken,
const BYTE* codeAddr,
const BYTE* codeEndp,
bool makeInlineObservation)
{
if (codeAddr >= codeEndp)
{
return -1;
}
switch (codeAddr[0])
{
case CEE_UNBOX_ANY:
// box + unbox.any
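// A stylized sequence such as "box !!T; unbox.any !!T" (common in generic code)
// simply round-trips the value: when both tokens resolve to the same value type,
// the pair is a nop and both instructions can be elided.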
if (codeAddr + 1 + sizeof(mdToken) <= codeEndp)
{
if (makeInlineObservation)
{
compInlineResult->Note(InlineObservation::CALLEE_FOLDABLE_BOX);
return 1 + sizeof(mdToken);
}
CORINFO_RESOLVED_TOKEN unboxResolvedToken;
impResolveToken(codeAddr + 1, &unboxResolvedToken, CORINFO_TOKENKIND_Class);
// See if the resolved tokens describe types that are equal.
const TypeCompareState compare =
info.compCompHnd->compareTypesForEquality(unboxResolvedToken.hClass, pResolvedToken->hClass);
// If so, box/unbox.any is a nop.
if (compare == TypeCompareState::Must)
{
JITDUMP("\n Importing BOX; UNBOX.ANY as NOP\n");
// Skip the next unbox.any instruction
return 1 + sizeof(mdToken);
}
}
break;
case CEE_BRTRUE:
case CEE_BRTRUE_S:
case CEE_BRFALSE:
case CEE_BRFALSE_S:
// box + br_true/false
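// A freshly boxed (non-Nullable) value type is never null, so the branch
// condition is a compile-time constant; we only need to preserve a null check
// if evaluating the boxed expression itself could fault. The "? 5 : 2" below
// accounts for the 4-byte operand of the long branch forms versus the 1-byte
// operand of the short forms.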
if ((codeAddr + ((codeAddr[0] >= CEE_BRFALSE) ? 5 : 2)) <= codeEndp)
{
if (makeInlineObservation)
{
compInlineResult->Note(InlineObservation::CALLEE_FOLDABLE_BOX);
return 0;
}
GenTree* const treeToBox = impStackTop().val;
bool canOptimize = true;
GenTree* treeToNullcheck = nullptr;
// Can the thing being boxed cause a side effect?
if ((treeToBox->gtFlags & GTF_SIDE_EFFECT) != 0)
{
// Is this a side effect we can replicate cheaply?
if (((treeToBox->gtFlags & GTF_SIDE_EFFECT) == GTF_EXCEPT) &&
treeToBox->OperIs(GT_OBJ, GT_BLK, GT_IND))
{
// Yes, we just need to perform a null check if needed.
GenTree* const addr = treeToBox->AsOp()->gtGetOp1();
if (fgAddrCouldBeNull(addr))
{
treeToNullcheck = addr;
}
}
else
{
canOptimize = false;
}
}
if (canOptimize)
{
CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass);
if (boxHelper == CORINFO_HELP_BOX)
{
JITDUMP("\n Importing BOX; BR_TRUE/FALSE as %sconstant\n",
treeToNullcheck == nullptr ? "" : "nullcheck+");
impPopStack();
GenTree* result = gtNewIconNode(1);
if (treeToNullcheck != nullptr)
{
GenTree* nullcheck = gtNewNullCheck(treeToNullcheck, compCurBB);
result = gtNewOperNode(GT_COMMA, TYP_INT, nullcheck, result);
}
impPushOnStack(result, typeInfo(TI_INT));
return 0;
}
}
}
break;
case CEE_ISINST:
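// Need the isinst opcode, its 4-byte token, and at least one byte of the
// following instruction so we can peek at what comes next.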
if (codeAddr + 1 + sizeof(mdToken) + 1 <= codeEndp)
{
const BYTE* nextCodeAddr = codeAddr + 1 + sizeof(mdToken);
switch (nextCodeAddr[0])
{
// box + isinst + br_true/false
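// When the ordinary CORINFO_HELP_BOX helper would be used and the cast result
// is statically known, the whole sequence folds to a constant. For Nullable<T>
// boxes it folds to a read of the hasValue field, or to a constant false when
// the underlying type can never satisfy the cast.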
case CEE_BRTRUE:
case CEE_BRTRUE_S:
case CEE_BRFALSE:
case CEE_BRFALSE_S:
if ((nextCodeAddr + ((nextCodeAddr[0] >= CEE_BRFALSE) ? 5 : 2)) <= codeEndp)
{
if (makeInlineObservation)
{
compInlineResult->Note(InlineObservation::CALLEE_FOLDABLE_BOX);
return 1 + sizeof(mdToken);
}
if (!(impStackTop().val->gtFlags & GTF_SIDE_EFFECT))
{
CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass);
if (boxHelper == CORINFO_HELP_BOX)
{
CORINFO_RESOLVED_TOKEN isInstResolvedToken;
impResolveToken(codeAddr + 1, &isInstResolvedToken, CORINFO_TOKENKIND_Casting);
TypeCompareState castResult =
info.compCompHnd->compareTypesForCast(pResolvedToken->hClass,
isInstResolvedToken.hClass);
if (castResult != TypeCompareState::May)
{
JITDUMP("\n Importing BOX; ISINST; BR_TRUE/FALSE as constant\n");
impPopStack();
impPushOnStack(gtNewIconNode((castResult == TypeCompareState::Must) ? 1 : 0),
typeInfo(TI_INT));
// Skip the next isinst instruction
return 1 + sizeof(mdToken);
}
}
else if (boxHelper == CORINFO_HELP_BOX_NULLABLE)
{
// For nullable we're going to fold it to "ldfld hasValue + brtrue/brfalse" or
// "ldc.i4.0 + brtrue/brfalse" in case if the underlying type is not castable to
// the target type.
CORINFO_RESOLVED_TOKEN isInstResolvedToken;
impResolveToken(codeAddr + 1, &isInstResolvedToken, CORINFO_TOKENKIND_Casting);
CORINFO_CLASS_HANDLE nullableCls = pResolvedToken->hClass;
CORINFO_CLASS_HANDLE underlyingCls = info.compCompHnd->getTypeForBox(nullableCls);
TypeCompareState castResult =
info.compCompHnd->compareTypesForCast(underlyingCls,
isInstResolvedToken.hClass);
if (castResult == TypeCompareState::Must)
{
const CORINFO_FIELD_HANDLE hasValueFldHnd =
info.compCompHnd->getFieldInClass(nullableCls, 0);
assert(info.compCompHnd->getFieldOffset(hasValueFldHnd) == 0);
assert(!strcmp(info.compCompHnd->getFieldName(hasValueFldHnd, nullptr),
"hasValue"));
GenTree* objToBox = impPopStack().val;
// Spill struct to get its address (to access hasValue field)
objToBox =
impGetStructAddr(objToBox, nullableCls, (unsigned)CHECK_SPILL_ALL, true);
impPushOnStack(gtNewFieldRef(TYP_BOOL, hasValueFldHnd, objToBox, 0),
typeInfo(TI_INT));
JITDUMP("\n Importing BOX; ISINST; BR_TRUE/FALSE as nullableVT.hasValue\n");
return 1 + sizeof(mdToken);
}
else if (castResult == TypeCompareState::MustNot)
{
impPopStack();
impPushOnStack(gtNewIconNode(0), typeInfo(TI_INT));
JITDUMP("\n Importing BOX; ISINST; BR_TRUE/FALSE as constant (false)\n");
return 1 + sizeof(mdToken);
}
}
}
}
break;
// box + isinst + unbox.any
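// If the box, isinst and unbox.any tokens all resolve to the same type, the
// three instructions cancel out and the sequence is imported as a nop.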
case CEE_UNBOX_ANY:
if ((nextCodeAddr + 1 + sizeof(mdToken)) <= codeEndp)
{
if (makeInlineObservation)
{
compInlineResult->Note(InlineObservation::CALLEE_FOLDABLE_BOX);
return 2 + sizeof(mdToken) * 2;
}
// See if the resolved tokens in box, isinst and unbox.any describe types that are equal.
CORINFO_RESOLVED_TOKEN isinstResolvedToken = {};
impResolveToken(codeAddr + 1, &isinstResolvedToken, CORINFO_TOKENKIND_Class);
if (info.compCompHnd->compareTypesForEquality(isinstResolvedToken.hClass,
pResolvedToken->hClass) ==
TypeCompareState::Must)
{
CORINFO_RESOLVED_TOKEN unboxResolvedToken = {};
impResolveToken(nextCodeAddr + 1, &unboxResolvedToken, CORINFO_TOKENKIND_Class);
// If so, box + isinst + unbox.any is a nop.
if (info.compCompHnd->compareTypesForEquality(unboxResolvedToken.hClass,
pResolvedToken->hClass) ==
TypeCompareState::Must)
{
JITDUMP("\n Importing BOX; ISINST, UNBOX.ANY as NOP\n");
return 2 + sizeof(mdToken) * 2;
}
}
}
break;
}
}
break;
default:
break;
}
return -1;
}
//------------------------------------------------------------------------
// impImportAndPushBox: build and import a value-type box
//
// Arguments:
// pResolvedToken - resolved token from the box operation
//
// Return Value:
// None.
//
// Side Effects:
// The value to be boxed is popped from the stack, and a tree for
// the boxed value is pushed. This method may create upstream
// statements, spill side effecting trees, and create new temps.
//
// If importing an inlinee, we may also discover the inline must
// fail. If so there is no new value pushed on the stack. Callers
// should use CompDoNotInline after calling this method to see if
// ongoing importation should be aborted.
//
// Notes:
// Boxing of ref classes results in the same value as the value on
// the top of the stack, so is handled inline in impImportBlockCode
// for the CEE_BOX case. Only value or primitive type boxes make it
// here.
//
// Boxing for nullable types is done via a helper call; boxing
// of other value types is expanded inline or handled via helper
// call, depending on the jit's codegen mode.
//
// When the jit is operating in size and time constrained modes,
// using a helper call here can save jit time and code size. But it
// also may inhibit cleanup optimizations that could have had an
// even greater beneficial effect on code size and jit time. An optimal
// strategy may need to peek ahead and see if it is easy to tell how
// the box is being used. For now, we defer.
void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken)
{
// Spill any special side effects
impSpillSpecialSideEff();
// Get the expression to box from the stack.
GenTree* op1 = nullptr;
GenTree* op2 = nullptr;
StackEntry se = impPopStack();
CORINFO_CLASS_HANDLE operCls = se.seTypeInfo.GetClassHandle();
GenTree* exprToBox = se.val;
// Look at what helper we should use.
CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass);
// Determine what expansion to prefer.
//
// In size/time/debuggable constrained modes, the helper call
// expansion for box is generally smaller and is preferred, unless
// the value to box is a struct that comes from a call. In that
// case the call can construct its return value directly into the
// box payload, saving possibly some up-front zeroing.
//
// Currently primitive type boxes always get inline expanded. We may
// want to do the same for small structs if they don't come from
// calls and don't have GC pointers, since explicitly copying such
// structs is cheap.
JITDUMP("\nCompiler::impImportAndPushBox -- handling BOX(value class) via");
bool canExpandInline = (boxHelper == CORINFO_HELP_BOX);
bool optForSize = !exprToBox->IsCall() && (operCls != nullptr) && opts.OptimizationDisabled();
bool expandInline = canExpandInline && !optForSize;
if (expandInline)
{
JITDUMP(" inline allocate/copy sequence\n");
// we are doing 'normal' boxing. This means that we can inline the box operation
// Box(expr) gets morphed into
// temp = new(clsHnd)
// cpobj(temp+4, expr, clsHnd)
// push temp
// The code paths differ slightly below for structs and primitives because
// "cpobj" differs in these cases. In one case you get
// impAssignStructPtr(temp+4, expr, clsHnd)
// and the other you get
// *(temp+4) = expr
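// (The "+4" above is historical shorthand: the payload actually starts
// TARGET_POINTER_SIZE bytes into the object, just past the method table
// pointer, as the gtNewIconNode(TARGET_POINTER_SIZE, ...) call below shows.)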
if (opts.OptimizationDisabled())
{
// For minopts/debug code, try and minimize the total number
// of box temps by reusing an existing temp when possible.
if (impBoxTempInUse || impBoxTemp == BAD_VAR_NUM)
{
impBoxTemp = lvaGrabTemp(true DEBUGARG("Reusable Box Helper"));
}
}
else
{
// When optimizing, use a new temp for each box operation
// since we then know the exact class of the box temp.
impBoxTemp = lvaGrabTemp(true DEBUGARG("Single-def Box Helper"));
lvaTable[impBoxTemp].lvType = TYP_REF;
lvaTable[impBoxTemp].lvSingleDef = 1;
JITDUMP("Marking V%02u as a single def local\n", impBoxTemp);
const bool isExact = true;
lvaSetClass(impBoxTemp, pResolvedToken->hClass, isExact);
}
// needs to stay in use until this box expression is appended to
// some other node. We approximate this by keeping it alive until
// the opcode stack becomes empty
impBoxTempInUse = true;
// Remember the current last statement in case we need to move
// a range of statements to ensure the box temp is initialized
// before it's used.
//
Statement* const cursor = impLastStmt;
const bool useParent = false;
op1 = gtNewAllocObjNode(pResolvedToken, useParent);
if (op1 == nullptr)
{
// If we fail to create the newobj node, we must be inlining
// and have run across a type we can't describe.
//
assert(compDonotInline());
return;
}
// Remember that this basic block contains 'new' of an object,
// and so does this method
//
compCurBB->bbFlags |= BBF_HAS_NEWOBJ;
optMethodFlags |= OMF_HAS_NEWOBJ;
// Assign the boxed object to the box temp.
//
GenTree* asg = gtNewTempAssign(impBoxTemp, op1);
Statement* asgStmt = impAppendTree(asg, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);
// If the exprToBox is a call that returns its value via a ret buf arg,
// move the assignment statement(s) before the call (which must be a top level tree).
//
// We do this because impAssignStructPtr (invoked below) will
// back-substitute into a call when it sees a GT_RET_EXPR and the call
// has a hidden buffer pointer, so we need to reorder things to avoid
// creating out-of-sequence IR.
//
if (varTypeIsStruct(exprToBox) && exprToBox->OperIs(GT_RET_EXPR))
{
GenTreeCall* const call = exprToBox->AsRetExpr()->gtInlineCandidate->AsCall();
if (call->HasRetBufArg())
{
JITDUMP("Must insert newobj stmts for box before call [%06u]\n", dspTreeID(call));
// Walk back through the statements in this block, looking for the one
// that has this call as the root node.
//
// Because gtNewTempAssign (above) may have added statements that
// feed into the actual assignment we need to move this set of added
// statements as a group.
//
// Note boxed allocations are side-effect free (no com or finalizer) so
// our only worries here are (correctness) not overlapping the box temp
// lifetime and (perf) stretching the temp lifetime across the inlinee
// body.
//
// Since this is an inline candidate, we must be optimizing, and so we have
// a unique box temp per call. So no worries about overlap.
//
assert(!opts.OptimizationDisabled());
// Lifetime stretching could be addressed with some extra cleverness--sinking
// the allocation back down to just before the copy, once we figure out
// where the copy is. We defer for now.
//
Statement* insertBeforeStmt = cursor;
noway_assert(insertBeforeStmt != nullptr);
while (true)
{
if (insertBeforeStmt->GetRootNode() == call)
{
break;
}
// If we've searched all the statements in the block and failed to
// find the call, then something's wrong.
//
noway_assert(insertBeforeStmt != impStmtList);
insertBeforeStmt = insertBeforeStmt->GetPrevStmt();
}
// Found the call. Move the statements comprising the assignment.
//
JITDUMP("Moving " FMT_STMT "..." FMT_STMT " before " FMT_STMT "\n", cursor->GetNextStmt()->GetID(),
asgStmt->GetID(), insertBeforeStmt->GetID());
assert(asgStmt == impLastStmt);
do
{
Statement* movingStmt = impExtractLastStmt();
impInsertStmtBefore(movingStmt, insertBeforeStmt);
insertBeforeStmt = movingStmt;
} while (impLastStmt != cursor);
}
}
// Create a pointer to the box payload in op1.
//
op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
op2 = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL);
op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, op2);
// Copy from the exprToBox to the box payload.
//
if (varTypeIsStruct(exprToBox))
{
assert(info.compCompHnd->getClassSize(pResolvedToken->hClass) == info.compCompHnd->getClassSize(operCls));
op1 = impAssignStructPtr(op1, exprToBox, operCls, (unsigned)CHECK_SPILL_ALL);
}
else
{
var_types lclTyp = exprToBox->TypeGet();
if (lclTyp == TYP_BYREF)
{
lclTyp = TYP_I_IMPL;
}
CorInfoType jitType = info.compCompHnd->asCorInfoType(pResolvedToken->hClass);
if (impIsPrimitive(jitType))
{
lclTyp = JITtype2varType(jitType);
}
var_types srcTyp = exprToBox->TypeGet();
var_types dstTyp = lclTyp;
// We allow float <-> double mismatches and implicit truncation for small types.
assert((genActualType(srcTyp) == genActualType(dstTyp)) ||
(varTypeIsFloating(srcTyp) == varTypeIsFloating(dstTyp)));
// Note regarding small types.
// We are going to store to the box here via an indirection, so the cast added below is
// redundant, since the store has an implicit truncation semantic. The reason we still
// add this cast is so that the code which deals with GT_BOX optimizations does not have
// to account for this implicit truncation (e. g. understand that BOX<byte>(0xFF + 1) is
// actually BOX<byte>(0) or deal with signedness mismatch and other GT_CAST complexities).
if (srcTyp != dstTyp)
{
exprToBox = gtNewCastNode(genActualType(dstTyp), exprToBox, false, dstTyp);
}
op1 = gtNewAssignNode(gtNewOperNode(GT_IND, dstTyp, op1), exprToBox);
}
// Spill eval stack to flush out any pending side effects.
impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportAndPushBox"));
// Set up this copy as a second assignment.
Statement* copyStmt = impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);
op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
// Record that this is a "box" node and keep track of the matching parts.
op1 = new (this, GT_BOX) GenTreeBox(TYP_REF, op1, asgStmt, copyStmt);
// If it is a value class, mark the "box" node. We can use this information
// to optimise several cases:
// "box(x) == null" --> false
// "(box(x)).CallAnInterfaceMethod(...)" --> "(&x).CallAValueTypeMethod"
// "(box(x)).CallAnObjectMethod(...)" --> "(&x).CallAValueTypeMethod"
op1->gtFlags |= GTF_BOX_VALUE;
assert(op1->IsBoxedValue());
assert(asg->gtOper == GT_ASG);
}
else
{
// Don't optimize, just call the helper and be done with it.
JITDUMP(" helper call because: %s\n", canExpandInline ? "optimizing for size" : "nullable");
assert(operCls != nullptr);
// Ensure that the value class is restored
op2 = impTokenToHandle(pResolvedToken, nullptr, true /* mustRestoreHandle */);
if (op2 == nullptr)
{
// We must be backing out of an inline.
assert(compDonotInline());
return;
}
GenTreeCall::Use* args =
gtNewCallArgs(op2, impGetStructAddr(exprToBox, operCls, (unsigned)CHECK_SPILL_ALL, true));
op1 = gtNewHelperCallNode(boxHelper, TYP_REF, args);
}
/* Push the result back on the stack, */
/* even if clsHnd is a value class we want the TI_REF */
typeInfo tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(pResolvedToken->hClass));
impPushOnStack(op1, tiRetVal);
}
//------------------------------------------------------------------------
// impImportNewObjArray: Build and import `new` of multi-dimensional array
//
// Arguments:
// pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
// by a call to CEEInfo::resolveToken().
// pCallInfo - The CORINFO_CALL_INFO that has been initialized
// by a call to CEEInfo::getCallInfo().
//
// Assumptions:
// The multi-dimensional array constructor arguments (array dimensions) are
// pushed on the IL stack on entry to this method.
//
// Notes:
// Multi-dimensional array constructors are imported as calls to a JIT
// helper, not as regular calls.
void Compiler::impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
{
GenTree* classHandle = impParentClassTokenToHandle(pResolvedToken);
if (classHandle == nullptr)
{ // compDonotInline()
return;
}
assert(pCallInfo->sig.numArgs);
GenTree* node;
// Reuse the temp used to pass the array dimensions to avoid bloating
// the stack frame in case there are multiple calls to multi-dim array
// constructors within a single method.
if (lvaNewObjArrayArgs == BAD_VAR_NUM)
{
lvaNewObjArrayArgs = lvaGrabTemp(false DEBUGARG("NewObjArrayArgs"));
lvaTable[lvaNewObjArrayArgs].lvType = TYP_BLK;
lvaTable[lvaNewObjArrayArgs].lvExactSize = 0;
}
// Increase size of lvaNewObjArrayArgs to be the largest size needed to hold 'numArgs' integers
// for our call to CORINFO_HELP_NEW_MDARR.
lvaTable[lvaNewObjArrayArgs].lvExactSize =
max(lvaTable[lvaNewObjArrayArgs].lvExactSize, pCallInfo->sig.numArgs * sizeof(INT32));
// The side-effects may include allocation of more multi-dimensional arrays. Spill all side-effects
// to ensure that the shared lvaNewObjArrayArgs local variable is only ever used to pass arguments
// to one allocation at a time.
impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportNewObjArray"));
//
// The arguments of the CORINFO_HELP_NEW_MDARR helper are:
// - Array class handle
// - Number of dimension arguments
// - Pointer to block of int32 dimensions - address of lvaNewObjArrayArgs temp.
//
node = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
node = gtNewOperNode(GT_ADDR, TYP_I_IMPL, node);
// Pop the dimension arguments from the stack one at a time and store them
// into the lvaNewObjArrayArgs temp.
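// Each store is prepended to the address node with a GT_COMMA, so all of the
// dimension stores are evaluated before the helper reads the block.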
for (int i = pCallInfo->sig.numArgs - 1; i >= 0; i--)
{
GenTree* arg = impImplicitIorI4Cast(impPopStack().val, TYP_INT);
GenTree* dest = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
dest = gtNewOperNode(GT_ADDR, TYP_I_IMPL, dest);
dest = gtNewOperNode(GT_ADD, TYP_I_IMPL, dest,
new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(INT32) * i));
dest = gtNewOperNode(GT_IND, TYP_INT, dest);
node = gtNewOperNode(GT_COMMA, node->TypeGet(), gtNewAssignNode(dest, arg), node);
}
GenTreeCall::Use* args = gtNewCallArgs(node);
// pass number of arguments to the helper
args = gtPrependNewCallArg(gtNewIconNode(pCallInfo->sig.numArgs), args);
args = gtPrependNewCallArg(classHandle, args);
node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR, TYP_REF, args);
for (GenTreeCall::Use& use : node->AsCall()->Args())
{
node->gtFlags |= use.GetNode()->gtFlags & GTF_GLOB_EFFECT;
}
node->AsCall()->compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)pResolvedToken->hClass;
// Remember that this basic block contains 'new' of a md array
compCurBB->bbFlags |= BBF_HAS_NEWARRAY;
impPushOnStack(node, typeInfo(TI_REF, pResolvedToken->hClass));
}
GenTree* Compiler::impTransformThis(GenTree* thisPtr,
CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
CORINFO_THIS_TRANSFORM transform)
{
switch (transform)
{
case CORINFO_DEREF_THIS:
{
GenTree* obj = thisPtr;
// This does an LDIND on the obj, which should be a byref pointing to a ref
impBashVarAddrsToI(obj);
assert(genActualType(obj->gtType) == TYP_I_IMPL || obj->gtType == TYP_BYREF);
CorInfoType constraintTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
obj = gtNewOperNode(GT_IND, JITtype2varType(constraintTyp), obj);
// ldind could point anywhere, for example a boxed class static int
obj->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
return obj;
}
case CORINFO_BOX_THIS:
{
// Constraint calls where there might be no
// unboxed entry point require us to implement the call via helper.
// These only occur when a possible target of the call
// may have inherited an implementation of an interface
// method from System.Object or System.ValueType. The EE does not provide us with
// "unboxed" versions of these methods.
GenTree* obj = thisPtr;
assert(obj->TypeGet() == TYP_BYREF || obj->TypeGet() == TYP_I_IMPL);
obj = gtNewObjNode(pConstrainedResolvedToken->hClass, obj);
obj->gtFlags |= GTF_EXCEPT;
CorInfoType jitTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
if (impIsPrimitive(jitTyp))
{
if (obj->OperIsBlk())
{
obj->ChangeOperUnchecked(GT_IND);
// Obj could point anywhere, for example a boxed class static int
obj->gtFlags |= GTF_IND_TGTANYWHERE;
obj->AsOp()->gtOp2 = nullptr; // must be zero for tree walkers
}
obj->gtType = JITtype2varType(jitTyp);
assert(varTypeIsArithmetic(obj->gtType));
}
// This pushes on the dereferenced byref
// This is then used immediately to box.
impPushOnStack(obj, verMakeTypeInfo(pConstrainedResolvedToken->hClass).NormaliseForStack());
// This pops off the byref-to-a-value-type remaining on the stack and
// replaces it with a boxed object.
// This is then used as the object to the virtual call immediately below.
impImportAndPushBox(pConstrainedResolvedToken);
if (compDonotInline())
{
return nullptr;
}
obj = impPopStack().val;
return obj;
}
case CORINFO_NO_THIS_TRANSFORM:
default:
return thisPtr;
}
}
//------------------------------------------------------------------------
// impCanPInvokeInline: check whether PInvoke inlining should be enabled in the current method.
//
// Return Value:
// true if PInvoke inlining should be enabled in current method, false otherwise
//
// Notes:
// Checks a number of ambient conditions where we could pinvoke but choose not to
bool Compiler::impCanPInvokeInline()
{
return getInlinePInvokeEnabled() && (!opts.compDbgCode) && (compCodeOpt() != SMALL_CODE) &&
(!opts.compNoPInvokeInlineCB) // profiler is preventing inline pinvoke
;
}
//------------------------------------------------------------------------
// impCanPInvokeInlineCallSite: basic legality checks using information
// from a call to see if the call qualifies as an inline pinvoke.
//
// Arguments:
// block - block containing the call, or for inlinees, block
// containing the call being inlined
//
// Return Value:
// true if this call can legally qualify as an inline pinvoke, false otherwise
//
// Notes:
// For runtimes that support exception handling interop there are
// restrictions on using inline pinvoke in handler regions.
//
// * We have to disable pinvoke inlining inside of filters because
// in case the main execution (i.e. in the try block) is inside
// unmanaged code, we cannot reuse the inlined stub (we still need
// the original state until we are in the catch handler)
//
// * We disable pinvoke inlining inside handlers since the GSCookie
// is in the inlined Frame (see
// CORINFO_EE_INFO::InlinedCallFrameInfo::offsetOfGSCookie), but
// this would not protect framelets/return-address of handlers.
//
// These restrictions are currently also in place for CoreCLR but
// can be relaxed when coreclr/#8459 is addressed.
bool Compiler::impCanPInvokeInlineCallSite(BasicBlock* block)
{
if (block->hasHndIndex())
{
return false;
}
// The remaining limitations do not apply to CoreRT
if (IsTargetAbi(CORINFO_CORERT_ABI))
{
return true;
}
#ifdef TARGET_64BIT
// On 64-bit platforms, we disable pinvoke inlining inside of try regions.
// Note that this could be needed on other architectures too, but we
// haven't done enough investigation to know for sure at this point.
//
// Here is the comment from JIT64 explaining why:
// [VSWhidbey: 611015] - because the jitted code links in the
// Frame (instead of the stub) we rely on the Frame not being
// 'active' until inside the stub. This normally happens by the
// stub setting the return address pointer in the Frame object
// inside the stub. On a normal return, the return address
// pointer is zeroed out so the Frame can be safely re-used, but
// if an exception occurs, nobody zeros out the return address
// pointer. Thus if we re-used the Frame object, it would go
// 'active' as soon as we link it into the Frame chain.
//
// Technically we only need to disable PInvoke inlining if we're
// in a handler or if we're in a try body with a catch or
// filter/except where other non-handler code in this method
// might run and try to re-use the dirty Frame object.
//
// A desktop test case where this seems to matter is
// jit\jit64\ebvts\mcpp\sources2\ijw\__clrcall\vector_ctor_dtor.02\deldtor_clr.exe
if (block->hasTryIndex())
{
// This does not apply to the raw pinvoke call that is inside the pinvoke
// ILStub. In this case, we have to inline the raw pinvoke call into the stub,
// otherwise we would end up with a stub that recursively calls itself, and end
// up with a stack overflow.
if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && opts.ShouldUsePInvokeHelpers())
{
return true;
}
return false;
}
#endif // TARGET_64BIT
return true;
}
//------------------------------------------------------------------------
// impCheckForPInvokeCall: examine a call to see if it is a pinvoke and, if so,
// whether it can be expressed as an inline pinvoke.
//
// Arguments:
// call - tree for the call
// methHnd - handle for the method being called (may be null)
// sig - signature of the method being called
// mflags - method flags for the method being called
// block - block containing the call, or for inlinees, block
// containing the call being inlined
//
// Notes:
// Sets GTF_CALL_M_PINVOKE on the call for pinvokes.
//
// Also sets GTF_CALL_UNMANAGED on call for inline pinvokes if the
// call passes a combination of legality and profitability checks.
//
// If GTF_CALL_UNMANAGED is set, increments info.compUnmanagedCallCountWithGCTransition
void Compiler::impCheckForPInvokeCall(
GenTreeCall* call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block)
{
CorInfoCallConvExtension unmanagedCallConv;
// If VM flagged it as Pinvoke, flag the call node accordingly
if ((mflags & CORINFO_FLG_PINVOKE) != 0)
{
call->gtCallMoreFlags |= GTF_CALL_M_PINVOKE;
}
bool suppressGCTransition = false;
if (methHnd)
{
if ((mflags & CORINFO_FLG_PINVOKE) == 0)
{
return;
}
unmanagedCallConv = info.compCompHnd->getUnmanagedCallConv(methHnd, nullptr, &suppressGCTransition);
}
else
{
if (sig->getCallConv() == CORINFO_CALLCONV_DEFAULT || sig->getCallConv() == CORINFO_CALLCONV_VARARG)
{
return;
}
unmanagedCallConv = info.compCompHnd->getUnmanagedCallConv(nullptr, sig, &suppressGCTransition);
assert(!call->gtCallCookie);
}
if (suppressGCTransition)
{
call->gtCallMoreFlags |= GTF_CALL_M_SUPPRESS_GC_TRANSITION;
}
// If we can't get the unmanaged calling convention or the calling convention is unsupported in the JIT,
// return here without inlining the native call.
if (unmanagedCallConv == CorInfoCallConvExtension::Managed ||
unmanagedCallConv == CorInfoCallConvExtension::Fastcall ||
unmanagedCallConv == CorInfoCallConvExtension::FastcallMemberFunction)
{
return;
}
optNativeCallCount++;
if (methHnd == nullptr && (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) || IsTargetAbi(CORINFO_CORERT_ABI)))
{
// PInvoke in CoreRT ABI must be always inlined. Non-inlineable CALLI cases have been
// converted to regular method calls earlier using convertPInvokeCalliToCall.
// PInvoke CALLI in IL stubs must be inlined
}
else
{
// Check legality
if (!impCanPInvokeInlineCallSite(block))
{
return;
}
// Legal PInvoke CALL in PInvoke IL stubs must be inlined to avoid infinite recursive
// inlining in CoreRT. Skip the ambient conditions checks and profitability checks.
if (!IsTargetAbi(CORINFO_CORERT_ABI) || (info.compFlags & CORINFO_FLG_PINVOKE) == 0)
{
if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && opts.ShouldUsePInvokeHelpers())
{
// Raw PInvoke call in PInvoke IL stub generated must be inlined to avoid infinite
// recursive calls to the stub.
}
else
{
if (!impCanPInvokeInline())
{
return;
}
// Size-speed tradeoff: don't use inline pinvoke at rarely
// executed call sites. The non-inline version is more
// compact.
if (block->isRunRarely())
{
return;
}
}
}
// The expensive check should be last
if (info.compCompHnd->pInvokeMarshalingRequired(methHnd, sig))
{
return;
}
}
JITLOG((LL_INFO1000000, "\nInline a CALLI PINVOKE call from method %s\n", info.compFullName));
call->gtFlags |= GTF_CALL_UNMANAGED;
call->unmgdCallConv = unmanagedCallConv;
if (!call->IsSuppressGCTransition())
{
info.compUnmanagedCallCountWithGCTransition++;
}
// AMD64 convention is same for native and managed
if (unmanagedCallConv == CorInfoCallConvExtension::C ||
unmanagedCallConv == CorInfoCallConvExtension::CMemberFunction)
{
call->gtFlags |= GTF_CALL_POP_ARGS;
}
if (unmanagedCallConv == CorInfoCallConvExtension::Thiscall)
{
call->gtCallMoreFlags |= GTF_CALL_M_UNMGD_THISCALL;
}
}
GenTreeCall* Compiler::impImportIndirectCall(CORINFO_SIG_INFO* sig, const DebugInfo& di)
{
var_types callRetTyp = JITtype2varType(sig->retType);
/* The function pointer is on top of the stack - It may be a
* complex expression. As it is evaluated after the args,
* it may cause registered args to be spilled. Simply spill it.
*/
// Ignore this trivial case.
if (impStackTop().val->gtOper != GT_LCL_VAR)
{
impSpillStackEntry(verCurrentState.esStackDepth - 1,
BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impImportIndirectCall"));
}
/* Get the function pointer */
GenTree* fptr = impPopStack().val;
// The function pointer is typically sized to match the target pointer size.
// However, stubgen IL optimization can change LDC.I8 to LDC.I4
// See ILCodeStream::LowerOpcode
assert(genActualType(fptr->gtType) == TYP_I_IMPL || genActualType(fptr->gtType) == TYP_INT);
#ifdef DEBUG
// This temporary must never be converted to a double in stress mode,
// because that can introduce a call to the cast helper after the
// arguments have already been evaluated.
if (fptr->OperGet() == GT_LCL_VAR)
{
lvaTable[fptr->AsLclVarCommon()->GetLclNum()].lvKeepType = 1;
}
#endif
/* Create the call node */
GenTreeCall* call = gtNewIndCallNode(fptr, callRetTyp, nullptr, di);
call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
#ifdef UNIX_X86_ABI
call->gtFlags &= ~GTF_CALL_POP_ARGS;
#endif
return call;
}
/*****************************************************************************/
void Compiler::impPopArgsForUnmanagedCall(GenTree* call, CORINFO_SIG_INFO* sig)
{
assert(call->gtFlags & GTF_CALL_UNMANAGED);
/* Since we push the arguments in reverse order (i.e. right -> left)
* spill any side effects from the stack
*
* OBS: If there is only one side effect we do not need to spill it
* thus we have to spill all side-effects except last one
*/
unsigned lastLevelWithSideEffects = UINT_MAX;
unsigned argsToReverse = sig->numArgs;
// For "thiscall", the first argument goes in a register. Since its
// order does not need to be changed, we do not need to spill it
if (call->AsCall()->gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
{
assert(argsToReverse);
argsToReverse--;
}
#ifndef TARGET_X86
// Don't reverse args on ARM or x64 - first four args always placed in regs in order
argsToReverse = 0;
#endif
for (unsigned level = verCurrentState.esStackDepth - argsToReverse; level < verCurrentState.esStackDepth; level++)
{
if (verCurrentState.esStack[level].val->gtFlags & GTF_ORDER_SIDEEFF)
{
assert(lastLevelWithSideEffects == UINT_MAX);
impSpillStackEntry(level,
BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - other side effect"));
}
else if (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT)
{
if (lastLevelWithSideEffects != UINT_MAX)
{
/* We had a previous side effect - must spill it */
impSpillStackEntry(lastLevelWithSideEffects,
BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - side effect"));
/* Record the level for the current side effect in case we will spill it */
lastLevelWithSideEffects = level;
}
else
{
/* This is the first side effect encountered - record its level */
lastLevelWithSideEffects = level;
}
}
}
/* The argument list is now "clean" - no out-of-order side effects
* Pop the argument list in reverse order */
GenTreeCall::Use* args = impPopReverseCallArgs(sig->numArgs, sig, sig->numArgs - argsToReverse);
call->AsCall()->gtCallArgs = args;
if (call->AsCall()->gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
{
GenTree* thisPtr = args->GetNode();
impBashVarAddrsToI(thisPtr);
assert(thisPtr->TypeGet() == TYP_I_IMPL || thisPtr->TypeGet() == TYP_BYREF);
}
for (GenTreeCall::Use& argUse : GenTreeCall::UseList(args))
{
GenTree* arg = argUse.GetNode();
call->gtFlags |= arg->gtFlags & GTF_GLOB_EFFECT;
// We should not be passing gc typed args to an unmanaged call.
if (varTypeIsGC(arg->TypeGet()))
{
// Tolerate byrefs by retyping to native int.
//
// This is needed or we'll generate inconsistent GC info
// for this arg at the call site (gc info says byref,
// pinvoke sig says native int).
//
if (arg->TypeGet() == TYP_BYREF)
{
arg->ChangeType(TYP_I_IMPL);
}
else
{
assert(!"*** invalid IL: gc ref passed to unmanaged call");
}
}
}
}
//------------------------------------------------------------------------
// impInitClass: Build a node to initialize the class before accessing the
// field if necessary
//
// Arguments:
// pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
// by a call to CEEInfo::resolveToken().
//
// Return Value: If needed, a pointer to the node that will perform the class
// initialization. Otherwise, nullptr.
//
GenTree* Compiler::impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken)
{
CorInfoInitClassResult initClassResult =
info.compCompHnd->initClass(pResolvedToken->hField, info.compMethodHnd, impTokenLookupContextHandle);
if ((initClassResult & CORINFO_INITCLASS_USE_HELPER) == 0)
{
return nullptr;
}
bool runtimeLookup;
GenTree* node = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup);
if (node == nullptr)
{
assert(compDonotInline());
return nullptr;
}
if (runtimeLookup)
{
node = gtNewHelperCallNode(CORINFO_HELP_INITCLASS, TYP_VOID, gtNewCallArgs(node));
}
else
{
// Call the shared non-gc static helper, as it's the fastest
node = fgGetSharedCCtor(pResolvedToken->hClass);
}
return node;
}
GenTree* Compiler::impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp)
{
GenTree* op1 = nullptr;
#if defined(DEBUG)
// If we're replaying under SuperPMI, we're going to read the data stored by SuperPMI and use it
// for optimization. Unfortunately, SuperPMI doesn't implement a guarantee on the alignment of
// this data, so for some platforms which don't allow unaligned access (e.g., Linux arm32),
// this can fault. We should fix SuperPMI to guarantee alignment, but that is a big change.
// Instead, simply fix up the data here for future use.
// This variable should be the largest size element, with the largest alignment requirement,
// and the native C++ compiler should guarantee sufficient alignment.
double aligned_data = 0.0;
void* p_aligned_data = &aligned_data;
if (info.compMethodSuperPMIIndex != -1)
{
switch (lclTyp)
{
case TYP_BOOL:
case TYP_BYTE:
case TYP_UBYTE:
static_assert_no_msg(sizeof(unsigned __int8) == sizeof(bool));
static_assert_no_msg(sizeof(unsigned __int8) == sizeof(signed char));
static_assert_no_msg(sizeof(unsigned __int8) == sizeof(unsigned char));
// No alignment necessary for byte.
break;
case TYP_SHORT:
case TYP_USHORT:
static_assert_no_msg(sizeof(unsigned __int16) == sizeof(short));
static_assert_no_msg(sizeof(unsigned __int16) == sizeof(unsigned short));
if ((size_t)fldAddr % sizeof(unsigned __int16) != 0)
{
*(unsigned __int16*)p_aligned_data = GET_UNALIGNED_16(fldAddr);
fldAddr = p_aligned_data;
}
break;
case TYP_INT:
case TYP_UINT:
case TYP_FLOAT:
static_assert_no_msg(sizeof(unsigned __int32) == sizeof(int));
static_assert_no_msg(sizeof(unsigned __int32) == sizeof(unsigned int));
static_assert_no_msg(sizeof(unsigned __int32) == sizeof(float));
if ((size_t)fldAddr % sizeof(unsigned __int32) != 0)
{
*(unsigned __int32*)p_aligned_data = GET_UNALIGNED_32(fldAddr);
fldAddr = p_aligned_data;
}
break;
case TYP_LONG:
case TYP_ULONG:
case TYP_DOUBLE:
static_assert_no_msg(sizeof(unsigned __int64) == sizeof(__int64));
static_assert_no_msg(sizeof(unsigned __int64) == sizeof(double));
if ((size_t)fldAddr % sizeof(unsigned __int64) != 0)
{
*(unsigned __int64*)p_aligned_data = GET_UNALIGNED_64(fldAddr);
fldAddr = p_aligned_data;
}
break;
default:
assert(!"Unexpected lclTyp");
break;
}
}
#endif // DEBUG
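// Materialize the field's current value as a constant node of the appropriate
// kind (GT_CNS_INT, GT_CNS_LNG or GT_CNS_DBL), reading directly from the given
// field address.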
switch (lclTyp)
{
int ival;
__int64 lval;
double dval;
case TYP_BOOL:
ival = *((bool*)fldAddr);
goto IVAL_COMMON;
case TYP_BYTE:
ival = *((signed char*)fldAddr);
goto IVAL_COMMON;
case TYP_UBYTE:
ival = *((unsigned char*)fldAddr);
goto IVAL_COMMON;
case TYP_SHORT:
ival = *((short*)fldAddr);
goto IVAL_COMMON;
case TYP_USHORT:
ival = *((unsigned short*)fldAddr);
goto IVAL_COMMON;
case TYP_UINT:
case TYP_INT:
ival = *((int*)fldAddr);
IVAL_COMMON:
op1 = gtNewIconNode(ival);
break;
case TYP_LONG:
case TYP_ULONG:
lval = *((__int64*)fldAddr);
op1 = gtNewLconNode(lval);
break;
case TYP_FLOAT:
dval = *((float*)fldAddr);
op1 = gtNewDconNode(dval);
op1->gtType = TYP_FLOAT;
break;
case TYP_DOUBLE:
dval = *((double*)fldAddr);
op1 = gtNewDconNode(dval);
break;
default:
assert(!"Unexpected lclTyp");
break;
}
return op1;
}
GenTree* Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_ACCESS_FLAGS access,
CORINFO_FIELD_INFO* pFieldInfo,
var_types lclTyp)
{
// Ordinary static fields never overlap. RVA statics, however, can overlap (if they're
// mapped to the same ".data" declaration). That said, such mappings only appear to be
// possible with ILASM, and in ILASM-produced (ILONLY) images, RVA statics are always
// read-only (using "stsfld" on them is UB). In mixed-mode assemblies, RVA statics can
// be mutable, but the only current producer of such images, the C++/CLI compiler, does
// not appear to support mapping different fields to the same address. So we will say
// that "mutable overlapping RVA statics" are UB as well. If this ever changes, code in
// value numbering will need to be updated to respect "NotAField FldSeq".
// For statics that are not "boxed", the initial address tree will contain the field sequence.
// For those that are, we will attach it later, when adding the indirection for the box, since
// that tree will represent the true address.
bool isBoxedStatic = (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP) != 0;
bool isSharedStatic = (pFieldInfo->fieldAccessor == CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER) ||
(pFieldInfo->fieldAccessor == CORINFO_FIELD_STATIC_READYTORUN_HELPER);
FieldSeqNode::FieldKind fieldKind =
isSharedStatic ? FieldSeqNode::FieldKind::SharedStatic : FieldSeqNode::FieldKind::SimpleStatic;
FieldSeqNode* innerFldSeq = !isBoxedStatic ? GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField, fieldKind)
: FieldSeqStore::NotAField();
GenTree* op1;
switch (pFieldInfo->fieldAccessor)
{
case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
{
assert(!compIsForInlining());
// We first call a special helper to get the statics base pointer
op1 = impParentClassTokenToHandle(pResolvedToken);
// compIsForInlining() is false so we should not get NULL here
assert(op1 != nullptr);
var_types type = TYP_BYREF;
switch (pFieldInfo->helper)
{
case CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE:
type = TYP_I_IMPL;
break;
case CORINFO_HELP_GETGENERICS_GCSTATIC_BASE:
case CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE:
case CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE:
break;
default:
assert(!"unknown generic statics helper");
break;
}
op1 = gtNewHelperCallNode(pFieldInfo->helper, type, gtNewCallArgs(op1));
op1 = gtNewOperNode(GT_ADD, type, op1, gtNewIconNode(pFieldInfo->offset, innerFldSeq));
}
break;
case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
{
#ifdef FEATURE_READYTORUN
if (opts.IsReadyToRun())
{
GenTreeFlags callFlags = GTF_EMPTY;
if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
{
callFlags |= GTF_CALL_HOISTABLE;
}
op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_STATIC_BASE, TYP_BYREF);
op1->gtFlags |= callFlags;
op1->AsCall()->setEntryPoint(pFieldInfo->fieldLookup);
}
else
#endif
{
op1 = fgGetStaticsCCtorHelper(pResolvedToken->hClass, pFieldInfo->helper);
}
op1 = gtNewOperNode(GT_ADD, op1->TypeGet(), op1, gtNewIconNode(pFieldInfo->offset, innerFldSeq));
break;
}
case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
{
#ifdef FEATURE_READYTORUN
assert(opts.IsReadyToRun());
assert(!compIsForInlining());
CORINFO_LOOKUP_KIND kind;
info.compCompHnd->getLocationOfThisType(info.compMethodHnd, &kind);
assert(kind.needsRuntimeLookup);
GenTree* ctxTree = getRuntimeContextTree(kind.runtimeLookupKind);
GenTreeCall::Use* args = gtNewCallArgs(ctxTree);
GenTreeFlags callFlags = GTF_EMPTY;
if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
{
callFlags |= GTF_CALL_HOISTABLE;
}
var_types type = TYP_BYREF;
op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE, type, args);
op1->gtFlags |= callFlags;
op1->AsCall()->setEntryPoint(pFieldInfo->fieldLookup);
op1 = gtNewOperNode(GT_ADD, type, op1, gtNewIconNode(pFieldInfo->offset, innerFldSeq));
#else
unreached();
#endif // FEATURE_READYTORUN
}
break;
default:
{
// Do we need the address of a static field?
//
if (access & CORINFO_ACCESS_ADDRESS)
{
void** pFldAddr = nullptr;
void* fldAddr = info.compCompHnd->getFieldAddress(pResolvedToken->hField, (void**)&pFldAddr);
// We should always be able to access this static's address directly.
assert(pFldAddr == nullptr);
// Create the address node.
GenTreeFlags handleKind = isBoxedStatic ? GTF_ICON_STATIC_BOX_PTR : GTF_ICON_STATIC_HDL;
op1 = gtNewIconHandleNode((size_t)fldAddr, handleKind, innerFldSeq);
#ifdef DEBUG
op1->AsIntCon()->gtTargetHandle = op1->AsIntCon()->gtIconVal;
#endif
if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
{
op1->gtFlags |= GTF_ICON_INITCLASS;
}
}
else // We need the value of a static field
{
// In the future, it may be better to just create the right tree here instead of folding it later.
op1 = gtNewFieldRef(lclTyp, pResolvedToken->hField);
if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
{
op1->gtFlags |= GTF_FLD_INITCLASS;
}
if (isBoxedStatic)
{
FieldSeqNode* outerFldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField, fieldKind);
op1->ChangeType(TYP_REF); // points at boxed object
op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, gtNewIconNode(TARGET_POINTER_SIZE, outerFldSeq));
if (varTypeIsStruct(lclTyp))
{
// Constructor adds GTF_GLOB_REF. Note that this is *not* GTF_EXCEPT.
op1 = gtNewObjNode(pFieldInfo->structType, op1);
}
else
{
op1 = gtNewOperNode(GT_IND, lclTyp, op1);
op1->gtFlags |= (GTF_GLOB_REF | GTF_IND_NONFAULTING);
}
}
return op1;
}
break;
}
}
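// For boxed statics, the address computed above points at a cell holding a reference to the boxed
// object; dereference it and step over the method table pointer to reach the payload.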
if (isBoxedStatic)
{
FieldSeqNode* outerFldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField, fieldKind);
op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
op1->gtFlags |= (GTF_IND_INVARIANT | GTF_IND_NONFAULTING | GTF_IND_NONNULL);
op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, gtNewIconNode(TARGET_POINTER_SIZE, outerFldSeq));
}
if (!(access & CORINFO_ACCESS_ADDRESS))
{
if (varTypeIsStruct(lclTyp))
{
// Constructor adds GTF_GLOB_REF. Note that this is *not* GTF_EXCEPT.
op1 = gtNewObjNode(pFieldInfo->structType, op1);
}
else
{
op1 = gtNewOperNode(GT_IND, lclTyp, op1);
op1->gtFlags |= GTF_GLOB_REF;
}
}
return op1;
}
// In general, try to call this before most of the verification work. Most people expect the access
// exceptions before the verification exceptions. If you do this afterwards, that usually doesn't happen. It
// turns out that if you can't access something, we also consider you unverifiable for other reasons.
void Compiler::impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
{
if (result != CORINFO_ACCESS_ALLOWED)
{
impHandleAccessAllowedInternal(result, helperCall);
}
}
void Compiler::impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
{
switch (result)
{
case CORINFO_ACCESS_ALLOWED:
break;
case CORINFO_ACCESS_ILLEGAL:
// if we're verifying, then we need to reject the illegal access to ensure that we don't think the
// method is verifiable. Otherwise, delay the exception to runtime.
if (compIsForImportOnly())
{
info.compCompHnd->ThrowExceptionForHelper(helperCall);
}
else
{
impInsertHelperCall(helperCall);
}
break;
}
}
void Compiler::impInsertHelperCall(CORINFO_HELPER_DESC* helperInfo)
{
// Construct the argument list
GenTreeCall::Use* args = nullptr;
assert(helperInfo->helperNum != CORINFO_HELP_UNDEF);
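// Helper arguments are recorded in left-to-right order; walk them in reverse and prepend each one
// so that the final argument list preserves that order.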
for (unsigned i = helperInfo->numArgs; i > 0; --i)
{
const CORINFO_HELPER_ARG& helperArg = helperInfo->args[i - 1];
GenTree* currentArg = nullptr;
switch (helperArg.argType)
{
case CORINFO_HELPER_ARG_TYPE_Field:
info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
info.compCompHnd->getFieldClass(helperArg.fieldHandle));
currentArg = gtNewIconEmbFldHndNode(helperArg.fieldHandle);
break;
case CORINFO_HELPER_ARG_TYPE_Method:
info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(helperArg.methodHandle);
currentArg = gtNewIconEmbMethHndNode(helperArg.methodHandle);
break;
case CORINFO_HELPER_ARG_TYPE_Class:
info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(helperArg.classHandle);
currentArg = gtNewIconEmbClsHndNode(helperArg.classHandle);
break;
case CORINFO_HELPER_ARG_TYPE_Module:
currentArg = gtNewIconEmbScpHndNode(helperArg.moduleHandle);
break;
case CORINFO_HELPER_ARG_TYPE_Const:
currentArg = gtNewIconNode(helperArg.constant);
break;
default:
NO_WAY("Illegal helper arg type");
}
args = gtPrependNewCallArg(currentArg, args);
}
/* TODO-Review:
* Mark as CSE'able, and hoistable. Consider marking hoistable unless you're in the inlinee.
* Also, consider sticking this in the first basic block.
*/
GenTree* callout = gtNewHelperCallNode(helperInfo->helperNum, TYP_VOID, args);
impAppendTree(callout, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);
}
//------------------------------------------------------------------------
// impTailCallRetTypeCompatible: Checks whether the return types of caller
// and callee are compatible so that the callee can be tail called.
//
// Arguments:
// allowWidening -- whether to allow implicit widening by the callee.
// For instance, allowing int32 -> int16 tailcalls.
// The managed calling convention allows this, but
// we don't want explicit tailcalls to depend on this
// detail of the managed calling convention.
// callerRetType -- the caller's return type
// callerRetTypeClass - the caller's return struct type
// callerCallConv -- calling convention of the caller
// calleeRetType -- the callee's return type
// calleeRetTypeClass - the callee return struct type
// calleeCallConv -- calling convention of the callee
//
// Returns:
// True if the tailcall types are compatible.
//
// Remarks:
// Note that here we don't check compatibility in IL Verifier sense, but on the
// lines of return types getting returned in the same return register.
bool Compiler::impTailCallRetTypeCompatible(bool allowWidening,
var_types callerRetType,
CORINFO_CLASS_HANDLE callerRetTypeClass,
CorInfoCallConvExtension callerCallConv,
var_types calleeRetType,
CORINFO_CLASS_HANDLE calleeRetTypeClass,
CorInfoCallConvExtension calleeCallConv)
{
// Early out if the types are the same.
if (callerRetType == calleeRetType)
{
return true;
}
// For integral types the managed calling convention dictates that callee
// will widen the return value to 4 bytes, so we can allow implicit widening
// in managed to managed tailcalls when dealing with <= 4 bytes.
bool isManaged =
(callerCallConv == CorInfoCallConvExtension::Managed) && (calleeCallConv == CorInfoCallConvExtension::Managed);
if (allowWidening && isManaged && varTypeIsIntegral(callerRetType) && varTypeIsIntegral(calleeRetType) &&
(genTypeSize(callerRetType) <= 4) && (genTypeSize(calleeRetType) <= genTypeSize(callerRetType)))
{
return true;
}
// If the class handles are the same and not null, the return types are compatible.
if ((callerRetTypeClass != nullptr) && (callerRetTypeClass == calleeRetTypeClass))
{
return true;
}
#if defined(TARGET_AMD64) || defined(TARGET_ARMARCH)
// Jit64 compat:
if (callerRetType == TYP_VOID)
{
// This needs to be allowed to support the following IL pattern that Jit64 allows:
// tail.call
// pop
// ret
//
// Note that the above IL pattern is not valid as per IL verification rules.
// Therefore, only full trust code can take advantage of this pattern.
return true;
}
// These checks return true if the return value type sizes are the same and
// get returned in the same return register i.e. caller doesn't need to normalize
// return value. Some of the tail calls permitted by below checks would have
// been rejected by IL Verifier before we reached here. Therefore, only full
// trust code can make those tail calls.
unsigned callerRetTypeSize = 0;
unsigned calleeRetTypeSize = 0;
bool isCallerRetTypMBEnreg = VarTypeIsMultiByteAndCanEnreg(callerRetType, callerRetTypeClass, &callerRetTypeSize,
true, info.compIsVarArgs, callerCallConv);
bool isCalleeRetTypMBEnreg = VarTypeIsMultiByteAndCanEnreg(calleeRetType, calleeRetTypeClass, &calleeRetTypeSize,
true, info.compIsVarArgs, calleeCallConv);
if (varTypeIsIntegral(callerRetType) || isCallerRetTypMBEnreg)
{
return (varTypeIsIntegral(calleeRetType) || isCalleeRetTypMBEnreg) && (callerRetTypeSize == calleeRetTypeSize);
}
#endif // TARGET_AMD64 || TARGET_ARMARCH
return false;
}
/********************************************************************************
*
* Returns true if the current opcode and the opcodes following it correspond
* to a supported tail call IL pattern.
*
*/
bool Compiler::impIsTailCallILPattern(
bool tailPrefixed, OPCODE curOpcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, bool isRecursive)
{
// Bail out if the current opcode is not a call.
if (!impOpcodeIsCallOpcode(curOpcode))
{
return false;
}
#if !FEATURE_TAILCALL_OPT_SHARED_RETURN
// If shared ret tail opt is not enabled, we will enable
// it for recursive methods.
if (isRecursive)
#endif
{
// We can actually handle the case where the ret is in a fallthrough block, as long as that is the only
// part of the sequence. Make sure we don't go past the end of the IL, however.
codeEnd = min(codeEnd + 1, info.compCode + info.compILCodeSize);
}
// Bail out if there is no next opcode after call
if (codeAddrOfNextOpcode >= codeEnd)
{
return false;
}
OPCODE nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
return (nextOpcode == CEE_RET);
}
/*****************************************************************************
*
* Determine whether the call could be converted to an implicit tail call
*
*/
bool Compiler::impIsImplicitTailCallCandidate(
OPCODE opcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive)
{
#if FEATURE_TAILCALL_OPT
if (!opts.compTailCallOpt)
{
return false;
}
if (opts.OptimizationDisabled())
{
return false;
}
// must not be tail prefixed
if (prefixFlags & PREFIX_TAILCALL_EXPLICIT)
{
return false;
}
#if !FEATURE_TAILCALL_OPT_SHARED_RETURN
// the block containing call is marked as BBJ_RETURN
// We allow shared ret tail call optimization on recursive calls even under
// !FEATURE_TAILCALL_OPT_SHARED_RETURN.
if (!isRecursive && (compCurBB->bbJumpKind != BBJ_RETURN))
return false;
#endif // !FEATURE_TAILCALL_OPT_SHARED_RETURN
// must be call+ret or call+pop+ret
if (!impIsTailCallILPattern(false, opcode, codeAddrOfNextOpcode, codeEnd, isRecursive))
{
return false;
}
return true;
#else
return false;
#endif // FEATURE_TAILCALL_OPT
}
//------------------------------------------------------------------------
// impImportCall: import a call-inspiring opcode
//
// Arguments:
// opcode - opcode that inspires the call
// pResolvedToken - resolved token for the call target
// pConstrainedResolvedToken - resolved constraint token (or nullptr)
// newobjThis - tree for this pointer or uninitialized newobj temp (or nullptr)
// prefixFlags - IL prefix flags for the call
// callInfo - EE supplied info for the call
// rawILOffset - IL offset of the opcode, used for guarded devirtualization.
//
// Returns:
// Type of the call's return value.
// If we're importing an inlinee and have realized the inline must fail, the call return type should be TYP_UNDEF.
// However we can't assert for this here yet because there are cases we miss. See issue #13272.
//
//
// Notes:
// opcode can be CEE_CALL, CEE_CALLI, CEE_CALLVIRT, or CEE_NEWOBJ.
//
// For CEE_NEWOBJ, newobjThis should be the temp grabbed for the allocated
// uninitialized object.
#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
var_types Compiler::impImportCall(OPCODE opcode,
CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
GenTree* newobjThis,
int prefixFlags,
CORINFO_CALL_INFO* callInfo,
IL_OFFSET rawILOffset)
{
assert(opcode == CEE_CALL || opcode == CEE_CALLVIRT || opcode == CEE_NEWOBJ || opcode == CEE_CALLI);
// The current statement DI may not refer to the exact call, but for calls
// we wish to be able to attach the exact IL instruction to get "return
// value" support in the debugger, so create one with the exact IL offset.
DebugInfo di = impCreateDIWithCurrentStackInfo(rawILOffset, true);
var_types callRetTyp = TYP_COUNT;
CORINFO_SIG_INFO* sig = nullptr;
CORINFO_METHOD_HANDLE methHnd = nullptr;
CORINFO_CLASS_HANDLE clsHnd = nullptr;
unsigned clsFlags = 0;
unsigned mflags = 0;
GenTree* call = nullptr;
GenTreeCall::Use* args = nullptr;
CORINFO_THIS_TRANSFORM constraintCallThisTransform = CORINFO_NO_THIS_TRANSFORM;
CORINFO_CONTEXT_HANDLE exactContextHnd = nullptr;
bool exactContextNeedsRuntimeLookup = false;
bool canTailCall = true;
const char* szCanTailCallFailReason = nullptr;
const int tailCallFlags = (prefixFlags & PREFIX_TAILCALL);
const bool isReadonlyCall = (prefixFlags & PREFIX_READONLY) != 0;
methodPointerInfo* ldftnInfo = nullptr;
// Synchronized methods need to call CORINFO_HELP_MON_EXIT at the end. We could
// do that before tailcalls, but that is probably not the intended
// semantic. So just disallow tailcalls from synchronized methods.
// Also, popping arguments in a varargs function is more work and NYI
// If we have a security object, we have to keep our frame around for callers
// to see any imperative security.
// Reverse P/Invokes need a call to CORINFO_HELP_JIT_REVERSE_PINVOKE_EXIT
// at the end, so tailcalls should be disabled.
if (info.compFlags & CORINFO_FLG_SYNCH)
{
canTailCall = false;
szCanTailCallFailReason = "Caller is synchronized";
}
else if (opts.IsReversePInvoke())
{
canTailCall = false;
szCanTailCallFailReason = "Caller is Reverse P/Invoke";
}
#if !FEATURE_FIXED_OUT_ARGS
else if (info.compIsVarArgs)
{
canTailCall = false;
szCanTailCallFailReason = "Caller is varargs";
}
#endif // FEATURE_FIXED_OUT_ARGS
// We only need to cast the return value of pinvoke inlined calls that return small types
// TODO-AMD64-Cleanup: Remove this when we stop interoperating with JIT64, or if we decide to stop
// widening everything! CoreCLR does not support JIT64 interoperation so no need to widen there.
// The existing x64 JIT doesn't bother widening all types to int, so we have to assume for
// the time being that the callee might be compiled by the other JIT and thus the return
// value will need to be widened by us (or not widened at all...)
// ReadyToRun code sticks with default calling convention that does not widen small return types.
bool checkForSmallType = opts.IsReadyToRun();
bool bIntrinsicImported = false;
CORINFO_SIG_INFO calliSig;
GenTreeCall::Use* extraArg = nullptr;
/*-------------------------------------------------------------------------
* First create the call node
*/
if (opcode == CEE_CALLI)
{
if (IsTargetAbi(CORINFO_CORERT_ABI))
{
// See comment in impCheckForPInvokeCall
BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB;
if (info.compCompHnd->convertPInvokeCalliToCall(pResolvedToken, !impCanPInvokeInlineCallSite(block)))
{
eeGetCallInfo(pResolvedToken, nullptr, CORINFO_CALLINFO_ALLOWINSTPARAM, callInfo);
return impImportCall(CEE_CALL, pResolvedToken, nullptr, nullptr, prefixFlags, callInfo, rawILOffset);
}
}
/* Get the call site sig */
eeGetSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &calliSig);
callRetTyp = JITtype2varType(calliSig.retType);
call = impImportIndirectCall(&calliSig, di);
// We don't know the target method, so we have to infer the flags, or
// assume the worst-case.
mflags = (calliSig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
#ifdef DEBUG
if (verbose)
{
unsigned structSize = (callRetTyp == TYP_STRUCT) ? eeTryGetClassSize(calliSig.retTypeSigClass) : 0;
printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %u\n",
opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
}
#endif
sig = &calliSig;
}
else // (opcode != CEE_CALLI)
{
NamedIntrinsic ni = NI_Illegal;
// Passing CORINFO_CALLINFO_ALLOWINSTPARAM indicates that this JIT is prepared to
// supply the instantiation parameters necessary to make direct calls to underlying
// shared generic code, rather than calling through instantiating stubs. If the
// returned signature has CORINFO_CALLCONV_PARAMTYPE then this indicates that the JIT
// must indeed pass an instantiation parameter.
methHnd = callInfo->hMethod;
sig = &(callInfo->sig);
callRetTyp = JITtype2varType(sig->retType);
mflags = callInfo->methodFlags;
#ifdef DEBUG
if (verbose)
{
unsigned structSize = (callRetTyp == TYP_STRUCT) ? eeTryGetClassSize(sig->retTypeSigClass) : 0;
printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %u\n",
opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
}
#endif
if (compIsForInlining())
{
/* Does the inlinee use StackCrawlMark */
if (mflags & CORINFO_FLG_DONT_INLINE_CALLER)
{
compInlineResult->NoteFatal(InlineObservation::CALLEE_STACK_CRAWL_MARK);
return TYP_UNDEF;
}
/* For now ignore varargs */
if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
{
compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NATIVE_VARARGS);
return TYP_UNDEF;
}
if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
{
compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
return TYP_UNDEF;
}
if ((mflags & CORINFO_FLG_VIRTUAL) && (sig->sigInst.methInstCount != 0) && (opcode == CEE_CALLVIRT))
{
compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_GENERIC_VIRTUAL);
return TYP_UNDEF;
}
}
clsHnd = pResolvedToken->hClass;
clsFlags = callInfo->classFlags;
#ifdef DEBUG
// If this is a call to JitTestLabel.Mark, do "early inlining", and record the test attribute.
// This recognition should really be done by knowing the methHnd of the relevant Mark method(s).
// These should be in corelib.h, and available through a JIT/EE interface call.
const char* modName;
const char* className;
const char* methodName;
if ((className = eeGetClassName(clsHnd)) != nullptr &&
strcmp(className, "System.Runtime.CompilerServices.JitTestLabel") == 0 &&
(methodName = eeGetMethodName(methHnd, &modName)) != nullptr && strcmp(methodName, "Mark") == 0)
{
return impImportJitTestLabelMark(sig->numArgs);
}
#endif // DEBUG
// <NICE> Factor this into getCallInfo </NICE>
bool isSpecialIntrinsic = false;
if ((mflags & CORINFO_FLG_INTRINSIC) != 0)
{
const bool isTailCall = canTailCall && (tailCallFlags != 0);
call =
impIntrinsic(newobjThis, clsHnd, methHnd, sig, mflags, pResolvedToken->token, isReadonlyCall,
isTailCall, pConstrainedResolvedToken, callInfo->thisTransform, &ni, &isSpecialIntrinsic);
if (compDonotInline())
{
return TYP_UNDEF;
}
if (call != nullptr)
{
#ifdef FEATURE_READYTORUN
if (call->OperGet() == GT_INTRINSIC)
{
if (opts.IsReadyToRun())
{
noway_assert(callInfo->kind == CORINFO_CALL);
call->AsIntrinsic()->gtEntryPoint = callInfo->codePointerLookup.constLookup;
}
else
{
call->AsIntrinsic()->gtEntryPoint.addr = nullptr;
call->AsIntrinsic()->gtEntryPoint.accessType = IAT_VALUE;
}
}
#endif
bIntrinsicImported = true;
goto DONE_CALL;
}
}
#ifdef FEATURE_SIMD
call = impSIMDIntrinsic(opcode, newobjThis, clsHnd, methHnd, sig, mflags, pResolvedToken->token);
if (call != nullptr)
{
bIntrinsicImported = true;
goto DONE_CALL;
}
#endif // FEATURE_SIMD
if ((mflags & CORINFO_FLG_VIRTUAL) && (mflags & CORINFO_FLG_EnC) && (opcode == CEE_CALLVIRT))
{
NO_WAY("Virtual call to a function added via EnC is not supported");
}
if ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_DEFAULT &&
(sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
(sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG)
{
BADCODE("Bad calling convention");
}
//-------------------------------------------------------------------------
// Construct the call node
//
// Work out what sort of call we're making.
// Dispense with virtual calls implemented via LDVIRTFTN immediately.
constraintCallThisTransform = callInfo->thisTransform;
exactContextHnd = callInfo->contextHandle;
exactContextNeedsRuntimeLookup = callInfo->exactContextNeedsRuntimeLookup;
switch (callInfo->kind)
{
case CORINFO_VIRTUALCALL_STUB:
{
assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
if (callInfo->stubLookup.lookupKind.needsRuntimeLookup)
{
if (callInfo->stubLookup.lookupKind.runtimeLookupKind == CORINFO_LOOKUP_NOT_SUPPORTED)
{
// Runtime does not support inlining of all shapes of runtime lookups
// Inlining has to be aborted in such a case
compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_COMPLEX_HANDLE);
return TYP_UNDEF;
}
GenTree* stubAddr = impRuntimeLookupToTree(pResolvedToken, &callInfo->stubLookup, methHnd);
assert(!compDonotInline());
// This is the rough code to set up an indirect stub call
assert(stubAddr != nullptr);
// The stubAddr may be a
// complex expression. As it is evaluated after the args,
// it may cause registered args to be spilled. Simply spill it.
unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall with runtime lookup"));
impAssignTempGen(lclNum, stubAddr, (unsigned)CHECK_SPILL_NONE);
stubAddr = gtNewLclvNode(lclNum, TYP_I_IMPL);
// Create the actual call node
assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
(sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
call = gtNewIndCallNode(stubAddr, callRetTyp, nullptr);
call->gtFlags |= GTF_EXCEPT | (stubAddr->gtFlags & GTF_GLOB_EFFECT);
call->gtFlags |= GTF_CALL_VIRT_STUB;
#ifdef TARGET_X86
// No tailcalls allowed for these yet...
canTailCall = false;
szCanTailCallFailReason = "VirtualCall with runtime lookup";
#endif
}
else
{
// The stub address is known at compile time
call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, di);
call->AsCall()->gtStubCallStubAddr = callInfo->stubLookup.constLookup.addr;
call->gtFlags |= GTF_CALL_VIRT_STUB;
assert(callInfo->stubLookup.constLookup.accessType != IAT_PPVALUE &&
callInfo->stubLookup.constLookup.accessType != IAT_RELPVALUE);
if (callInfo->stubLookup.constLookup.accessType == IAT_PVALUE)
{
call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_VIRTSTUB_REL_INDIRECT;
}
}
#ifdef FEATURE_READYTORUN
if (opts.IsReadyToRun())
{
// Null check is sometimes needed for ready to run to handle
// non-virtual <-> virtual changes between versions
if (callInfo->nullInstanceCheck)
{
call->gtFlags |= GTF_CALL_NULLCHECK;
}
}
#endif
break;
}
case CORINFO_VIRTUALCALL_VTABLE:
{
assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, di);
call->gtFlags |= GTF_CALL_VIRT_VTABLE;
// Should we expand virtual call targets early for this method?
//
if (opts.compExpandCallsEarly)
{
// Mark this method to expand the virtual call target early in fgMorphCall
call->AsCall()->SetExpandedEarly();
}
break;
}
case CORINFO_VIRTUALCALL_LDVIRTFTN:
{
if (compIsForInlining())
{
compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_CALL_VIA_LDVIRTFTN);
return TYP_UNDEF;
}
assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
// OK, We've been told to call via LDVIRTFTN, so just
// take the call now....
GenTreeCall::Use* args = impPopCallArgs(sig->numArgs, sig);
GenTree* thisPtr = impPopStack().val;
thisPtr = impTransformThis(thisPtr, pConstrainedResolvedToken, callInfo->thisTransform);
assert(thisPtr != nullptr);
// Clone the (possibly transformed) "this" pointer
GenTree* thisPtrCopy;
thisPtr = impCloneExpr(thisPtr, &thisPtrCopy, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("LDVIRTFTN this pointer"));
GenTree* fptr = impImportLdvirtftn(thisPtr, pResolvedToken, callInfo);
assert(fptr != nullptr);
thisPtr = nullptr; // can't reuse it
// Now make an indirect call through the function pointer
unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall through function pointer"));
impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
// Create the actual call node
call = gtNewIndCallNode(fptr, callRetTyp, args, di);
call->AsCall()->gtCallThisArg = gtNewCallArgs(thisPtrCopy);
call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
if ((sig->sigInst.methInstCount != 0) && IsTargetAbi(CORINFO_CORERT_ABI))
{
// CoreRT generic virtual method: need to handle potential fat function pointers
addFatPointerCandidate(call->AsCall());
}
#ifdef FEATURE_READYTORUN
if (opts.IsReadyToRun())
{
// Null check is needed for ready to run to handle
// non-virtual <-> virtual changes between versions
call->gtFlags |= GTF_CALL_NULLCHECK;
}
#endif
// Since we are jumping over some code, check that it's OK to skip that code
assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
(sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
goto DONE;
}
case CORINFO_CALL:
{
// This is for a non-virtual, non-interface etc. call
call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, di);
// We remove the nullcheck for the GetType call intrinsic.
// TODO-CQ: JIT64 does not introduce the null check for many more helper calls
// and intrinsics.
if (callInfo->nullInstanceCheck &&
!((mflags & CORINFO_FLG_INTRINSIC) != 0 && (ni == NI_System_Object_GetType)))
{
call->gtFlags |= GTF_CALL_NULLCHECK;
}
#ifdef FEATURE_READYTORUN
if (opts.IsReadyToRun())
{
call->AsCall()->setEntryPoint(callInfo->codePointerLookup.constLookup);
}
#endif
break;
}
case CORINFO_CALL_CODE_POINTER:
{
// The EE has asked us to call by computing a code pointer and then doing an
// indirect call. This is because a runtime lookup is required to get the code entry point.
// These calls always follow a uniform calling convention, i.e. no extra hidden params
assert((sig->callConv & CORINFO_CALLCONV_PARAMTYPE) == 0);
assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG);
assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
GenTree* fptr =
impLookupToTree(pResolvedToken, &callInfo->codePointerLookup, GTF_ICON_FTN_ADDR, callInfo->hMethod);
if (compDonotInline())
{
return TYP_UNDEF;
}
// Now make an indirect call through the function pointer
unsigned lclNum = lvaGrabTemp(true DEBUGARG("Indirect call through function pointer"));
impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
call = gtNewIndCallNode(fptr, callRetTyp, nullptr, di);
call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
if (callInfo->nullInstanceCheck)
{
call->gtFlags |= GTF_CALL_NULLCHECK;
}
break;
}
default:
assert(!"unknown call kind");
break;
}
//-------------------------------------------------------------------------
// Set more flags
PREFIX_ASSUME(call != nullptr);
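// Propagate the VM's "no GC check" flag for this method onto the call node.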
if (mflags & CORINFO_FLG_NOGCCHECK)
{
call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_NOGCCHECK;
}
// Mark call if it's one of the ones we will maybe treat as an intrinsic
if (isSpecialIntrinsic)
{
call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_SPECIAL_INTRINSIC;
}
}
assert(sig);
assert(clsHnd || (opcode == CEE_CALLI)); // We're never verifying for CALLI, so this is not set.
/* Some sanity checks */
// CALL_VIRT and NEWOBJ must have a THIS pointer
assert((opcode != CEE_CALLVIRT && opcode != CEE_NEWOBJ) || (sig->callConv & CORINFO_CALLCONV_HASTHIS));
// static bit and hasThis are negations of one another
assert(((mflags & CORINFO_FLG_STATIC) != 0) == ((sig->callConv & CORINFO_CALLCONV_HASTHIS) == 0));
assert(call != nullptr);
/*-------------------------------------------------------------------------
* Check special-cases etc
*/
/* Special case - Check if it is a call to Delegate.Invoke(). */
if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
{
assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
assert(mflags & CORINFO_FLG_FINAL);
/* Set the delegate flag */
call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_DELEGATE_INV;
if (callInfo->wrapperDelegateInvoke)
{
call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_WRAPPER_DELEGATE_INV;
}
if (opcode == CEE_CALLVIRT)
{
assert(mflags & CORINFO_FLG_FINAL);
/* It should have the GTF_CALL_NULLCHECK flag set. Reset it */
assert(call->gtFlags & GTF_CALL_NULLCHECK);
call->gtFlags &= ~GTF_CALL_NULLCHECK;
}
}
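// Remember the return type class from the method's signature; for vararg calls the call-site
// signature fetched below may differ from it under type equivalence.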
CORINFO_CLASS_HANDLE actualMethodRetTypeSigClass;
actualMethodRetTypeSigClass = sig->retTypeSigClass;
/* Check for varargs */
if (!compFeatureVarArg() && ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
(sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG))
{
BADCODE("Varargs not supported.");
}
if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
(sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
{
assert(!compIsForInlining());
/* Set the right flags */
call->gtFlags |= GTF_CALL_POP_ARGS;
call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_VARARGS;
/* Can't allow tailcall for varargs as it is caller-pop. The caller
will be expecting to pop a certain number of arguments, but if we
tailcall to a function with a different number of arguments, we
are hosed. There are ways around this (caller remembers esp value,
varargs is not caller-pop, etc), but not worth it. */
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_X86
if (canTailCall)
{
canTailCall = false;
szCanTailCallFailReason = "Callee is varargs";
}
#endif
/* Get the total number of arguments - this is already correct
* for CALLI - for methods we have to get it from the call site */
if (opcode != CEE_CALLI)
{
#ifdef DEBUG
unsigned numArgsDef = sig->numArgs;
#endif
eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
// For vararg calls we must be sure to load the return type of the
// method actually being called, as well as the return type
// specified in the vararg signature. With type equivalency, these types
// may not be the same.
if (sig->retTypeSigClass != actualMethodRetTypeSigClass)
{
if (actualMethodRetTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR &&
sig->retType != CORINFO_TYPE_VAR)
{
// Make sure that all valuetypes (including enums) that we push are loaded.
// This is to guarantee that if a GC is triggered from the prestub of this method,
// all valuetypes in the method signature are already loaded.
// We need to be able to find the size of the valuetypes, but we cannot
// do a class-load from within GC.
info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(actualMethodRetTypeSigClass);
}
}
assert(numArgsDef <= sig->numArgs);
}
/* We will have "cookie" as the last argument but we cannot push
* it on the operand stack because we may overflow, so we append it
* to the arg list after we pop the other arguments */
}
//--------------------------- Inline NDirect ------------------------------
// For inline cases we technically should look at both the current
// block and the call site block (or just the latter if we've
// fused the EH trees). However the block-related checks pertain to
// EH and we currently won't inline a method with EH. So for
// inlinees, just checking the call site block is sufficient.
{
// New lexical block here to avoid compilation errors because of GOTOs.
BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB;
impCheckForPInvokeCall(call->AsCall(), methHnd, sig, mflags, block);
}
#ifdef UNIX_X86_ABI
// On Unix x86 we use caller-cleaned convention.
if ((call->gtFlags & GTF_CALL_UNMANAGED) == 0)
call->gtFlags |= GTF_CALL_POP_ARGS;
#endif // UNIX_X86_ABI
if (call->gtFlags & GTF_CALL_UNMANAGED)
{
// We set up the unmanaged call by linking the frame, disabling GC, etc
// This needs to be cleaned up on return.
// In addition, native calls have different normalization rules than managed code
// (managed calling convention always widens return values in the callee)
if (canTailCall)
{
canTailCall = false;
szCanTailCallFailReason = "Callee is native";
}
checkForSmallType = true;
impPopArgsForUnmanagedCall(call, sig);
goto DONE;
}
else if ((opcode == CEE_CALLI) && ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_DEFAULT) &&
((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG))
{
if (!info.compCompHnd->canGetCookieForPInvokeCalliSig(sig))
{
// Normally this only happens with inlining.
// However, a generic method (or type) being NGENd into another module
// can run into this issue as well. There's not an easy fall-back for NGEN
// so instead we fall back to the JIT.
if (compIsForInlining())
{
compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_PINVOKE_COOKIE);
}
else
{
IMPL_LIMITATION("Can't get PInvoke cookie (cross module generics)");
}
return TYP_UNDEF;
}
GenTree* cookie = eeGetPInvokeCookie(sig);
// This cookie is required to be either a simple GT_CNS_INT or
// an indirection of a GT_CNS_INT
//
GenTree* cookieConst = cookie;
if (cookie->gtOper == GT_IND)
{
cookieConst = cookie->AsOp()->gtOp1;
}
assert(cookieConst->gtOper == GT_CNS_INT);
// Setting GTF_DONT_CSE on the GT_CNS_INT as well as on the GT_IND (if it exists) will ensure that
// we won't allow this tree to participate in any CSE logic
//
cookie->gtFlags |= GTF_DONT_CSE;
cookieConst->gtFlags |= GTF_DONT_CSE;
call->AsCall()->gtCallCookie = cookie;
if (canTailCall)
{
canTailCall = false;
szCanTailCallFailReason = "PInvoke calli";
}
}
/*-------------------------------------------------------------------------
* Create the argument list
*/
//-------------------------------------------------------------------------
// Special case - for varargs we have an implicit last argument
if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
{
assert(!compIsForInlining());
void *varCookie, *pVarCookie;
if (!info.compCompHnd->canGetVarArgsHandle(sig))
{
compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_VARARGS_COOKIE);
return TYP_UNDEF;
}
varCookie = info.compCompHnd->getVarArgsHandle(sig, &pVarCookie);
assert((!varCookie) != (!pVarCookie));
GenTree* cookie = gtNewIconEmbHndNode(varCookie, pVarCookie, GTF_ICON_VARG_HDL, sig);
assert(extraArg == nullptr);
extraArg = gtNewCallArgs(cookie);
}
//-------------------------------------------------------------------------
// Extra arg for shared generic code and array methods
//
// Extra argument containing instantiation information is passed in the
// following circumstances:
// (a) To the "Address" method on array classes; the extra parameter is
// the array's type handle (a TypeDesc)
// (b) To shared-code instance methods in generic structs; the extra parameter
// is the struct's type handle (a vtable ptr)
// (c) To shared-code per-instantiation non-generic static methods in generic
// classes and structs; the extra parameter is the type handle
// (d) To shared-code generic methods; the extra parameter is an
// exact-instantiation MethodDesc
//
// We also set the exact type context associated with the call so we can
// inline the call correctly later on.
if (sig->callConv & CORINFO_CALLCONV_PARAMTYPE)
{
assert(call->AsCall()->gtCallType == CT_USER_FUNC);
if (clsHnd == nullptr)
{
NO_WAY("CALLI on parameterized type");
}
assert(opcode != CEE_CALLI);
GenTree* instParam;
bool runtimeLookup;
// Instantiated generic method
if (((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_METHOD)
{
assert(exactContextHnd != METHOD_BEING_COMPILED_CONTEXT());
CORINFO_METHOD_HANDLE exactMethodHandle =
(CORINFO_METHOD_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
if (!exactContextNeedsRuntimeLookup)
{
#ifdef FEATURE_READYTORUN
if (opts.IsReadyToRun())
{
instParam =
impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_METHOD_HDL, exactMethodHandle);
if (instParam == nullptr)
{
assert(compDonotInline());
return TYP_UNDEF;
}
}
else
#endif
{
instParam = gtNewIconEmbMethHndNode(exactMethodHandle);
info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(exactMethodHandle);
}
}
else
{
instParam = impTokenToHandle(pResolvedToken, &runtimeLookup, true /*mustRestoreHandle*/);
if (instParam == nullptr)
{
assert(compDonotInline());
return TYP_UNDEF;
}
}
}
// otherwise must be an instance method in a generic struct,
// a static method in a generic type, or a runtime-generated array method
else
{
assert(((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS);
CORINFO_CLASS_HANDLE exactClassHandle = eeGetClassFromContext(exactContextHnd);
if (compIsForInlining() && (clsFlags & CORINFO_FLG_ARRAY) != 0)
{
compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_ARRAY_METHOD);
return TYP_UNDEF;
}
if ((clsFlags & CORINFO_FLG_ARRAY) && isReadonlyCall)
{
// We indicate "readonly" to the Address operation by using a null
// instParam.
instParam = gtNewIconNode(0, TYP_REF);
}
else if (!exactContextNeedsRuntimeLookup)
{
#ifdef FEATURE_READYTORUN
if (opts.IsReadyToRun())
{
instParam =
impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_CLASS_HDL, exactClassHandle);
if (instParam == nullptr)
{
assert(compDonotInline());
return TYP_UNDEF;
}
}
else
#endif
{
instParam = gtNewIconEmbClsHndNode(exactClassHandle);
info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(exactClassHandle);
}
}
else
{
instParam = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup, true /*mustRestoreHandle*/);
if (instParam == nullptr)
{
assert(compDonotInline());
return TYP_UNDEF;
}
}
}
assert(extraArg == nullptr);
extraArg = gtNewCallArgs(instParam);
}
if ((opcode == CEE_NEWOBJ) && ((clsFlags & CORINFO_FLG_DELEGATE) != 0))
{
// Only verifiable cases are supported.
// dup; ldvirtftn; newobj; or ldftn; newobj.
// IL test could contain unverifiable sequence, in this case optimization should not be done.
if (impStackHeight() > 0)
{
typeInfo delegateTypeInfo = impStackTop().seTypeInfo;
if (delegateTypeInfo.IsMethod())
{
ldftnInfo = delegateTypeInfo.GetMethodPointerInfo();
}
}
}
//-------------------------------------------------------------------------
// The main group of arguments
args = impPopCallArgs(sig->numArgs, sig, extraArg);
call->AsCall()->gtCallArgs = args;
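// Propagate global side-effect flags from the argument trees up to the call node.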
for (GenTreeCall::Use& use : call->AsCall()->Args())
{
call->gtFlags |= use.GetNode()->gtFlags & GTF_GLOB_EFFECT;
}
//-------------------------------------------------------------------------
// The "this" pointer
if (((mflags & CORINFO_FLG_STATIC) == 0) && ((sig->callConv & CORINFO_CALLCONV_EXPLICITTHIS) == 0) &&
!((opcode == CEE_NEWOBJ) && (newobjThis == nullptr)))
{
GenTree* obj;
if (opcode == CEE_NEWOBJ)
{
obj = newobjThis;
}
else
{
obj = impPopStack().val;
obj = impTransformThis(obj, pConstrainedResolvedToken, constraintCallThisTransform);
if (compDonotInline())
{
return TYP_UNDEF;
}
}
// Store the "this" value in the call
call->gtFlags |= obj->gtFlags & GTF_GLOB_EFFECT;
call->AsCall()->gtCallThisArg = gtNewCallArgs(obj);
// Is this a virtual or interface call?
if (call->AsCall()->IsVirtual())
{
// only true object pointers can be virtual
assert(obj->gtType == TYP_REF);
// See if we can devirtualize.
const bool isExplicitTailCall = (tailCallFlags & PREFIX_TAILCALL_EXPLICIT) != 0;
const bool isLateDevirtualization = false;
impDevirtualizeCall(call->AsCall(), pResolvedToken, &callInfo->hMethod, &callInfo->methodFlags,
&callInfo->contextHandle, &exactContextHnd, isLateDevirtualization, isExplicitTailCall,
// Take care to pass raw IL offset here as the 'debug info' might be different for
// inlinees.
rawILOffset);
// Devirtualization may change which method gets invoked. Update our local cache.
//
methHnd = callInfo->hMethod;
}
if (impIsThis(obj))
{
call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_NONVIRT_SAME_THIS;
}
}
//-------------------------------------------------------------------------
// The "this" pointer for "newobj"
if (opcode == CEE_NEWOBJ)
{
if (clsFlags & CORINFO_FLG_VAROBJSIZE)
{
assert(!(clsFlags & CORINFO_FLG_ARRAY)); // arrays handled separately
// This is a 'new' of a variable-sized object, where
// the constructor is to return the object. In this case
// the constructor claims to return VOID but we know it
// actually returns the new object
assert(callRetTyp == TYP_VOID);
callRetTyp = TYP_REF;
call->gtType = TYP_REF;
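// Spill any stack entries with special side effects before pushing the allocation onto the stack.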
impSpillSpecialSideEff();
impPushOnStack(call, typeInfo(TI_REF, clsHnd));
}
else
{
if (clsFlags & CORINFO_FLG_DELEGATE)
{
// The new inliner morphs it here in impImportCall.
// This will allow us to inline the call to the delegate constructor.
call = fgOptimizeDelegateConstructor(call->AsCall(), &exactContextHnd, ldftnInfo);
}
if (!bIntrinsicImported)
{
#if defined(DEBUG) || defined(INLINE_DATA)
// Keep track of the raw IL offset of the call
call->AsCall()->gtRawILOffset = rawILOffset;
#endif // defined(DEBUG) || defined(INLINE_DATA)
// Is it an inline candidate?
impMarkInlineCandidate(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo);
}
// append the call node.
impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtDI);
// Now push the value of the 'new' onto the stack
// This is a 'new' of a non-variable sized object.
// Append the new node (op1) to the statement list,
// and then push the local holding the value of this
// new instruction on the stack.
if (clsFlags & CORINFO_FLG_VALUECLASS)
{
assert(newobjThis->gtOper == GT_ADDR && newobjThis->AsOp()->gtOp1->gtOper == GT_LCL_VAR);
unsigned tmp = newobjThis->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum();
impPushOnStack(gtNewLclvNode(tmp, lvaGetRealType(tmp)), verMakeTypeInfo(clsHnd).NormaliseForStack());
}
else
{
if (newobjThis->gtOper == GT_COMMA)
{
// We must have inserted the callout. Get the real newobj.
newobjThis = newobjThis->AsOp()->gtOp2;
}
assert(newobjThis->gtOper == GT_LCL_VAR);
impPushOnStack(gtNewLclvNode(newobjThis->AsLclVarCommon()->GetLclNum(), TYP_REF),
typeInfo(TI_REF, clsHnd));
}
}
return callRetTyp;
}
DONE:
#ifdef DEBUG
// In debug we want to be able to register callsites with the EE.
assert(call->AsCall()->callSig == nullptr);
call->AsCall()->callSig = new (this, CMK_Generic) CORINFO_SIG_INFO;
*call->AsCall()->callSig = *sig;
#endif
// Final importer checks for calls flagged as tail calls.
//
if (tailCallFlags != 0)
{
const bool isExplicitTailCall = (tailCallFlags & PREFIX_TAILCALL_EXPLICIT) != 0;
const bool isImplicitTailCall = (tailCallFlags & PREFIX_TAILCALL_IMPLICIT) != 0;
const bool isStressTailCall = (tailCallFlags & PREFIX_TAILCALL_STRESS) != 0;
// Exactly one of these should be true.
assert(isExplicitTailCall != isImplicitTailCall);
// This check cannot be performed for implicit tail calls for the reason
// that impIsImplicitTailCallCandidate() is not checking whether return
// types are compatible before marking a call node with PREFIX_TAILCALL_IMPLICIT.
// As a result it is possible that in the following case, we find that
// the type stack is non-empty if Callee() is considered for implicit
// tail calling.
// int Caller(..) { .... void Callee(); ret val; ... }
//
// Note that we cannot check return type compatibility before impImportCall()
// as we don't have the required info or would need to duplicate some of the logic of
// impImportCall().
//
// For implicit tail calls, we perform this check after return types are
// known to be compatible.
if (isExplicitTailCall && (verCurrentState.esStackDepth != 0))
{
BADCODE("Stack should be empty after tailcall");
}
// For opportunistic tailcalls we allow implicit widening, i.e. tailcalls from int32 -> int16, since the
// managed calling convention dictates that the callee widens the value. For explicit tailcalls we don't
// want to require this detail of the calling convention to bubble up to the tailcall helpers
bool allowWidening = isImplicitTailCall;
if (canTailCall &&
!impTailCallRetTypeCompatible(allowWidening, info.compRetType, info.compMethodInfo->args.retTypeClass,
info.compCallConv, callRetTyp, sig->retTypeClass,
call->AsCall()->GetUnmanagedCallConv()))
{
canTailCall = false;
szCanTailCallFailReason = "Return types are not tail call compatible";
}
// Stack empty check for implicit tail calls.
if (canTailCall && isImplicitTailCall && (verCurrentState.esStackDepth != 0))
{
#ifdef TARGET_AMD64
// JIT64 Compatibility: Opportunistic tail call stack mismatch throws a VerificationException
// in JIT64, not an InvalidProgramException.
Verify(false, "Stack should be empty after tailcall");
#else // !TARGET_AMD64
BADCODE("Stack should be empty after tailcall");
#endif // !TARGET_AMD64
}
// assert(compCurBB is not a catch, finally or filter block);
// assert(compCurBB is not a try block protected by a finally block);
assert(!isExplicitTailCall || compCurBB->bbJumpKind == BBJ_RETURN);
// Ask VM for permission to tailcall
if (canTailCall)
{
// True virtual or indirect calls shouldn't pass in a callee handle.
CORINFO_METHOD_HANDLE exactCalleeHnd =
((call->AsCall()->gtCallType != CT_USER_FUNC) || call->AsCall()->IsVirtual()) ? nullptr : methHnd;
if (info.compCompHnd->canTailCall(info.compMethodHnd, methHnd, exactCalleeHnd, isExplicitTailCall))
{
if (isExplicitTailCall)
{
// In case of explicit tail calls, mark it so that it is not considered
// for in-lining.
call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_EXPLICIT_TAILCALL;
JITDUMP("\nGTF_CALL_M_EXPLICIT_TAILCALL set for call [%06u]\n", dspTreeID(call));
if (isStressTailCall)
{
call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_STRESS_TAILCALL;
JITDUMP("\nGTF_CALL_M_STRESS_TAILCALL set for call [%06u]\n", dspTreeID(call));
}
}
else
{
#if FEATURE_TAILCALL_OPT
// Must be an implicit tail call.
assert(isImplicitTailCall);
// It is possible that a call node is both an inline candidate and marked
// for opportunistic tail calling. In-lining happens before morphing of
// trees. If in-lining of an in-line candidate gets aborted for whatever
// reason, it will survive to the morphing stage at which point it will be
// transformed into a tail call after performing additional checks.
call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_IMPLICIT_TAILCALL;
JITDUMP("\nGTF_CALL_M_IMPLICIT_TAILCALL set for call [%06u]\n", dspTreeID(call));
#else //! FEATURE_TAILCALL_OPT
NYI("Implicit tail call prefix on a target which doesn't support opportunistic tail calls");
#endif // FEATURE_TAILCALL_OPT
}
// This might or might not turn into a tailcall. We do more
// checks in morph. For explicit tailcalls we need more
// information in morph in case it turns out to be a
// helper-based tailcall.
if (isExplicitTailCall)
{
assert(call->AsCall()->tailCallInfo == nullptr);
call->AsCall()->tailCallInfo = new (this, CMK_CorTailCallInfo) TailCallSiteInfo;
switch (opcode)
{
case CEE_CALLI:
call->AsCall()->tailCallInfo->SetCalli(sig);
break;
case CEE_CALLVIRT:
call->AsCall()->tailCallInfo->SetCallvirt(sig, pResolvedToken);
break;
default:
call->AsCall()->tailCallInfo->SetCall(sig, pResolvedToken);
break;
}
}
}
else
{
// canTailCall reported its reasons already
canTailCall = false;
JITDUMP("\ninfo.compCompHnd->canTailCall returned false for call [%06u]\n", dspTreeID(call));
}
}
else
{
// If this assert fires it means that canTailCall was set to false without setting a reason!
assert(szCanTailCallFailReason != nullptr);
JITDUMP("\nRejecting %splicit tail call for [%06u], reason: '%s'\n", isExplicitTailCall ? "ex" : "im",
dspTreeID(call), szCanTailCallFailReason);
info.compCompHnd->reportTailCallDecision(info.compMethodHnd, methHnd, isExplicitTailCall, TAILCALL_FAIL,
szCanTailCallFailReason);
}
}
// Note: we assume that small return types are already normalized by the managed callee
// or by the pinvoke stub for calls to unmanaged code.
if (!bIntrinsicImported)
{
//
// Things needed to be checked when bIntrinsicImported is false.
//
assert(call->gtOper == GT_CALL);
assert(callInfo != nullptr);
if (compIsForInlining() && opcode == CEE_CALLVIRT)
{
GenTree* callObj = call->AsCall()->gtCallThisArg->GetNode();
if ((call->AsCall()->IsVirtual() || (call->gtFlags & GTF_CALL_NULLCHECK)) &&
impInlineIsGuaranteedThisDerefBeforeAnySideEffects(nullptr, call->AsCall()->gtCallArgs, callObj,
impInlineInfo->inlArgInfo))
{
impInlineInfo->thisDereferencedFirst = true;
}
}
#if defined(DEBUG) || defined(INLINE_DATA)
// Keep track of the raw IL offset of the call
call->AsCall()->gtRawILOffset = rawILOffset;
#endif // defined(DEBUG) || defined(INLINE_DATA)
// Is it an inline candidate?
impMarkInlineCandidate(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo);
}
// Extra checks for tail calls and tail recursion.
//
// A tail recursive call is a potential loop from the current block to the start of the root method.
// If we see a tail recursive call, mark the blocks from the call site back to the entry as potentially
// being in a loop.
//
// Note: if we're importing an inlinee we don't mark the right set of blocks, but by then it's too
// late. Currently this doesn't lead to problems. See GitHub issue 33529.
//
// OSR also needs to handle tail calls specially:
// * block profiling in OSR methods needs to ensure probes happen before tail calls, not after.
// * the root method entry must be imported if there's a recursive tail call or a potentially
// inlineable tail call.
//
if ((tailCallFlags != 0) && canTailCall)
{
if (gtIsRecursiveCall(methHnd))
{
assert(verCurrentState.esStackDepth == 0);
BasicBlock* loopHead = nullptr;
if (!compIsForInlining() && opts.IsOSR())
{
// For root method OSR we may branch back to the actual method entry,
// which is not fgFirstBB, and which we will need to import.
assert(fgEntryBB != nullptr);
loopHead = fgEntryBB;
}
else
{
// For normal jitting we may branch back to the firstBB; this
// should already be imported.
loopHead = fgFirstBB;
}
JITDUMP("\nTail recursive call [%06u] in the method. Mark " FMT_BB " to " FMT_BB
" as having a backward branch.\n",
dspTreeID(call), loopHead->bbNum, compCurBB->bbNum);
fgMarkBackwardJump(loopHead, compCurBB);
}
// We only do these OSR checks in the root method because:
// * If we fail to import the root method entry when importing the root method, we can't go back
// and import it during inlining. So instead of checking just for recursive tail calls we also
// have to check for anything that might introduce a recursive tail call.
// * We only instrument root method blocks in OSR methods.
//
if (opts.IsOSR() && !compIsForInlining())
{
// If a root method tail call candidate block is not a BBJ_RETURN, it should have a unique
// BBJ_RETURN successor. Mark that successor so we can handle it specially during profile
// instrumentation.
//
if (compCurBB->bbJumpKind != BBJ_RETURN)
{
BasicBlock* const successor = compCurBB->GetUniqueSucc();
assert(successor->bbJumpKind == BBJ_RETURN);
successor->bbFlags |= BBF_TAILCALL_SUCCESSOR;
optMethodFlags |= OMF_HAS_TAILCALL_SUCCESSOR;
}
// If this call might eventually turn into a loop back to method entry, make sure we
// import the method entry.
//
assert(call->IsCall());
GenTreeCall* const actualCall = call->AsCall();
const bool mustImportEntryBlock = gtIsRecursiveCall(methHnd) || actualCall->IsInlineCandidate() ||
actualCall->IsGuardedDevirtualizationCandidate();
// Only schedule importation if we're not currently importing.
//
if (mustImportEntryBlock && (compCurBB != fgEntryBB))
{
JITDUMP("\nOSR: inlineable or recursive tail call [%06u] in the method, so scheduling " FMT_BB
" for importation\n",
dspTreeID(call), fgEntryBB->bbNum);
impImportBlockPending(fgEntryBB);
}
}
}
if ((sig->flags & CORINFO_SIGFLAG_FAT_CALL) != 0)
{
assert(opcode == CEE_CALLI || callInfo->kind == CORINFO_CALL_CODE_POINTER);
addFatPointerCandidate(call->AsCall());
}
DONE_CALL:
// Push or append the result of the call
if (callRetTyp == TYP_VOID)
{
if (opcode == CEE_NEWOBJ)
{
// we actually did push something, so don't spill the thing we just pushed.
assert(verCurrentState.esStackDepth > 0);
impAppendTree(call, verCurrentState.esStackDepth - 1, impCurStmtDI);
}
else
{
impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtDI);
}
}
else
{
impSpillSpecialSideEff();
if (clsFlags & CORINFO_FLG_ARRAY)
{
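// For array methods, re-fetch the exact call-site signature before computing the type pushed on the stack.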
eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
}
typeInfo tiRetVal = verMakeTypeInfo(sig->retType, sig->retTypeClass);
tiRetVal.NormaliseForStack();
// The CEE_READONLY prefix modifies the verification semantics of an Address
// operation on an array type.
if ((clsFlags & CORINFO_FLG_ARRAY) && isReadonlyCall && tiRetVal.IsByRef())
{
tiRetVal.SetIsReadonlyByRef();
}
if (call->IsCall())
{
// Sometimes "call" is not a GT_CALL (if we imported an intrinsic that didn't turn into a call)
GenTreeCall* origCall = call->AsCall();
const bool isFatPointerCandidate = origCall->IsFatPointerCandidate();
const bool isInlineCandidate = origCall->IsInlineCandidate();
const bool isGuardedDevirtualizationCandidate = origCall->IsGuardedDevirtualizationCandidate();
if (varTypeIsStruct(callRetTyp))
{
// Need to treat all "split tree" cases here, not just inline candidates
call = impFixupCallStructReturn(call->AsCall(), sig->retTypeClass);
}
// TODO: consider handling fatcalli cases this way too...?
if (isInlineCandidate || isGuardedDevirtualizationCandidate)
{
// We should not have made any adjustments in impFixupCallStructReturn
// as we defer those until we know the fate of the call.
assert(call == origCall);
assert(opts.OptEnabled(CLFLG_INLINING));
assert(!isFatPointerCandidate); // We should not try to inline calli.
// Make the call its own tree (spill the stack if needed).
// Do not consume the debug info here. This is particularly
// important if we give up on the inline, in which case the
// call will typically end up in the statement that contains
// the GT_RET_EXPR that we leave on the stack.
impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtDI, false);
// TODO: Still using the widened type.
GenTree* retExpr = gtNewInlineCandidateReturnExpr(call, genActualType(callRetTyp), compCurBB->bbFlags);
// Link the retExpr to the call so if necessary we can manipulate it later.
origCall->gtInlineCandidateInfo->retExpr = retExpr;
// Propagate retExpr as the placeholder for the call.
call = retExpr;
}
else
{
// If the call is virtual, and has a generics context, and is not going to have a class probe,
// record the context for possible use during late devirt.
//
// If we ever want to devirt at Tier0, and/or see issues where OSR methods under PGO lose
// important devirtualizations, we'll want to allow both a class probe and a captured context.
//
if (origCall->IsVirtual() && (origCall->gtCallType != CT_INDIRECT) && (exactContextHnd != nullptr) &&
(origCall->gtClassProfileCandidateInfo == nullptr))
{
JITDUMP("\nSaving context %p for call [%06u]\n", exactContextHnd, dspTreeID(origCall));
origCall->gtCallMoreFlags |= GTF_CALL_M_LATE_DEVIRT;
LateDevirtualizationInfo* const info = new (this, CMK_Inlining) LateDevirtualizationInfo;
info->exactContextHnd = exactContextHnd;
origCall->gtLateDevirtualizationInfo = info;
}
if (isFatPointerCandidate)
{
// fatPointer candidates should be in statements of the form call() or var = call().
// Such a form allows us to find statements with fat calls without walking through whole trees
// and avoids problems with cutting trees apart.
assert(!bIntrinsicImported);
assert(IsTargetAbi(CORINFO_CORERT_ABI));
if (call->OperGet() != GT_LCL_VAR) // can be already converted by impFixupCallStructReturn.
{
unsigned calliSlot = lvaGrabTemp(true DEBUGARG("calli"));
LclVarDsc* varDsc = lvaGetDesc(calliSlot);
varDsc->lvVerTypeInfo = tiRetVal;
impAssignTempGen(calliSlot, call, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_NONE);
// impAssignTempGen can change src arg list and return type for call that returns struct.
var_types type = genActualType(lvaTable[calliSlot].TypeGet());
call = gtNewLclvNode(calliSlot, type);
}
}
// For non-candidates we must also spill, since we
// might have locals live on the eval stack that this
// call can modify.
//
// Suppress this for certain well-known call targets
// that we know won't modify locals, e.g. calls that are
// recognized in gtCanOptimizeTypeEquality. Otherwise
// we may break key fragile pattern matches later on.
bool spillStack = true;
if (call->IsCall())
{
GenTreeCall* callNode = call->AsCall();
if ((callNode->gtCallType == CT_HELPER) && (gtIsTypeHandleToRuntimeTypeHelper(callNode) ||
gtIsTypeHandleToRuntimeTypeHandleHelper(callNode)))
{
spillStack = false;
}
else if ((callNode->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) != 0)
{
spillStack = false;
}
}
if (spillStack)
{
impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("non-inline candidate call"));
}
}
}
if (!bIntrinsicImported)
{
//-------------------------------------------------------------------------
//
/* If the call is of a small type and the callee is managed, the callee will normalize the result
before returning.
However, we need to normalize small type values returned by unmanaged
functions (pinvoke). The pinvoke stub does the normalization, but we need to do it here
if we use the shorter inlined pinvoke stub. */
if (checkForSmallType && varTypeIsIntegral(callRetTyp) && genTypeSize(callRetTyp) < genTypeSize(TYP_INT))
{
call = gtNewCastNode(genActualType(callRetTyp), call, false, callRetTyp);
}
}
impPushOnStack(call, tiRetVal);
}
// VSD functions get a new call target each time we getCallInfo, so clear the cache.
// Also, the call info cache for CALLI instructions is largely incomplete, so clear it out.
// if ( (opcode == CEE_CALLI) || (callInfoCache.fetchCallInfo().kind == CORINFO_VIRTUALCALL_STUB))
// callInfoCache.uncacheCallInfo();
return callRetTyp;
}
#ifdef _PREFAST_
#pragma warning(pop)
#endif
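//------------------------------------------------------------------------
// impMethodInfo_hasRetBuffArg: determine whether a method returns its struct result
// through a hidden return buffer argument under the given calling convention.
//
// Illustrative, ABI-dependent note: on the SysV x64 ABI, for instance, a struct larger
// than 16 bytes is typically returned via a hidden buffer (SPK_ByReference), while
// smaller structs usually come back in registers; the exact decision is delegated to
// getReturnTypeForStruct below.
//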
bool Compiler::impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo, CorInfoCallConvExtension callConv)
{
CorInfoType corType = methInfo->args.retType;
if ((corType == CORINFO_TYPE_VALUECLASS) || (corType == CORINFO_TYPE_REFANY))
{
// We have some kind of STRUCT being returned
structPassingKind howToReturnStruct = SPK_Unknown;
var_types returnType = getReturnTypeForStruct(methInfo->args.retTypeClass, callConv, &howToReturnStruct);
if (howToReturnStruct == SPK_ByReference)
{
return true;
}
}
return false;
}
#ifdef DEBUG
//------------------------------------------------------------------------
// impImportJitTestLabelMark: import a (DEBUG-only) JitTestLabel marker call by popping
// its constant label arguments, attaching the resulting test annotation to the node on
// top of the stack, and pushing that node back. Returns the type of the annotated node.
//
var_types Compiler::impImportJitTestLabelMark(int numArgs)
{
TestLabelAndNum tlAndN;
if (numArgs == 2)
{
tlAndN.m_num = 0;
StackEntry se = impPopStack();
assert(se.seTypeInfo.GetType() == TI_INT);
GenTree* val = se.val;
assert(val->IsCnsIntOrI());
tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
}
else if (numArgs == 3)
{
StackEntry se = impPopStack();
assert(se.seTypeInfo.GetType() == TI_INT);
GenTree* val = se.val;
assert(val->IsCnsIntOrI());
tlAndN.m_num = val->AsIntConCommon()->IconValue();
se = impPopStack();
assert(se.seTypeInfo.GetType() == TI_INT);
val = se.val;
assert(val->IsCnsIntOrI());
tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
}
else
{
assert(false);
}
StackEntry expSe = impPopStack();
GenTree* node = expSe.val;
// There are a small number of special cases, where we actually put the annotation on a subnode.
if (tlAndN.m_tl == TL_LoopHoist && tlAndN.m_num >= 100)
{
// A loop hoist annotation with value >= 100 means that the expression should be a static field access,
// a GT_IND of a static field address, which should be the sum of a (hoistable) helper call and possibly some
// offset within the static field block whose address is returned by the helper call.
// The annotation is saying that this address calculation, but not the entire access, should be hoisted.
assert(node->OperGet() == GT_IND);
tlAndN.m_num -= 100;
GetNodeTestData()->Set(node->AsOp()->gtOp1, tlAndN);
GetNodeTestData()->Remove(node);
}
else
{
GetNodeTestData()->Set(node, tlAndN);
}
impPushOnStack(node, expSe.seTypeInfo);
return node->TypeGet();
}
#endif // DEBUG
//-----------------------------------------------------------------------------------
// impFixupCallStructReturn: For a call node that returns a struct do one of the following:
// - set the flag to indicate struct return via retbuf arg;
// - adjust the return type to a SIMD type if it is returned in 1 reg;
// - spill call result into a temp if it is returned into 2 registers or more and not tail call or inline candidate.
//
// Arguments:
// call - GT_CALL GenTree node
// retClsHnd - Class handle of return type of the call
//
// Return Value:
// Returns new GenTree node after fixing struct return of call node
//
GenTree* Compiler::impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HANDLE retClsHnd)
{
if (!varTypeIsStruct(call))
{
return call;
}
call->gtRetClsHnd = retClsHnd;
#if FEATURE_MULTIREG_RET
call->InitializeStructReturnType(this, retClsHnd, call->GetUnmanagedCallConv());
const ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
const unsigned retRegCount = retTypeDesc->GetReturnRegCount();
#else // !FEATURE_MULTIREG_RET
const unsigned retRegCount = 1;
#endif // !FEATURE_MULTIREG_RET
structPassingKind howToReturnStruct;
var_types returnType = getReturnTypeForStruct(retClsHnd, call->GetUnmanagedCallConv(), &howToReturnStruct);
if (howToReturnStruct == SPK_ByReference)
{
assert(returnType == TYP_UNKNOWN);
call->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
return call;
}
// Recognize SIMD types as we do for LCL_VARs,
// note it might not be the ABI-specific type; for example, on x64 we can set `TYP_SIMD8`
// for `System.Numerics.Vector2` here, but lowering will change it to long as the ABI dictates.
var_types simdReturnType = impNormStructType(call->gtRetClsHnd);
if (simdReturnType != call->TypeGet())
{
assert(varTypeIsSIMD(simdReturnType));
JITDUMP("changing the type of a call [%06u] from %s to %s\n", dspTreeID(call), varTypeName(call->TypeGet()),
varTypeName(simdReturnType));
call->ChangeType(simdReturnType);
}
if (retRegCount == 1)
{
return call;
}
#if FEATURE_MULTIREG_RET
assert(varTypeIsStruct(call)); // It could be a SIMD returned in several regs.
assert(returnType == TYP_STRUCT);
assert((howToReturnStruct == SPK_ByValueAsHfa) || (howToReturnStruct == SPK_ByValue));
#ifdef UNIX_AMD64_ABI
// must be a struct returned in two registers
assert(retRegCount == 2);
#else // not UNIX_AMD64_ABI
assert(retRegCount >= 2);
#endif // not UNIX_AMD64_ABI
if (!call->CanTailCall() && !call->IsInlineCandidate())
{
// Force a call returning a multi-reg struct to always be of the IR form
//   tmp = call
//
// There is no need to assign a multi-reg struct to a local var if:
//   - It is a tail call, or
//   - The call is marked for inlining later
return impAssignMultiRegTypeToVar(call, retClsHnd DEBUGARG(call->GetUnmanagedCallConv()));
}
return call;
#endif // FEATURE_MULTIREG_RET
}
/*****************************************************************************
For struct return values, re-type the operand in the case where the ABI
does not use a struct return buffer
*/
//------------------------------------------------------------------------
// impFixupStructReturnType: For struct return values, it sets appropriate flags in the multireg return case;
// in the non-multireg case it handles two special helpers: `CORINFO_HELP_GETFIELDSTRUCT`, `CORINFO_HELP_UNBOX_NULLABLE`.
//
// Arguments:
// op - the return value;
// retClsHnd - the struct handle;
// unmgdCallConv - the calling convention of the function that returns this struct.
//
// Return Value:
// the result tree that does the return.
//
GenTree* Compiler::impFixupStructReturnType(GenTree* op,
CORINFO_CLASS_HANDLE retClsHnd,
CorInfoCallConvExtension unmgdCallConv)
{
assert(varTypeIsStruct(info.compRetType));
assert(info.compRetBuffArg == BAD_VAR_NUM);
JITDUMP("\nimpFixupStructReturnType: retyping\n");
DISPTREE(op);
#if defined(TARGET_XARCH)
#if FEATURE_MULTIREG_RET
// No VarArgs for CoreCLR on x64 Unix
UNIX_AMD64_ABI_ONLY(assert(!info.compIsVarArgs));
// Is method returning a multi-reg struct?
if (varTypeIsStruct(info.compRetNativeType) && IsMultiRegReturnedType(retClsHnd, unmgdCallConv))
{
// In case of multi-reg struct return, we force IR to be one of the following:
// GT_RETURN(lclvar) or GT_RETURN(call). If op is anything other than a
// lclvar or call, it is assigned to a temp to create: temp = op and GT_RETURN(tmp).
if (op->gtOper == GT_LCL_VAR)
{
// Note that this is a multi-reg return.
unsigned lclNum = op->AsLclVarCommon()->GetLclNum();
lvaTable[lclNum].lvIsMultiRegRet = true;
// TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
op->gtFlags |= GTF_DONT_CSE;
return op;
}
if (op->gtOper == GT_CALL)
{
return op;
}
return impAssignMultiRegTypeToVar(op, retClsHnd DEBUGARG(unmgdCallConv));
}
#else
assert(info.compRetNativeType != TYP_STRUCT);
#endif // FEATURE_MULTIREG_RET
#elif FEATURE_MULTIREG_RET && defined(TARGET_ARM)
if (varTypeIsStruct(info.compRetNativeType) && !info.compIsVarArgs && IsHfa(retClsHnd))
{
if (op->gtOper == GT_LCL_VAR)
{
// This LCL_VAR is an HFA return value, it stays as a TYP_STRUCT
unsigned lclNum = op->AsLclVarCommon()->GetLclNum();
// Make sure this struct type stays as struct so that we can return it as an HFA
lvaTable[lclNum].lvIsMultiRegRet = true;
// TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
op->gtFlags |= GTF_DONT_CSE;
return op;
}
if (op->gtOper == GT_CALL)
{
if (op->AsCall()->IsVarargs())
{
// We cannot tail call because control needs to return to fixup the calling
// convention for result return.
op->AsCall()->gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
op->AsCall()->gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
}
else
{
return op;
}
}
return impAssignMultiRegTypeToVar(op, retClsHnd DEBUGARG(unmgdCallConv));
}
#elif FEATURE_MULTIREG_RET && defined(TARGET_ARM64)
// Is method returning a multi-reg struct?
if (IsMultiRegReturnedType(retClsHnd, unmgdCallConv))
{
if (op->gtOper == GT_LCL_VAR)
{
// This LCL_VAR stays as a TYP_STRUCT
unsigned lclNum = op->AsLclVarCommon()->GetLclNum();
if (!lvaIsImplicitByRefLocal(lclNum))
{
// Make sure this struct type is not struct promoted
lvaTable[lclNum].lvIsMultiRegRet = true;
// TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
op->gtFlags |= GTF_DONT_CSE;
return op;
}
}
if (op->gtOper == GT_CALL)
{
if (op->AsCall()->IsVarargs())
{
// We cannot tail call because control needs to return to fixup the calling
// convention for result return.
op->AsCall()->gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
op->AsCall()->gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
}
else
{
return op;
}
}
return impAssignMultiRegTypeToVar(op, retClsHnd DEBUGARG(unmgdCallConv));
}
#endif // FEATURE_MULTIREG_RET && TARGET_ARM64
if (!op->IsCall() || !op->AsCall()->TreatAsHasRetBufArg(this))
{
// Don't retype `struct` as a primitive type in `ret` instruction.
return op;
}
// This must be one of those 'special' helpers that don't
// really have a return buffer, but instead use it as a way
// to keep the trees cleaner with fewer address-taken temps.
//
// Well now we have to materialize the return buffer as
// an address-taken temp. Then we can return the temp.
//
// NOTE: this code assumes that since the call directly
// feeds the return, then the call must be returning the
// same structure/class/type.
//
unsigned tmpNum = lvaGrabTemp(true DEBUGARG("pseudo return buffer"));
// No need to spill anything as we're about to return.
impAssignTempGen(tmpNum, op, info.compMethodInfo->args.retTypeClass, (unsigned)CHECK_SPILL_NONE);
op = gtNewLclvNode(tmpNum, info.compRetType);
JITDUMP("\nimpFixupStructReturnType: created a pseudo-return buffer for a special helper\n");
DISPTREE(op);
return op;
}
/*****************************************************************************
CEE_LEAVE may be jumping out of a protected block, viz, a catch or a
finally-protected try. We find the finally blocks protecting the current
offset (in order) by walking over the complete exception table and
finding enclosing clauses. This assumes that the table is sorted.
This will create a series of BBJ_CALLFINALLY -> BBJ_CALLFINALLY ... -> BBJ_ALWAYS.
If we are leaving a catch handler, we need to attach the
CPX_ENDCATCHes to the correct BBJ_CALLFINALLY blocks.
After this function, the BBJ_LEAVE block has been converted to a different type.
*/
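// Illustrative sketch (hypothetical IL, names made up): leaving two nested
// finally-protected trys with a single CEE_LEAVE, e.g.
//
//   try {
//     try {
//       leave OUTSIDE;
//     } finally { /* F1 */ }
//   } finally { /* F2 */ }
//   OUTSIDE:
//
// roughly produces the chain
//
//   BBJ_CALLFINALLY (invokes F1) -> BBJ_CALLFINALLY (invokes F2) -> BBJ_ALWAYS -> OUTSIDE
//
// (step blocks elided; the details differ between the funclet and non-funclet
// implementations below).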
#if !defined(FEATURE_EH_FUNCLETS)
void Compiler::impImportLeave(BasicBlock* block)
{
#ifdef DEBUG
if (verbose)
{
printf("\nBefore import CEE_LEAVE:\n");
fgDispBasicBlocks();
fgDispHandlerTab();
}
#endif // DEBUG
bool invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
unsigned blkAddr = block->bbCodeOffs;
BasicBlock* leaveTarget = block->bbJumpDest;
unsigned jmpAddr = leaveTarget->bbCodeOffs;
// LEAVE clears the stack: spill any side effects and set the stack depth to 0
impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
verCurrentState.esStackDepth = 0;
assert(block->bbJumpKind == BBJ_LEAVE);
assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != NULL); // should be a BB boundary
BasicBlock* step = DUMMY_INIT(NULL);
unsigned encFinallies = 0; // Number of enclosing finallies.
GenTree* endCatches = NULL;
Statement* endLFinStmt = NULL; // The statement tree to indicate the end of locally-invoked finally.
unsigned XTnum;
EHblkDsc* HBtab;
for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
{
// Grab the handler offsets
IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
/* Is this a catch-handler we are CEE_LEAVEing out of?
* If so, we need to call CORINFO_HELP_ENDCATCH.
*/
if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
{
// Can't CEE_LEAVE out of a finally/fault handler
if (HBtab->HasFinallyOrFaultHandler())
BADCODE("leave out of fault/finally block");
// Create the call to CORINFO_HELP_ENDCATCH
GenTree* endCatch = gtNewHelperCallNode(CORINFO_HELP_ENDCATCH, TYP_VOID);
// Make a list of all the currently pending endCatches
if (endCatches)
endCatches = gtNewOperNode(GT_COMMA, TYP_VOID, endCatches, endCatch);
else
endCatches = endCatch;
#ifdef DEBUG
if (verbose)
{
printf("impImportLeave - " FMT_BB " jumping out of catch handler EH#%u, adding call to "
"CORINFO_HELP_ENDCATCH\n",
block->bbNum, XTnum);
}
#endif
}
else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
!jitIsBetween(jmpAddr, tryBeg, tryEnd))
{
/* This is a finally-protected try we are jumping out of */
/* If there are any pending endCatches, and we have already
jumped out of a finally-protected try, then the endCatches
have to be put in a block in an outer try for async
exceptions to work correctly.
Otherwise, just append to the original block. */
BasicBlock* callBlock;
assert(!encFinallies ==
!endLFinStmt); // if we have finallies, we better have an endLFin tree, and vice-versa
if (encFinallies == 0)
{
assert(step == DUMMY_INIT(NULL));
callBlock = block;
callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
if (endCatches)
impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);
#ifdef DEBUG
if (verbose)
{
printf("impImportLeave - jumping out of a finally-protected try, convert block to BBJ_CALLFINALLY "
"block %s\n",
callBlock->dspToString());
}
#endif
}
else
{
assert(step != DUMMY_INIT(NULL));
/* Calling the finally block */
callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, XTnum + 1, 0, step);
assert(step->bbJumpKind == BBJ_ALWAYS);
step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
// finally in the chain)
step->bbJumpDest->bbRefs++;
/* The new block will inherit this block's weight */
callBlock->inheritWeight(block);
#ifdef DEBUG
if (verbose)
{
printf("impImportLeave - jumping out of a finally-protected try, new BBJ_CALLFINALLY block %s\n",
callBlock->dspToString());
}
#endif
Statement* lastStmt;
if (endCatches)
{
lastStmt = gtNewStmt(endCatches);
endLFinStmt->SetNextStmt(lastStmt);
lastStmt->SetPrevStmt(endLFinStmt);
}
else
{
lastStmt = endLFinStmt;
}
// note that this sets BBF_IMPORTED on the block
impEndTreeList(callBlock, endLFinStmt, lastStmt);
}
step = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
/* The new block will inherit this block's weight */
step->inheritWeight(block);
step->bbFlags |= BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
#ifdef DEBUG
if (verbose)
{
printf("impImportLeave - jumping out of a finally-protected try, created step (BBJ_ALWAYS) block %s\n",
step->dspToString());
}
#endif
unsigned finallyNesting = compHndBBtab[XTnum].ebdHandlerNestingLevel;
assert(finallyNesting <= compHndBBtabCount);
callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
GenTree* endLFin = new (this, GT_END_LFIN) GenTreeVal(GT_END_LFIN, TYP_VOID, finallyNesting);
endLFinStmt = gtNewStmt(endLFin);
endCatches = NULL;
encFinallies++;
invalidatePreds = true;
}
}
/* Append any remaining endCatches, if any */
assert(!encFinallies == !endLFinStmt);
if (encFinallies == 0)
{
assert(step == DUMMY_INIT(NULL));
block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
if (endCatches)
impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);
#ifdef DEBUG
if (verbose)
{
printf("impImportLeave - no enclosing finally-protected try blocks; convert CEE_LEAVE block to BBJ_ALWAYS "
"block %s\n",
block->dspToString());
}
#endif
}
else
{
// If leaveTarget is the start of another try block, we want to make sure that
// we do not insert finalStep into that try block. Hence, we find the enclosing
// try block.
unsigned tryIndex = bbFindInnermostCommonTryRegion(step, leaveTarget);
// Insert a new BB either in the try region indicated by tryIndex or
// the handler region indicated by leaveTarget->bbHndIndex,
// depending on which is the inner region.
BasicBlock* finalStep = fgNewBBinRegion(BBJ_ALWAYS, tryIndex, leaveTarget->bbHndIndex, step);
finalStep->bbFlags |= BBF_KEEP_BBJ_ALWAYS;
step->bbJumpDest = finalStep;
/* The new block will inherit this block's weight */
finalStep->inheritWeight(block);
#ifdef DEBUG
if (verbose)
{
printf("impImportLeave - finalStep block required (encFinallies(%d) > 0), new block %s\n", encFinallies,
finalStep->dspToString());
}
#endif
Statement* lastStmt;
if (endCatches)
{
lastStmt = gtNewStmt(endCatches);
endLFinStmt->SetNextStmt(lastStmt);
lastStmt->SetPrevStmt(endLFinStmt);
}
else
{
lastStmt = endLFinStmt;
}
impEndTreeList(finalStep, endLFinStmt, lastStmt);
finalStep->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
// Queue up the jump target for importing
impImportBlockPending(leaveTarget);
invalidatePreds = true;
}
if (invalidatePreds && fgComputePredsDone)
{
JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
fgRemovePreds();
}
#ifdef DEBUG
fgVerifyHandlerTab();
if (verbose)
{
printf("\nAfter import CEE_LEAVE:\n");
fgDispBasicBlocks();
fgDispHandlerTab();
}
#endif // DEBUG
}
#else // FEATURE_EH_FUNCLETS
void Compiler::impImportLeave(BasicBlock* block)
{
#ifdef DEBUG
if (verbose)
{
printf("\nBefore import CEE_LEAVE in " FMT_BB " (targetting " FMT_BB "):\n", block->bbNum,
block->bbJumpDest->bbNum);
fgDispBasicBlocks();
fgDispHandlerTab();
}
#endif // DEBUG
bool invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
unsigned blkAddr = block->bbCodeOffs;
BasicBlock* leaveTarget = block->bbJumpDest;
unsigned jmpAddr = leaveTarget->bbCodeOffs;
// LEAVE clears the stack: spill any side effects and set the stack depth to 0
impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
verCurrentState.esStackDepth = 0;
assert(block->bbJumpKind == BBJ_LEAVE);
assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != nullptr); // should be a BB boundary
BasicBlock* step = nullptr;
enum StepType
{
// No step type; step == NULL.
ST_None,
// Is the step block the BBJ_ALWAYS block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair?
// That is, is step->bbJumpDest where a finally will return to?
ST_FinallyReturn,
// The step block is a catch return.
ST_Catch,
// The step block is in a "try", created as the target for a finally return or the target for a catch return.
ST_Try
};
StepType stepType = ST_None;
unsigned XTnum;
EHblkDsc* HBtab;
for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
{
// Grab the handler offsets
IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
/* Is this a catch-handler we are CEE_LEAVEing out of?
*/
if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
{
// Can't CEE_LEAVE out of a finally/fault handler
if (HBtab->HasFinallyOrFaultHandler())
{
BADCODE("leave out of fault/finally block");
}
/* We are jumping out of a catch */
if (step == nullptr)
{
step = block;
step->bbJumpKind = BBJ_EHCATCHRET; // convert the BBJ_LEAVE to BBJ_EHCATCHRET
stepType = ST_Catch;
#ifdef DEBUG
if (verbose)
{
printf("impImportLeave - jumping out of a catch (EH#%u), convert block " FMT_BB
" to BBJ_EHCATCHRET "
"block\n",
XTnum, step->bbNum);
}
#endif
}
else
{
BasicBlock* exitBlock;
/* Create a new catch exit block in the catch region for the existing step block to jump to in this
* scope */
exitBlock = fgNewBBinRegion(BBJ_EHCATCHRET, 0, XTnum + 1, step);
assert(step->KindIs(BBJ_ALWAYS, BBJ_EHCATCHRET));
step->bbJumpDest = exitBlock; // the previous step (maybe a call to a nested finally, or a nested catch
// exit) returns to this block
step->bbJumpDest->bbRefs++;
#if defined(TARGET_ARM)
if (stepType == ST_FinallyReturn)
{
assert(step->bbJumpKind == BBJ_ALWAYS);
// Mark the target of a finally return
step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
}
#endif // defined(TARGET_ARM)
/* The new block will inherit this block's weight */
exitBlock->inheritWeight(block);
exitBlock->bbFlags |= BBF_IMPORTED;
/* This exit block is the new step */
step = exitBlock;
stepType = ST_Catch;
invalidatePreds = true;
#ifdef DEBUG
if (verbose)
{
printf("impImportLeave - jumping out of a catch (EH#%u), new BBJ_EHCATCHRET block " FMT_BB "\n",
XTnum, exitBlock->bbNum);
}
#endif
}
}
else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
!jitIsBetween(jmpAddr, tryBeg, tryEnd))
{
/* We are jumping out of a finally-protected try */
BasicBlock* callBlock;
if (step == nullptr)
{
#if FEATURE_EH_CALLFINALLY_THUNKS
// Put the call to the finally in the enclosing region.
unsigned callFinallyTryIndex =
(HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
unsigned callFinallyHndIndex =
(HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, block);
// Convert the BBJ_LEAVE to BBJ_ALWAYS, jumping to the new BBJ_CALLFINALLY. This is because
// the new BBJ_CALLFINALLY is in a different EH region, thus it can't just replace the BBJ_LEAVE,
// which might be in the middle of the "try". In most cases, the BBJ_ALWAYS will jump to the
// next block, and flow optimizations will remove it.
block->bbJumpKind = BBJ_ALWAYS;
block->bbJumpDest = callBlock;
block->bbJumpDest->bbRefs++;
/* The new block will inherit this block's weight */
callBlock->inheritWeight(block);
callBlock->bbFlags |= BBF_IMPORTED;
#ifdef DEBUG
if (verbose)
{
printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block " FMT_BB
" to "
"BBJ_ALWAYS, add BBJ_CALLFINALLY block " FMT_BB "\n",
XTnum, block->bbNum, callBlock->bbNum);
}
#endif
#else // !FEATURE_EH_CALLFINALLY_THUNKS
callBlock = block;
callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
#ifdef DEBUG
if (verbose)
{
printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block " FMT_BB
" to "
"BBJ_CALLFINALLY block\n",
XTnum, callBlock->bbNum);
}
#endif
#endif // !FEATURE_EH_CALLFINALLY_THUNKS
}
else
{
// Calling the finally block. We already have a step block that is either the call-to-finally from a
// more nested try/finally (thus we are jumping out of multiple nested 'try' blocks, each protected by
// a 'finally'), or the step block is the return from a catch.
//
// Due to ThreadAbortException, we can't have the catch return target the call-to-finally block
// directly. Note that if a 'catch' ends without resetting the ThreadAbortException, the VM will
// automatically re-raise the exception, using the return address of the catch (that is, the target
// block of the BBJ_EHCATCHRET) as the re-raise address. If this address is in a finally, the VM will
// refuse to do the re-raise, and the ThreadAbortException will get eaten (and lost). On AMD64/ARM64,
// we put the call-to-finally thunk in a special "cloned finally" EH region that does look like a
// finally clause to the VM. Thus, on these platforms, we can't have BBJ_EHCATCHRET target a
// BBJ_CALLFINALLY directly. (Note that on ARM32, we don't mark the thunk specially -- it lives directly
// within the 'try' region protected by the finally, since we generate code in such a way that execution
// never returns to the call-to-finally call, and the finally-protected 'try' region doesn't appear on
// stack walks.)
assert(step->KindIs(BBJ_ALWAYS, BBJ_EHCATCHRET));
#if FEATURE_EH_CALLFINALLY_THUNKS
if (step->bbJumpKind == BBJ_EHCATCHRET)
{
// Need to create another step block in the 'try' region that will actually branch to the
// call-to-finally thunk.
BasicBlock* step2 = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
step->bbJumpDest = step2;
step->bbJumpDest->bbRefs++;
step2->inheritWeight(block);
step2->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
#ifdef DEBUG
if (verbose)
{
printf("impImportLeave - jumping out of a finally-protected try (EH#%u), step block is "
"BBJ_EHCATCHRET (" FMT_BB "), new BBJ_ALWAYS step-step block " FMT_BB "\n",
XTnum, step->bbNum, step2->bbNum);
}
#endif
step = step2;
assert(stepType == ST_Catch); // Leave it as catch type for now.
}
#endif // FEATURE_EH_CALLFINALLY_THUNKS
#if FEATURE_EH_CALLFINALLY_THUNKS
unsigned callFinallyTryIndex =
(HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
unsigned callFinallyHndIndex =
(HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
#else // !FEATURE_EH_CALLFINALLY_THUNKS
unsigned callFinallyTryIndex = XTnum + 1;
unsigned callFinallyHndIndex = 0; // don't care
#endif // !FEATURE_EH_CALLFINALLY_THUNKS
callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, step);
step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
// finally in the chain)
step->bbJumpDest->bbRefs++;
#if defined(TARGET_ARM)
if (stepType == ST_FinallyReturn)
{
assert(step->bbJumpKind == BBJ_ALWAYS);
// Mark the target of a finally return
step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
}
#endif // defined(TARGET_ARM)
/* The new block will inherit this block's weight */
callBlock->inheritWeight(block);
callBlock->bbFlags |= BBF_IMPORTED;
#ifdef DEBUG
if (verbose)
{
printf("impImportLeave - jumping out of a finally-protected try (EH#%u), new BBJ_CALLFINALLY "
"block " FMT_BB "\n",
XTnum, callBlock->bbNum);
}
#endif
}
step = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
stepType = ST_FinallyReturn;
/* The new block will inherit this block's weight */
step->inheritWeight(block);
step->bbFlags |= BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
#ifdef DEBUG
if (verbose)
{
printf("impImportLeave - jumping out of a finally-protected try (EH#%u), created step (BBJ_ALWAYS) "
"block " FMT_BB "\n",
XTnum, step->bbNum);
}
#endif
callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
invalidatePreds = true;
}
else if (HBtab->HasCatchHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
!jitIsBetween(jmpAddr, tryBeg, tryEnd))
{
// We are jumping out of a catch-protected try.
//
// If we are returning from a call to a finally, then we must have a step block within a try
// that is protected by a catch. This is so when unwinding from that finally (e.g., if code within the
// finally raises an exception), the VM will find this step block, notice that it is in a protected region,
// and invoke the appropriate catch.
//
// We also need to handle a special case with the handling of ThreadAbortException. If a try/catch
// catches a ThreadAbortException (which might be because it catches a parent, e.g. System.Exception),
// and the catch doesn't call System.Threading.Thread::ResetAbort(), then when the catch returns to the VM,
// the VM will automatically re-raise the ThreadAbortException. When it does this, it uses the target
// address of the catch return as the new exception address. That is, the re-raised exception appears to
// occur at the catch return address. If this exception return address skips an enclosing try/catch that
// catches ThreadAbortException, then the enclosing try/catch will not catch the exception, as it should.
// For example:
//
// try {
// try {
// // something here raises ThreadAbortException
// LEAVE LABEL_1; // no need to stop at LABEL_2
// } catch (Exception) {
// // This catches ThreadAbortException, but doesn't call System.Threading.Thread::ResetAbort(), so
// // ThreadAbortException is re-raised by the VM at the address specified by the LEAVE opcode.
// // This is bad, since it means the outer try/catch won't get a chance to catch the re-raised
// // ThreadAbortException. So, instead, create step block LABEL_2 and LEAVE to that. We only
// // need to do this transformation if the current EH block is a try/catch that catches
// // ThreadAbortException (or one of its parents), however we might not be able to find that
// // information, so currently we do it for all catch types.
// LEAVE LABEL_1; // Convert this to LEAVE LABEL2;
// }
// LABEL_2: LEAVE LABEL_1; // inserted by this step creation code
// } catch (ThreadAbortException) {
// }
// LABEL_1:
//
// Note that this pattern isn't theoretical: it occurs in ASP.NET, in IL code generated by the Roslyn C#
// compiler.
if ((stepType == ST_FinallyReturn) || (stepType == ST_Catch))
{
BasicBlock* catchStep;
assert(step);
if (stepType == ST_FinallyReturn)
{
assert(step->bbJumpKind == BBJ_ALWAYS);
}
else
{
assert(stepType == ST_Catch);
assert(step->bbJumpKind == BBJ_EHCATCHRET);
}
/* Create a new exit block in the try region for the existing step block to jump to in this scope */
catchStep = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
step->bbJumpDest = catchStep;
step->bbJumpDest->bbRefs++;
#if defined(TARGET_ARM)
if (stepType == ST_FinallyReturn)
{
// Mark the target of a finally return
step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
}
#endif // defined(TARGET_ARM)
/* The new block will inherit this block's weight */
catchStep->inheritWeight(block);
catchStep->bbFlags |= BBF_IMPORTED;
#ifdef DEBUG
if (verbose)
{
if (stepType == ST_FinallyReturn)
{
printf("impImportLeave - return from finally jumping out of a catch-protected try (EH#%u), new "
"BBJ_ALWAYS block " FMT_BB "\n",
XTnum, catchStep->bbNum);
}
else
{
assert(stepType == ST_Catch);
printf("impImportLeave - return from catch jumping out of a catch-protected try (EH#%u), new "
"BBJ_ALWAYS block " FMT_BB "\n",
XTnum, catchStep->bbNum);
}
}
#endif // DEBUG
/* This block is the new step */
step = catchStep;
stepType = ST_Try;
invalidatePreds = true;
}
}
}
if (step == nullptr)
{
block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
#ifdef DEBUG
if (verbose)
{
printf("impImportLeave - no enclosing finally-protected try blocks or catch handlers; convert CEE_LEAVE "
"block " FMT_BB " to BBJ_ALWAYS\n",
block->bbNum);
}
#endif
}
else
{
step->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
#if defined(TARGET_ARM)
if (stepType == ST_FinallyReturn)
{
assert(step->bbJumpKind == BBJ_ALWAYS);
// Mark the target of a finally return
step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
}
#endif // defined(TARGET_ARM)
#ifdef DEBUG
if (verbose)
{
printf("impImportLeave - final destination of step blocks set to " FMT_BB "\n", leaveTarget->bbNum);
}
#endif
// Queue up the jump target for importing
impImportBlockPending(leaveTarget);
}
if (invalidatePreds && fgComputePredsDone)
{
JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
fgRemovePreds();
}
#ifdef DEBUG
fgVerifyHandlerTab();
if (verbose)
{
printf("\nAfter import CEE_LEAVE:\n");
fgDispBasicBlocks();
fgDispHandlerTab();
}
#endif // DEBUG
}
#endif // FEATURE_EH_FUNCLETS
/*****************************************************************************/
// This is called when reimporting a leave block. It resets the JumpKind,
// JumpDest, and bbNext to the original values
void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr)
{
#if defined(FEATURE_EH_FUNCLETS)
// With EH Funclets, while importing a leave opcode we create another block ending with BBJ_ALWAYS (call it B1),
// and the block containing the leave (say B0) is marked as BBJ_CALLFINALLY. If for some reason we reimport B0,
// it is reset (in this routine) to end with BBJ_LEAVE, and further down, when B0 is reimported, we
// create another BBJ_ALWAYS (call it B2). In this process B1 gets orphaned, and any blocks to which B1 is the
// only predecessor are also considered orphans and attempted to be deleted.
//
// try {
// ....
// try
// {
// ....
// leave OUTSIDE; // B0 is the block containing this leave, following this would be B1
// } finally { }
// } finally { }
// OUTSIDE:
//
// In the above nested try-finally example, we create a step block (call it Bstep) which branches to a block
// where a finally would branch to (and such a block is marked as a finally target). Block B1 branches to the
// step block. Because of the re-import of B0, Bstep is also orphaned. Since Bstep is a finally target it cannot
// be removed. To work around this we will duplicate B0 (call it B0Dup) before resetting. B0Dup is marked as
// BBJ_CALLFINALLY and only serves to pair up with B1 (BBJ_ALWAYS) that got orphaned. Now during orphan block
// deletion B0Dup and B1 will be treated as a pair and handled correctly.
if (block->bbJumpKind == BBJ_CALLFINALLY)
{
BasicBlock* dupBlock = bbNewBasicBlock(block->bbJumpKind);
dupBlock->bbFlags = block->bbFlags;
dupBlock->bbJumpDest = block->bbJumpDest;
dupBlock->copyEHRegion(block);
dupBlock->bbCatchTyp = block->bbCatchTyp;
// Mark this block as
// a) not referenced by any other block to make sure that it gets deleted
// b) weight zero
// c) prevent from being imported
// d) as internal
// e) as rarely run
dupBlock->bbRefs = 0;
dupBlock->bbWeight = BB_ZERO_WEIGHT;
dupBlock->bbFlags |= BBF_IMPORTED | BBF_INTERNAL | BBF_RUN_RARELY;
// Insert the block right after the block which is getting reset so that BBJ_CALLFINALLY and BBJ_ALWAYS
// will be next to each other.
fgInsertBBafter(block, dupBlock);
#ifdef DEBUG
if (verbose)
{
printf("New Basic Block " FMT_BB " duplicate of " FMT_BB " created.\n", dupBlock->bbNum, block->bbNum);
}
#endif
}
#endif // FEATURE_EH_FUNCLETS
block->bbJumpKind = BBJ_LEAVE;
fgInitBBLookup();
block->bbJumpDest = fgLookupBB(jmpAddr);
// We will leave the BBJ_ALWAYS block we introduced. When it's reimported
// the BBJ_ALWAYS block will be unreachable, and will be removed afterwards. The
// reason we don't want to remove the block at this point is that if we call
// fgInitBBLookup() again we will do it wrong, as the BBJ_ALWAYS block won't be
// added and the linked list length will be different from fgBBcount.
}
/*****************************************************************************/
// Get the first non-prefix opcode. Used for verification of valid combinations
// of prefixes and actual opcodes.
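// For example (illustrative): given the IL sequence "volatile. unaligned. 1 ldind.i4",
// this skips the prefix opcodes (and their operands) and returns CEE_LDIND_I4; if the
// stream ends before a non-prefix opcode is found, CEE_ILLEGAL is returned.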
OPCODE Compiler::impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp)
{
while (codeAddr < codeEndp)
{
OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
codeAddr += sizeof(__int8);
if (opcode == CEE_PREFIX1)
{
if (codeAddr >= codeEndp)
{
break;
}
opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
codeAddr += sizeof(__int8);
}
switch (opcode)
{
case CEE_UNALIGNED:
case CEE_VOLATILE:
case CEE_TAILCALL:
case CEE_CONSTRAINED:
case CEE_READONLY:
break;
default:
return opcode;
}
codeAddr += opcodeSizes[opcode];
}
return CEE_ILLEGAL;
}
/*****************************************************************************/
// Checks whether the opcode is a valid opcode for volatile. and unaligned. prefixes
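// For example (illustrative): "volatile. ldsfld" is accepted, "unaligned. ldsfld" is rejected
// (static field opcodes are only permitted with the volatile. prefix here), and "volatile. add"
// is rejected because add is not a memory-access opcode.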
void Compiler::impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix)
{
OPCODE opcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
if (!(
// The opcodes of all the ldind and stind variants happen to be contiguous, except stind.i.
((CEE_LDIND_I1 <= opcode) && (opcode <= CEE_STIND_R8)) || (opcode == CEE_STIND_I) ||
(opcode == CEE_LDFLD) || (opcode == CEE_STFLD) || (opcode == CEE_LDOBJ) || (opcode == CEE_STOBJ) ||
(opcode == CEE_INITBLK) || (opcode == CEE_CPBLK) ||
// volatile. prefix is allowed with the ldsfld and stsfld
(volatilePrefix && ((opcode == CEE_LDSFLD) || (opcode == CEE_STSFLD)))))
{
BADCODE("Invalid opcode for unaligned. or volatile. prefix");
}
}
/*****************************************************************************/
#ifdef DEBUG
#undef RETURN // undef contracts RETURN macro
enum controlFlow_t
{
NEXT,
CALL,
RETURN,
THROW,
BRANCH,
COND_BRANCH,
BREAK,
PHI,
META,
};
const static controlFlow_t controlFlow[] = {
#define OPDEF(c, s, pop, push, args, type, l, s1, s2, flow) flow,
#include "opcode.def"
#undef OPDEF
};
#endif // DEBUG
/*****************************************************************************
* Determine the result type of an arithmetic operation.
* On 64-bit targets, inserts upcasts when a native int is mixed with an int32.
*/
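// Rough summary of the cases handled below (illustrative; see the body for the exact rules):
//   byref - byref => native int
//   byref +/- [native] int => byref
//   [native] int - byref => native int
//   native int mixed with int32 => native int (an explicit upcast is inserted on 64-bit)
//   int32 op int32 => int32; float mixed with double => double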
var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTree** pOp1, GenTree** pOp2)
{
var_types type = TYP_UNDEF;
GenTree* op1 = *pOp1;
GenTree* op2 = *pOp2;
// Arithmetic operations are generally only allowed with
// primitive types, but certain operations are allowed
// with byrefs
if ((oper == GT_SUB) && (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
{
if ((genActualType(op1->TypeGet()) == TYP_BYREF) && (genActualType(op2->TypeGet()) == TYP_BYREF))
{
// byref1-byref2 => gives a native int
type = TYP_I_IMPL;
}
else if (genActualTypeIsIntOrI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_BYREF))
{
// [native] int - byref => gives a native int
//
// The reason is that it is possible, in managed C++,
// to have a tree like this:
//
// -
// / \.
// / \.
// / \.
// / \.
// const(h) int addr byref
//
// <BUGNUM> VSW 318822 </BUGNUM>
//
// So here we decide to make the resulting type to be a native int.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_64BIT
if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
{
// insert an explicit upcast
op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
}
#endif // TARGET_64BIT
type = TYP_I_IMPL;
}
else
{
// byref - [native] int => gives a byref
assert(genActualType(op1->TypeGet()) == TYP_BYREF && genActualTypeIsIntOrI(op2->TypeGet()));
#ifdef TARGET_64BIT
if ((genActualType(op2->TypeGet()) != TYP_I_IMPL))
{
// insert an explicit upcast
op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
}
#endif // TARGET_64BIT
type = TYP_BYREF;
}
}
else if ((oper == GT_ADD) &&
(genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
{
// byref + [native] int => gives a byref
// (or)
// [native] int + byref => gives a byref
// only one can be a byref : byref op byref not allowed
assert(genActualType(op1->TypeGet()) != TYP_BYREF || genActualType(op2->TypeGet()) != TYP_BYREF);
assert(genActualTypeIsIntOrI(op1->TypeGet()) || genActualTypeIsIntOrI(op2->TypeGet()));
#ifdef TARGET_64BIT
if (genActualType(op2->TypeGet()) == TYP_BYREF)
{
if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
{
// insert an explicit upcast
op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
}
}
else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
{
// insert an explicit upcast
op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
}
#endif // TARGET_64BIT
type = TYP_BYREF;
}
#ifdef TARGET_64BIT
else if (genActualType(op1->TypeGet()) == TYP_I_IMPL || genActualType(op2->TypeGet()) == TYP_I_IMPL)
{
assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
// int + long => gives long
// long + int => gives long
// We get this case because in the IL the 'long' here isn't an Int64; it's just an IntPtr (native int).
if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
{
// insert an explicit upcast
op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
}
else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
{
// insert an explicit upcast
op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
}
type = TYP_I_IMPL;
}
#else // 32-bit TARGET
else if (genActualType(op1->TypeGet()) == TYP_LONG || genActualType(op2->TypeGet()) == TYP_LONG)
{
assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
// int + long => gives long
// long + int => gives long
type = TYP_LONG;
}
#endif // TARGET_64BIT
else
{
// int + int => gives an int
assert(genActualType(op1->TypeGet()) != TYP_BYREF && genActualType(op2->TypeGet()) != TYP_BYREF);
assert(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
(varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType)));
type = genActualType(op1->gtType);
// If both operands are TYP_FLOAT, then leave it as TYP_FLOAT.
// Otherwise, turn floats into doubles
if ((type == TYP_FLOAT) && (genActualType(op2->gtType) != TYP_FLOAT))
{
assert(genActualType(op2->gtType) == TYP_DOUBLE);
type = TYP_DOUBLE;
}
}
assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_FLOAT || type == TYP_LONG || type == TYP_INT);
return type;
}
//------------------------------------------------------------------------
// impOptimizeCastClassOrIsInst: attempt to resolve a cast when jitting
//
// Arguments:
// op1 - value to cast
// pResolvedToken - resolved token for type to cast to
// isCastClass - true if this is a castclass, false if isinst
//
// Return Value:
// tree representing optimized cast, or null if no optimization possible
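// For example (illustrative): if op1 is known to be exactly String and the token also resolves
// to String (compareTypesForCast reports Must), the cast is dropped and op1 is returned as-is.
// If op1's type is known exactly and the cast cannot succeed, an isinst folds to a null constant
// (and a feeding box may be removed), while a failing castclass is left to the runtime helper
// so that it can throw.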
GenTree* Compiler::impOptimizeCastClassOrIsInst(GenTree* op1, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass)
{
assert(op1->TypeGet() == TYP_REF);
// Don't optimize for minopts or debug codegen.
if (opts.OptimizationDisabled())
{
return nullptr;
}
// See what we know about the type of the object being cast.
bool isExact = false;
bool isNonNull = false;
CORINFO_CLASS_HANDLE fromClass = gtGetClassHandle(op1, &isExact, &isNonNull);
if (fromClass != nullptr)
{
CORINFO_CLASS_HANDLE toClass = pResolvedToken->hClass;
JITDUMP("\nConsidering optimization of %s from %s%p (%s) to %p (%s)\n", isCastClass ? "castclass" : "isinst",
isExact ? "exact " : "", dspPtr(fromClass), eeGetClassName(fromClass), dspPtr(toClass),
eeGetClassName(toClass));
// Perhaps we know if the cast will succeed or fail.
TypeCompareState castResult = info.compCompHnd->compareTypesForCast(fromClass, toClass);
if (castResult == TypeCompareState::Must)
{
// Cast will succeed, result is simply op1.
JITDUMP("Cast will succeed, optimizing to simply return input\n");
return op1;
}
else if (castResult == TypeCompareState::MustNot)
{
// See if we can sharpen exactness by looking for final classes
if (!isExact)
{
isExact = impIsClassExact(fromClass);
}
// Cast to exact type will fail. Handle case where we have
// an exact type (that is, fromClass is not a subtype)
// and we're not going to throw on failure.
if (isExact && !isCastClass)
{
JITDUMP("Cast will fail, optimizing to return null\n");
GenTree* result = gtNewIconNode(0, TYP_REF);
// If the cast was fed by a box, we can remove that too.
if (op1->IsBoxedValue())
{
JITDUMP("Also removing upstream box\n");
gtTryRemoveBoxUpstreamEffects(op1);
}
return result;
}
else if (isExact)
{
JITDUMP("Not optimizing failing castclass (yet)\n");
}
else
{
JITDUMP("Can't optimize since fromClass is inexact\n");
}
}
else
{
JITDUMP("Result of cast unknown, must generate runtime test\n");
}
}
else
{
JITDUMP("\nCan't optimize since fromClass is unknown\n");
}
return nullptr;
}
//------------------------------------------------------------------------
// impCastClassOrIsInstToTree: build and import castclass/isinst
//
// Arguments:
// op1 - value to cast
// op2 - type handle for type to cast to
// pResolvedToken - resolved token from the cast operation
// isCastClass - true if this is castclass, false means isinst
//
// Return Value:
// Tree representing the cast
//
// Notes:
// May expand into a series of runtime checks or a helper call.
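// When expanded inline (and ignoring the PGO-guided partial expansion), the resulting tree is
// roughly equivalent to the following pseudo-code, where the temp names are illustrative only:
//
//   tmp = op1;
//   result = (tmp == null) ? tmp
//          : (methodTable(tmp) == op2) ? tmp
//          : isCastClass ? CORINFO_HELP_CHKCASTCLASS_SPECIAL(op2, tmp)
//          : null;
//
// Otherwise a plain helper call is emitted (possibly set up as a class-profile candidate).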
GenTree* Compiler::impCastClassOrIsInstToTree(
GenTree* op1, GenTree* op2, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass, IL_OFFSET ilOffset)
{
assert(op1->TypeGet() == TYP_REF);
// Optimistically assume the jit should expand this as an inline test
bool shouldExpandInline = true;
// Profitability check.
//
// Don't bother with inline expansion when the jit is trying to
// generate code quickly, or the cast is in code that won't run very
// often, or the method is already pretty big.
if (compCurBB->isRunRarely() || opts.OptimizationDisabled())
{
// not worth the code expansion if jitting fast or in a rarely run block
shouldExpandInline = false;
}
else if ((op1->gtFlags & GTF_GLOB_EFFECT) && lvaHaveManyLocals())
{
// not worth creating an untracked local variable
shouldExpandInline = false;
}
else if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR) && (JitConfig.JitProfileCasts() == 1))
{
// Optimizations are enabled but we're still instrumenting (including casts)
if (isCastClass && !impIsClassExact(pResolvedToken->hClass))
{
// Usually, we make a speculative assumption that it makes sense to expand castclass
// even for non-sealed classes, but let's rely on PGO in this specific case
shouldExpandInline = false;
}
}
// Pessimistically assume the jit cannot expand this as an inline test
bool canExpandInline = false;
bool partialExpand = false;
const CorInfoHelpFunc helper = info.compCompHnd->getCastingHelper(pResolvedToken, isCastClass);
GenTree* exactCls = nullptr;
// Legality check.
//
// Not all castclass/isinst operations can be inline expanded.
// Check legality only if an inline expansion is desirable.
if (shouldExpandInline)
{
if (isCastClass)
{
// Jit can only inline expand the normal CHKCASTCLASS helper.
canExpandInline = (helper == CORINFO_HELP_CHKCASTCLASS);
}
else
{
if (helper == CORINFO_HELP_ISINSTANCEOFCLASS)
{
// If the class is exact, the jit can expand the IsInst check inline.
canExpandInline = impIsClassExact(pResolvedToken->hClass);
}
}
// Check if this cast helper has some profile data
if (impIsCastHelperMayHaveProfileData(helper))
{
bool doRandomDevirt = false;
const int maxLikelyClasses = 32;
int likelyClassCount = 0;
LikelyClassRecord likelyClasses[maxLikelyClasses];
#ifdef DEBUG
// Optional stress mode to pick a random known class, rather than
// the most likely known class.
doRandomDevirt = JitConfig.JitRandomGuardedDevirtualization() != 0;
if (doRandomDevirt)
{
// Reuse the random inliner's random state.
CLRRandom* const random =
impInlineRoot()->m_inlineStrategy->GetRandom(JitConfig.JitRandomGuardedDevirtualization());
likelyClasses[0].clsHandle = getRandomClass(fgPgoSchema, fgPgoSchemaCount, fgPgoData, ilOffset, random);
likelyClasses[0].likelihood = 100;
if (likelyClasses[0].clsHandle != NO_CLASS_HANDLE)
{
likelyClassCount = 1;
}
}
else
#endif
{
likelyClassCount = getLikelyClasses(likelyClasses, maxLikelyClasses, fgPgoSchema, fgPgoSchemaCount,
fgPgoData, ilOffset);
}
if (likelyClassCount > 0)
{
LikelyClassRecord likelyClass = likelyClasses[0];
CORINFO_CLASS_HANDLE likelyCls = likelyClass.clsHandle;
if ((likelyCls != NO_CLASS_HANDLE) &&
(likelyClass.likelihood > (UINT32)JitConfig.JitGuardedDevirtualizationChainLikelihood()))
{
if ((info.compCompHnd->compareTypesForCast(likelyCls, pResolvedToken->hClass) ==
TypeCompareState::Must))
{
assert((info.compCompHnd->getClassAttribs(likelyCls) &
(CORINFO_FLG_INTERFACE | CORINFO_FLG_ABSTRACT)) == 0);
JITDUMP("Adding \"is %s (%X)\" check as a fast path for %s using PGO data.\n",
eeGetClassName(likelyCls), likelyCls, isCastClass ? "castclass" : "isinst");
canExpandInline = true;
partialExpand = true;
exactCls = gtNewIconEmbClsHndNode(likelyCls);
}
}
}
}
}
const bool expandInline = canExpandInline && shouldExpandInline;
if (!expandInline)
{
JITDUMP("\nExpanding %s as call because %s\n", isCastClass ? "castclass" : "isinst",
canExpandInline ? "want smaller code or faster jitting" : "inline expansion not legal");
// If we CSE this class handle we prevent assertionProp from making SubType assertions
// so instead we force the CSE logic to not consider CSE-ing this class handle.
//
op2->gtFlags |= GTF_DONT_CSE;
GenTreeCall* call = gtNewHelperCallNode(helper, TYP_REF, gtNewCallArgs(op2, op1));
if (impIsCastHelperEligibleForClassProbe(call) && !impIsClassExact(pResolvedToken->hClass))
{
ClassProfileCandidateInfo* pInfo = new (this, CMK_Inlining) ClassProfileCandidateInfo;
pInfo->ilOffset = ilOffset;
pInfo->probeIndex = info.compClassProbeCount++;
call->gtClassProfileCandidateInfo = pInfo;
compCurBB->bbFlags |= BBF_HAS_CLASS_PROFILE;
}
return call;
}
JITDUMP("\nExpanding %s inline\n", isCastClass ? "castclass" : "isinst");
impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark2"));
GenTree* temp;
GenTree* condMT;
//
// expand the methodtable match:
//
// condMT ==> GT_NE
// / \.
// GT_IND op2 (typically CNS_INT)
// |
// op1Copy
//
// This can replace op1 with a GT_COMMA that evaluates op1 into a local
//
op1 = impCloneExpr(op1, &temp, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("CASTCLASS eval op1"));
//
// op1 is now known to be a non-complex tree
// thus we can use gtClone(op1) from now on
//
GenTree* op2Var = op2;
if (isCastClass && !partialExpand)
{
op2Var = fgInsertCommaFormTemp(&op2);
lvaTable[op2Var->AsLclVarCommon()->GetLclNum()].lvIsCSE = true;
}
temp = gtNewMethodTableLookup(temp);
condMT = gtNewOperNode(GT_NE, TYP_INT, temp, exactCls != nullptr ? exactCls : op2);
GenTree* condNull;
//
// expand the null check:
//
// condNull ==> GT_EQ
// / \.
// op1Copy CNS_INT
// null
//
condNull = gtNewOperNode(GT_EQ, TYP_INT, gtClone(op1), gtNewIconNode(0, TYP_REF));
//
// expand the true and false trees for the condMT
//
GenTree* condFalse = gtClone(op1);
GenTree* condTrue;
if (isCastClass)
{
//
// use the special helper that skips the cases checked by our inlined cast
//
const CorInfoHelpFunc specialHelper = CORINFO_HELP_CHKCASTCLASS_SPECIAL;
condTrue =
gtNewHelperCallNode(specialHelper, TYP_REF, gtNewCallArgs(partialExpand ? op2 : op2Var, gtClone(op1)));
}
else if (partialExpand)
{
condTrue = gtNewHelperCallNode(helper, TYP_REF, gtNewCallArgs(op2, gtClone(op1)));
}
else
{
condTrue = gtNewIconNode(0, TYP_REF);
}
GenTree* qmarkMT;
//
// Generate first QMARK - COLON tree
//
// qmarkMT ==> GT_QMARK
// / \.
// condMT GT_COLON
// / \.
// condFalse condTrue
//
temp = new (this, GT_COLON) GenTreeColon(TYP_REF, condTrue, condFalse);
qmarkMT = gtNewQmarkNode(TYP_REF, condMT, temp->AsColon());
if (isCastClass && impIsClassExact(pResolvedToken->hClass) && condTrue->OperIs(GT_CALL))
{
// condTrue is used only for throwing InvalidCastException in case of casting to an exact class.
condTrue->AsCall()->gtCallMoreFlags |= GTF_CALL_M_DOES_NOT_RETURN;
}
GenTree* qmarkNull;
//
// Generate second QMARK - COLON tree
//
// qmarkNull ==> GT_QMARK
// / \.
// condNull GT_COLON
// / \.
// qmarkMT op1Copy
//
temp = new (this, GT_COLON) GenTreeColon(TYP_REF, gtClone(op1), qmarkMT);
qmarkNull = gtNewQmarkNode(TYP_REF, condNull, temp->AsColon());
qmarkNull->gtFlags |= GTF_QMARK_CAST_INSTOF;
// Make QMark node a top level node by spilling it.
unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark2"));
impAssignTempGen(tmp, qmarkNull, (unsigned)CHECK_SPILL_NONE);
// TODO-CQ: Is it possible op1 has a better type?
//
// See also gtGetHelperCallClassHandle where we make the same
// determination for the helper call variants.
LclVarDsc* lclDsc = lvaGetDesc(tmp);
assert(lclDsc->lvSingleDef == 0);
lclDsc->lvSingleDef = 1;
JITDUMP("Marked V%02u as a single def temp\n", tmp);
lvaSetClass(tmp, pResolvedToken->hClass);
return gtNewLclvNode(tmp, TYP_REF);
}
#ifndef DEBUG
#define assertImp(cond) ((void)0)
#else
#define assertImp(cond) \
do \
{ \
if (!(cond)) \
{ \
const int cchAssertImpBuf = 600; \
char* assertImpBuf = (char*)_alloca(cchAssertImpBuf); \
_snprintf_s(assertImpBuf, cchAssertImpBuf, cchAssertImpBuf - 1, \
"%s : Possibly bad IL with CEE_%s at offset %04Xh (op1=%s op2=%s stkDepth=%d)", #cond, \
impCurOpcName, impCurOpcOffs, op1 ? varTypeName(op1->TypeGet()) : "NULL", \
op2 ? varTypeName(op2->TypeGet()) : "NULL", verCurrentState.esStackDepth); \
assertAbort(assertImpBuf, __FILE__, __LINE__); \
} \
} while (0)
#endif // DEBUG
//------------------------------------------------------------------------
// impBlockIsInALoop: check if a block might be in a loop
//
// Arguments:
// block - block to check
//
// Returns:
// true if the block might be in a loop.
//
// Notes:
// Conservatively correct; may return true for some blocks that are
// not actually in loops.
//
bool Compiler::impBlockIsInALoop(BasicBlock* block)
{
return (compIsForInlining() && ((impInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) != 0)) ||
((block->bbFlags & BBF_BACKWARD_JUMP) != 0);
}
#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
/*****************************************************************************
* Import the instr for the given basic block
*/
void Compiler::impImportBlockCode(BasicBlock* block)
{
#define _impResolveToken(kind) impResolveToken(codeAddr, &resolvedToken, kind)
#ifdef DEBUG
if (verbose)
{
printf("\nImporting " FMT_BB " (PC=%03u) of '%s'", block->bbNum, block->bbCodeOffs, info.compFullName);
}
#endif
unsigned nxtStmtIndex = impInitBlockLineInfo();
IL_OFFSET nxtStmtOffs;
CorInfoHelpFunc helper;
CorInfoIsAccessAllowedResult accessAllowedResult;
CORINFO_HELPER_DESC calloutHelper;
const BYTE* lastLoadToken = nullptr;
/* Get the tree list started */
impBeginTreeList();
#ifdef FEATURE_ON_STACK_REPLACEMENT
bool enablePatchpoints = opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && (JitConfig.TC_OnStackReplacement() > 0);
#ifdef DEBUG
// Optionally suppress patchpoints by method hash
//
static ConfigMethodRange JitEnablePatchpointRange;
JitEnablePatchpointRange.EnsureInit(JitConfig.JitEnablePatchpointRange());
const unsigned hash = impInlineRoot()->info.compMethodHash();
const bool inRange = JitEnablePatchpointRange.Contains(hash);
enablePatchpoints &= inRange;
#endif // DEBUG
if (enablePatchpoints)
{
// We don't inline at Tier0, if we do, we may need rethink our approach.
// Could probably support inlines that don't introduce flow.
//
assert(!compIsForInlining());
// OSR is not yet supported for methods with explicit tail calls.
//
// But we also do not have to switch these methods to be optimized, as we should be
// able to avoid getting trapped in Tier0 code by normal call counting.
// So instead, just suppress adding patchpoints.
//
if (!compTailPrefixSeen)
{
// We only need to add patchpoints if the method can loop.
//
if (compHasBackwardJump)
{
assert(compCanHavePatchpoints());
// By default we use the "adaptive" strategy.
//
// This can create both source and target patchpoints within a given
// loop structure, which isn't ideal, but is not incorrect. We will
// just have some extra Tier0 overhead.
//
// Todo: implement support for mid-block patchpoints. If `block`
// is truly a backedge source (and not in a handler) then we should be
// able to find a stack empty point somewhere in the block.
//
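                // Strategy values handled by the switch below (illustrative summary):
                //   0 (and other unlisted values): patchpoints at backedge sources when
                //     possible, otherwise at the backedge targets
                //   1: patchpoints only at stack-empty backedge targets
                //   2: adaptive -- prefer targets when a block has multiple (backedge) preds,
                //      otherwise sources
                //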
const int patchpointStrategy = JitConfig.TC_PatchpointStrategy();
bool addPatchpoint = false;
bool mustUseTargetPatchpoint = false;
switch (patchpointStrategy)
{
default:
{
// Patchpoints at backedge sources, if possible, otherwise targets.
//
addPatchpoint = ((block->bbFlags & BBF_BACKWARD_JUMP_SOURCE) == BBF_BACKWARD_JUMP_SOURCE);
mustUseTargetPatchpoint = (verCurrentState.esStackDepth != 0) || block->hasHndIndex();
break;
}
case 1:
{
// Patchpoints at stackempty backedge targets.
// Note if we have loops where the IL stack is not empty on the backedge we can't patchpoint
// them.
//
// We should not have allowed OSR if there were backedges in handlers.
//
assert(!block->hasHndIndex());
addPatchpoint = ((block->bbFlags & BBF_BACKWARD_JUMP_TARGET) == BBF_BACKWARD_JUMP_TARGET) &&
(verCurrentState.esStackDepth == 0);
break;
}
case 2:
{
// Adaptive strategy.
//
// Patchpoints at backedge targets if there are multiple backedges,
// otherwise at backedge sources, if possible. Note a block can be both; if so we
// just need one patchpoint.
//
if ((block->bbFlags & BBF_BACKWARD_JUMP_TARGET) == BBF_BACKWARD_JUMP_TARGET)
{
// We don't know backedge count, so just use ref count.
//
addPatchpoint = (block->bbRefs > 1) && (verCurrentState.esStackDepth == 0);
}
if (!addPatchpoint && ((block->bbFlags & BBF_BACKWARD_JUMP_SOURCE) == BBF_BACKWARD_JUMP_SOURCE))
{
addPatchpoint = true;
mustUseTargetPatchpoint = (verCurrentState.esStackDepth != 0) || block->hasHndIndex();
// Also force target patchpoint if target block has multiple (backedge) preds.
//
if (!mustUseTargetPatchpoint)
{
for (BasicBlock* const succBlock : block->Succs(this))
{
if ((succBlock->bbNum <= block->bbNum) && (succBlock->bbRefs > 1))
{
mustUseTargetPatchpoint = true;
break;
}
}
}
}
break;
}
}
if (addPatchpoint)
{
if (mustUseTargetPatchpoint)
{
// We wanted a source patchpoint, but could not have one.
// So, add patchpoints to the backedge targets.
//
for (BasicBlock* const succBlock : block->Succs(this))
{
if (succBlock->bbNum <= block->bbNum)
{
// The succBlock had better agree it's a target.
//
assert((succBlock->bbFlags & BBF_BACKWARD_JUMP_TARGET) == BBF_BACKWARD_JUMP_TARGET);
// We may already have decided to put a patchpoint in succBlock. If not, add one.
//
                                if ((succBlock->bbFlags & BBF_PATCHPOINT) == 0)
{
// In some cases the target may not be stack-empty at entry.
// If so, we will bypass patchpoints for this backedge.
//
if (succBlock->bbStackDepthOnEntry() > 0)
{
JITDUMP("\nCan't set source patchpoint at " FMT_BB ", can't use target " FMT_BB
" as it has non-empty stack on entry.\n",
block->bbNum, succBlock->bbNum);
}
else
{
JITDUMP("\nCan't set source patchpoint at " FMT_BB ", using target " FMT_BB
" instead\n",
block->bbNum, succBlock->bbNum);
assert(!succBlock->hasHndIndex());
succBlock->bbFlags |= BBF_PATCHPOINT;
}
}
}
}
}
else
{
assert(!block->hasHndIndex());
block->bbFlags |= BBF_PATCHPOINT;
}
setMethodHasPatchpoint();
}
}
else
{
            // Should not see backward branch targets w/o backward branches.
            // So if !compHasBackwardJump, these flags should never be set.
//
assert((block->bbFlags & (BBF_BACKWARD_JUMP_TARGET | BBF_BACKWARD_JUMP_SOURCE)) == 0);
}
}
#ifdef DEBUG
// As a stress test, we can place patchpoints at the start of any block
// that is a stack empty point and is not within a handler.
//
// Todo: enable for mid-block stack empty points too.
//
const int offsetOSR = JitConfig.JitOffsetOnStackReplacement();
const int randomOSR = JitConfig.JitRandomOnStackReplacement();
const bool tryOffsetOSR = offsetOSR >= 0;
const bool tryRandomOSR = randomOSR > 0;
if (compCanHavePatchpoints() && (tryOffsetOSR || tryRandomOSR) && (verCurrentState.esStackDepth == 0) &&
!block->hasHndIndex() && ((block->bbFlags & BBF_PATCHPOINT) == 0))
{
// Block start can have a patchpoint. See if we should add one.
//
bool addPatchpoint = false;
// Specific offset?
//
if (tryOffsetOSR)
{
if (impCurOpcOffs == (unsigned)offsetOSR)
{
addPatchpoint = true;
}
}
// Random?
//
else
{
// Reuse the random inliner's random state.
// Note m_inlineStrategy is always created, even if we're not inlining.
//
CLRRandom* const random = impInlineRoot()->m_inlineStrategy->GetRandom(randomOSR);
const int randomValue = (int)random->Next(100);
addPatchpoint = (randomValue < randomOSR);
}
if (addPatchpoint)
{
block->bbFlags |= BBF_PATCHPOINT;
setMethodHasPatchpoint();
}
JITDUMP("\n** %s patchpoint%s added to " FMT_BB " (il offset %u)\n", tryOffsetOSR ? "offset" : "random",
addPatchpoint ? "" : " not", block->bbNum, impCurOpcOffs);
}
#endif // DEBUG
}
// Mark stack-empty rare blocks to be considered for partial compilation.
//
// Ideally these are conditionally executed blocks -- if the method is going
// to unconditionally throw, there's not as much to be gained by deferring jitting.
// For now, we just screen out the entry bb.
//
// In general we might want track all the IL stack empty points so we can
// propagate rareness back through flow and place the partial compilation patchpoints "earlier"
// so there are fewer overall.
//
// Note unlike OSR, it's ok to forgo these.
//
// Todo: stress mode...
//
if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && (JitConfig.TC_PartialCompilation() > 0) &&
compCanHavePatchpoints() && !compTailPrefixSeen)
{
// Is this block a good place for partial compilation?
//
if ((block != fgFirstBB) && block->isRunRarely() && (verCurrentState.esStackDepth == 0) &&
((block->bbFlags & BBF_PATCHPOINT) == 0) && !block->hasHndIndex())
{
JITDUMP("\nBlock " FMT_BB " will be a partial compilation patchpoint -- not importing\n", block->bbNum);
block->bbFlags |= BBF_PARTIAL_COMPILATION_PATCHPOINT;
setMethodHasPartialCompilationPatchpoint();
// Change block to BBJ_THROW so we won't trigger importation of successors.
//
block->bbJumpKind = BBJ_THROW;
            // If this method has an explicit generic context, the only uses of it may be in
// the IL for this block. So assume it's used.
//
if (info.compMethodInfo->options &
(CORINFO_GENERICS_CTXT_FROM_METHODDESC | CORINFO_GENERICS_CTXT_FROM_METHODTABLE))
{
lvaGenericsContextInUse = true;
}
return;
}
}
#endif // FEATURE_ON_STACK_REPLACEMENT
/* Walk the opcodes that comprise the basic block */
const BYTE* codeAddr = info.compCode + block->bbCodeOffs;
const BYTE* codeEndp = info.compCode + block->bbCodeOffsEnd;
IL_OFFSET opcodeOffs = block->bbCodeOffs;
IL_OFFSET lastSpillOffs = opcodeOffs;
signed jmpDist;
/* remember the start of the delegate creation sequence (used for verification) */
const BYTE* delegateCreateStart = nullptr;
int prefixFlags = 0;
bool explicitTailCall, constraintCall, readonlyCall;
typeInfo tiRetVal;
unsigned numArgs = info.compArgsCount;
/* Now process all the opcodes in the block */
var_types callTyp = TYP_COUNT;
OPCODE prevOpcode = CEE_ILLEGAL;
if (block->bbCatchTyp)
{
if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
{
impCurStmtOffsSet(block->bbCodeOffs);
}
// We will spill the GT_CATCH_ARG and the input of the BB_QMARK block
        // to a temp. This is a trade-off for code simplicity.
impSpillSpecialSideEff();
}
while (codeAddr < codeEndp)
{
#ifdef FEATURE_READYTORUN
bool usingReadyToRunHelper = false;
#endif
CORINFO_RESOLVED_TOKEN resolvedToken;
CORINFO_RESOLVED_TOKEN constrainedResolvedToken = {};
CORINFO_CALL_INFO callInfo;
CORINFO_FIELD_INFO fieldInfo;
tiRetVal = typeInfo(); // Default type info
//---------------------------------------------------------------------
/* We need to restrict the max tree depth as many of the Compiler
functions are recursive. We do this by spilling the stack */
if (verCurrentState.esStackDepth)
{
            /* Has it been a while since we last saw a non-empty stack (which
               guarantees that the tree depth isn't accumulating)? */
if ((opcodeOffs - lastSpillOffs) > MAX_TREE_SIZE && impCanSpillNow(prevOpcode))
{
impSpillStackEnsure();
lastSpillOffs = opcodeOffs;
}
}
else
{
lastSpillOffs = opcodeOffs;
impBoxTempInUse = false; // nothing on the stack, box temp OK to use again
}
/* Compute the current instr offset */
opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
#ifndef DEBUG
if (opts.compDbgInfo)
#endif
{
nxtStmtOffs =
(nxtStmtIndex < info.compStmtOffsetsCount) ? info.compStmtOffsets[nxtStmtIndex] : BAD_IL_OFFSET;
/* Have we reached the next stmt boundary ? */
if (nxtStmtOffs != BAD_IL_OFFSET && opcodeOffs >= nxtStmtOffs)
{
assert(nxtStmtOffs == info.compStmtOffsets[nxtStmtIndex]);
if (verCurrentState.esStackDepth != 0 && opts.compDbgCode)
{
/* We need to provide accurate IP-mapping at this point.
So spill anything on the stack so that it will form
gtStmts with the correct stmt offset noted */
impSpillStackEnsure(true);
}
// Have we reported debug info for any tree?
if (impCurStmtDI.IsValid() && opts.compDbgCode)
{
GenTree* placeHolder = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
impAppendTree(placeHolder, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);
assert(!impCurStmtDI.IsValid());
}
if (!impCurStmtDI.IsValid())
{
/* Make sure that nxtStmtIndex is in sync with opcodeOffs.
If opcodeOffs has gone past nxtStmtIndex, catch up */
while ((nxtStmtIndex + 1) < info.compStmtOffsetsCount &&
info.compStmtOffsets[nxtStmtIndex + 1] <= opcodeOffs)
{
nxtStmtIndex++;
}
/* Go to the new stmt */
impCurStmtOffsSet(info.compStmtOffsets[nxtStmtIndex]);
/* Update the stmt boundary index */
nxtStmtIndex++;
assert(nxtStmtIndex <= info.compStmtOffsetsCount);
/* Are there any more line# entries after this one? */
if (nxtStmtIndex < info.compStmtOffsetsCount)
{
/* Remember where the next line# starts */
nxtStmtOffs = info.compStmtOffsets[nxtStmtIndex];
}
else
{
/* No more line# entries */
nxtStmtOffs = BAD_IL_OFFSET;
}
}
}
else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES) &&
(verCurrentState.esStackDepth == 0))
{
/* At stack-empty locations, we have already added the tree to
the stmt list with the last offset. We just need to update
impCurStmtDI
*/
impCurStmtOffsSet(opcodeOffs);
}
else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) &&
impOpcodeIsCallSiteBoundary(prevOpcode))
{
/* Make sure we have a type cached */
assert(callTyp != TYP_COUNT);
if (callTyp == TYP_VOID)
{
impCurStmtOffsSet(opcodeOffs);
}
else if (opts.compDbgCode)
{
impSpillStackEnsure(true);
impCurStmtOffsSet(opcodeOffs);
}
}
else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::NOP_BOUNDARIES) && (prevOpcode == CEE_NOP))
{
if (opts.compDbgCode)
{
impSpillStackEnsure(true);
}
impCurStmtOffsSet(opcodeOffs);
}
assert(!impCurStmtDI.IsValid() || (nxtStmtOffs == BAD_IL_OFFSET) ||
(impCurStmtDI.GetLocation().GetOffset() <= nxtStmtOffs));
}
CORINFO_CLASS_HANDLE clsHnd = DUMMY_INIT(NULL);
CORINFO_CLASS_HANDLE ldelemClsHnd = DUMMY_INIT(NULL);
CORINFO_CLASS_HANDLE stelemClsHnd = DUMMY_INIT(NULL);
var_types lclTyp, ovflType = TYP_UNKNOWN;
GenTree* op1 = DUMMY_INIT(NULL);
GenTree* op2 = DUMMY_INIT(NULL);
GenTree* newObjThisPtr = DUMMY_INIT(NULL);
bool uns = DUMMY_INIT(false);
bool isLocal = false;
/* Get the next opcode and the size of its parameters */
OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
codeAddr += sizeof(__int8);
#ifdef DEBUG
impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
JITDUMP("\n [%2u] %3u (0x%03x) ", verCurrentState.esStackDepth, impCurOpcOffs, impCurOpcOffs);
#endif
DECODE_OPCODE:
// Return if any previous code has caused inline to fail.
if (compDonotInline())
{
return;
}
/* Get the size of additional parameters */
signed int sz = opcodeSizes[opcode];
#ifdef DEBUG
clsHnd = NO_CLASS_HANDLE;
lclTyp = TYP_COUNT;
callTyp = TYP_COUNT;
impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
impCurOpcName = opcodeNames[opcode];
if (verbose && (opcode != CEE_PREFIX1))
{
printf("%s", impCurOpcName);
}
/* Use assertImp() to display the opcode */
op1 = op2 = nullptr;
#endif
/* See what kind of an opcode we have, then */
unsigned mflags = 0;
unsigned clsFlags = 0;
switch (opcode)
{
unsigned lclNum;
var_types type;
GenTree* op3;
genTreeOps oper;
unsigned size;
int val;
CORINFO_SIG_INFO sig;
IL_OFFSET jmpAddr;
bool ovfl, unordered, callNode;
bool ldstruct;
CORINFO_CLASS_HANDLE tokenType;
union {
int intVal;
float fltVal;
__int64 lngVal;
double dblVal;
} cval;
case CEE_PREFIX1:
opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
codeAddr += sizeof(__int8);
goto DECODE_OPCODE;
SPILL_APPEND:
// We need to call impSpillLclRefs() for a struct type lclVar.
// This is because there may be loads of that lclVar on the evaluation stack, and
// we need to ensure that those loads are completed before we modify it.
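                // (Illustrative: if the IL has already pushed a field of that struct onto the
                // evaluation stack and then stores to the whole struct, the pending load must
                // be spilled so it is evaluated before the store rather than after it.)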
if ((op1->OperGet() == GT_ASG) && varTypeIsStruct(op1->gtGetOp1()))
{
GenTree* lhs = op1->gtGetOp1();
GenTreeLclVarCommon* lclVar = nullptr;
if (lhs->gtOper == GT_LCL_VAR)
{
lclVar = lhs->AsLclVarCommon();
}
else if (lhs->OperIsBlk())
{
// Check if LHS address is within some struct local, to catch
// cases where we're updating the struct by something other than a stfld
GenTree* addr = lhs->AsBlk()->Addr();
// Catches ADDR(LCL_VAR), or ADD(ADDR(LCL_VAR),CNS_INT))
lclVar = addr->IsLocalAddrExpr();
// Catches ADDR(FIELD(... ADDR(LCL_VAR)))
if (lclVar == nullptr)
{
GenTree* lclTree = nullptr;
if (impIsAddressInLocal(addr, &lclTree))
{
lclVar = lclTree->AsLclVarCommon();
}
}
}
if (lclVar != nullptr)
{
impSpillLclRefs(lclVar->GetLclNum());
}
}
/* Append 'op1' to the list of statements */
impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI);
goto DONE_APPEND;
APPEND:
/* Append 'op1' to the list of statements */
impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);
goto DONE_APPEND;
DONE_APPEND:
#ifdef DEBUG
// Remember at which BC offset the tree was finished
impNoteLastILoffs();
#endif
break;
case CEE_LDNULL:
impPushNullObjRefOnStack();
break;
case CEE_LDC_I4_M1:
case CEE_LDC_I4_0:
case CEE_LDC_I4_1:
case CEE_LDC_I4_2:
case CEE_LDC_I4_3:
case CEE_LDC_I4_4:
case CEE_LDC_I4_5:
case CEE_LDC_I4_6:
case CEE_LDC_I4_7:
case CEE_LDC_I4_8:
cval.intVal = (opcode - CEE_LDC_I4_0);
assert(-1 <= cval.intVal && cval.intVal <= 8);
goto PUSH_I4CON;
case CEE_LDC_I4_S:
cval.intVal = getI1LittleEndian(codeAddr);
goto PUSH_I4CON;
case CEE_LDC_I4:
cval.intVal = getI4LittleEndian(codeAddr);
goto PUSH_I4CON;
PUSH_I4CON:
JITDUMP(" %d", cval.intVal);
impPushOnStack(gtNewIconNode(cval.intVal), typeInfo(TI_INT));
break;
case CEE_LDC_I8:
cval.lngVal = getI8LittleEndian(codeAddr);
JITDUMP(" 0x%016llx", cval.lngVal);
impPushOnStack(gtNewLconNode(cval.lngVal), typeInfo(TI_LONG));
break;
case CEE_LDC_R8:
cval.dblVal = getR8LittleEndian(codeAddr);
JITDUMP(" %#.17g", cval.dblVal);
impPushOnStack(gtNewDconNode(cval.dblVal), typeInfo(TI_DOUBLE));
break;
case CEE_LDC_R4:
cval.dblVal = getR4LittleEndian(codeAddr);
JITDUMP(" %#.17g", cval.dblVal);
impPushOnStack(gtNewDconNode(cval.dblVal, TYP_FLOAT), typeInfo(TI_DOUBLE));
break;
case CEE_LDSTR:
val = getU4LittleEndian(codeAddr);
JITDUMP(" %08X", val);
impPushOnStack(gtNewSconNode(val, info.compScopeHnd), tiRetVal);
break;
case CEE_LDARG:
lclNum = getU2LittleEndian(codeAddr);
JITDUMP(" %u", lclNum);
impLoadArg(lclNum, opcodeOffs + sz + 1);
break;
case CEE_LDARG_S:
lclNum = getU1LittleEndian(codeAddr);
JITDUMP(" %u", lclNum);
impLoadArg(lclNum, opcodeOffs + sz + 1);
break;
case CEE_LDARG_0:
case CEE_LDARG_1:
case CEE_LDARG_2:
case CEE_LDARG_3:
lclNum = (opcode - CEE_LDARG_0);
assert(lclNum >= 0 && lclNum < 4);
impLoadArg(lclNum, opcodeOffs + sz + 1);
break;
case CEE_LDLOC:
lclNum = getU2LittleEndian(codeAddr);
JITDUMP(" %u", lclNum);
impLoadLoc(lclNum, opcodeOffs + sz + 1);
break;
case CEE_LDLOC_S:
lclNum = getU1LittleEndian(codeAddr);
JITDUMP(" %u", lclNum);
impLoadLoc(lclNum, opcodeOffs + sz + 1);
break;
case CEE_LDLOC_0:
case CEE_LDLOC_1:
case CEE_LDLOC_2:
case CEE_LDLOC_3:
lclNum = (opcode - CEE_LDLOC_0);
assert(lclNum >= 0 && lclNum < 4);
impLoadLoc(lclNum, opcodeOffs + sz + 1);
break;
case CEE_STARG:
lclNum = getU2LittleEndian(codeAddr);
goto STARG;
case CEE_STARG_S:
lclNum = getU1LittleEndian(codeAddr);
STARG:
JITDUMP(" %u", lclNum);
if (compIsForInlining())
{
op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
noway_assert(op1->gtOper == GT_LCL_VAR);
lclNum = op1->AsLclVar()->GetLclNum();
goto VAR_ST_VALID;
}
lclNum = compMapILargNum(lclNum); // account for possible hidden param
assertImp(lclNum < numArgs);
if (lclNum == info.compThisArg)
{
lclNum = lvaArg0Var;
}
// We should have seen this arg write in the prescan
assert(lvaTable[lclNum].lvHasILStoreOp);
goto VAR_ST;
case CEE_STLOC:
lclNum = getU2LittleEndian(codeAddr);
isLocal = true;
JITDUMP(" %u", lclNum);
goto LOC_ST;
case CEE_STLOC_S:
lclNum = getU1LittleEndian(codeAddr);
isLocal = true;
JITDUMP(" %u", lclNum);
goto LOC_ST;
case CEE_STLOC_0:
case CEE_STLOC_1:
case CEE_STLOC_2:
case CEE_STLOC_3:
isLocal = true;
lclNum = (opcode - CEE_STLOC_0);
assert(lclNum >= 0 && lclNum < 4);
LOC_ST:
if (compIsForInlining())
{
lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
/* Have we allocated a temp for this local? */
lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline stloc first use temp"));
goto _PopValue;
}
lclNum += numArgs;
VAR_ST:
if (lclNum >= info.compLocalsCount && lclNum != lvaArg0Var)
{
BADCODE("Bad IL");
}
VAR_ST_VALID:
/* if it is a struct assignment, make certain we don't overflow the buffer */
assert(lclTyp != TYP_STRUCT || lvaLclSize(lclNum) >= info.compCompHnd->getClassSize(clsHnd));
if (lvaTable[lclNum].lvNormalizeOnLoad())
{
lclTyp = lvaGetRealType(lclNum);
}
else
{
lclTyp = lvaGetActualType(lclNum);
}
_PopValue:
/* Pop the value being assigned */
{
StackEntry se = impPopStack();
clsHnd = se.seTypeInfo.GetClassHandle();
op1 = se.val;
tiRetVal = se.seTypeInfo;
}
#ifdef FEATURE_SIMD
if (varTypeIsSIMD(lclTyp) && (lclTyp != op1->TypeGet()))
{
assert(op1->TypeGet() == TYP_STRUCT);
op1->gtType = lclTyp;
}
#endif // FEATURE_SIMD
op1 = impImplicitIorI4Cast(op1, lclTyp);
#ifdef TARGET_64BIT
                // Downcast the TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
if (varTypeIsI(op1->TypeGet()) && (genActualType(lclTyp) == TYP_INT))
{
op1 = gtNewCastNode(TYP_INT, op1, false, TYP_INT);
}
#endif // TARGET_64BIT
// We had better assign it a value of the correct type
assertImp(
genActualType(lclTyp) == genActualType(op1->gtType) ||
(genActualType(lclTyp) == TYP_I_IMPL && op1->IsLocalAddrExpr() != nullptr) ||
(genActualType(lclTyp) == TYP_I_IMPL && (op1->gtType == TYP_BYREF || op1->gtType == TYP_REF)) ||
(genActualType(op1->gtType) == TYP_I_IMPL && lclTyp == TYP_BYREF) ||
(varTypeIsFloating(lclTyp) && varTypeIsFloating(op1->TypeGet())) ||
((genActualType(lclTyp) == TYP_BYREF) && genActualType(op1->TypeGet()) == TYP_REF));
/* If op1 is "&var" then its type is the transient "*" and it can
be used either as TYP_BYREF or TYP_I_IMPL */
if (op1->IsLocalAddrExpr() != nullptr)
{
assertImp(genActualType(lclTyp) == TYP_I_IMPL || lclTyp == TYP_BYREF);
/* When "&var" is created, we assume it is a byref. If it is
being assigned to a TYP_I_IMPL var, change the type to
prevent unnecessary GC info */
if (genActualType(lclTyp) == TYP_I_IMPL)
{
op1->gtType = TYP_I_IMPL;
}
}
// If this is a local and the local is a ref type, see
// if we can improve type information based on the
// value being assigned.
if (isLocal && (lclTyp == TYP_REF))
{
// We should have seen a stloc in our IL prescan.
assert(lvaTable[lclNum].lvHasILStoreOp);
// Is there just one place this local is defined?
const bool isSingleDefLocal = lvaTable[lclNum].lvSingleDef;
// Conservative check that there is just one
// definition that reaches this store.
const bool hasSingleReachingDef = (block->bbStackDepthOnEntry() == 0);
if (isSingleDefLocal && hasSingleReachingDef)
{
lvaUpdateClass(lclNum, op1, clsHnd);
}
}
/* Filter out simple assignments to itself */
if (op1->gtOper == GT_LCL_VAR && lclNum == op1->AsLclVarCommon()->GetLclNum())
{
if (opts.compDbgCode)
{
op1 = gtNewNothingNode();
goto SPILL_APPEND;
}
else
{
break;
}
}
/* Create the assignment node */
op2 = gtNewLclvNode(lclNum, lclTyp DEBUGARG(opcodeOffs + sz + 1));
/* If the local is aliased or pinned, we need to spill calls and
indirections from the stack. */
if ((lvaTable[lclNum].IsAddressExposed() || lvaTable[lclNum].lvHasLdAddrOp ||
lvaTable[lclNum].lvPinned) &&
(verCurrentState.esStackDepth > 0))
{
impSpillSideEffects(false,
(unsigned)CHECK_SPILL_ALL DEBUGARG("Local could be aliased or is pinned"));
}
/* Spill any refs to the local from the stack */
impSpillLclRefs(lclNum);
// We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
// We insert a cast to the dest 'op2' type
//
if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
varTypeIsFloating(op2->gtType))
{
op1 = gtNewCastNode(op2->TypeGet(), op1, false, op2->TypeGet());
}
if (varTypeIsStruct(lclTyp))
{
op1 = impAssignStruct(op2, op1, clsHnd, (unsigned)CHECK_SPILL_ALL);
}
else
{
op1 = gtNewAssignNode(op2, op1);
}
goto SPILL_APPEND;
case CEE_LDLOCA:
lclNum = getU2LittleEndian(codeAddr);
goto LDLOCA;
case CEE_LDLOCA_S:
lclNum = getU1LittleEndian(codeAddr);
LDLOCA:
JITDUMP(" %u", lclNum);
if (compIsForInlining())
{
// Get the local type
lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
/* Have we allocated a temp for this local? */
lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline ldloca(s) first use temp"));
assert(!lvaGetDesc(lclNum)->lvNormalizeOnLoad());
op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum));
goto _PUSH_ADRVAR;
}
lclNum += numArgs;
assertImp(lclNum < info.compLocalsCount);
goto ADRVAR;
case CEE_LDARGA:
lclNum = getU2LittleEndian(codeAddr);
goto LDARGA;
case CEE_LDARGA_S:
lclNum = getU1LittleEndian(codeAddr);
LDARGA:
JITDUMP(" %u", lclNum);
Verify(lclNum < info.compILargsCount, "bad arg num");
if (compIsForInlining())
{
                    // In IL, LDARGA(_S) is used to load the byref managed pointer of a struct argument,
// followed by a ldfld to load the field.
op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
if (op1->gtOper != GT_LCL_VAR)
{
compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDARGA_NOT_LOCAL_VAR);
return;
}
assert(op1->gtOper == GT_LCL_VAR);
goto _PUSH_ADRVAR;
}
lclNum = compMapILargNum(lclNum); // account for possible hidden param
assertImp(lclNum < numArgs);
if (lclNum == info.compThisArg)
{
lclNum = lvaArg0Var;
}
goto ADRVAR;
ADRVAR:
op1 = impCreateLocalNode(lclNum DEBUGARG(opcodeOffs + sz + 1));
_PUSH_ADRVAR:
assert(op1->gtOper == GT_LCL_VAR);
/* Note that this is supposed to create the transient type "*"
which may be used as a TYP_I_IMPL. However we catch places
where it is used as a TYP_I_IMPL and change the node if needed.
Thus we are pessimistic and may report byrefs in the GC info
where it was not absolutely needed, but it is safer this way.
*/
op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
                // &aliasedVar doesn't need GTF_GLOB_REF, though aliasedVar does
assert((op1->gtFlags & GTF_GLOB_REF) == 0);
tiRetVal = lvaTable[lclNum].lvVerTypeInfo;
impPushOnStack(op1, tiRetVal);
break;
case CEE_ARGLIST:
if (!info.compIsVarArgs)
{
BADCODE("arglist in non-vararg method");
}
assertImp((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG);
/* The ARGLIST cookie is a hidden 'last' parameter, we have already
                   adjusted the arg count because this is like fetching the last param */
assertImp(0 < numArgs);
lclNum = lvaVarargsHandleArg;
op1 = gtNewLclvNode(lclNum, TYP_I_IMPL DEBUGARG(opcodeOffs + sz + 1));
op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
impPushOnStack(op1, tiRetVal);
break;
case CEE_ENDFINALLY:
if (compIsForInlining())
{
assert(!"Shouldn't have exception handlers in the inliner!");
compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFINALLY);
return;
}
if (verCurrentState.esStackDepth > 0)
{
impEvalSideEffects();
}
if (info.compXcptnsCount == 0)
{
BADCODE("endfinally outside finally");
}
assert(verCurrentState.esStackDepth == 0);
op1 = gtNewOperNode(GT_RETFILT, TYP_VOID, nullptr);
goto APPEND;
case CEE_ENDFILTER:
if (compIsForInlining())
{
assert(!"Shouldn't have exception handlers in the inliner!");
compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFILTER);
return;
}
block->bbSetRunRarely(); // filters are rare
if (info.compXcptnsCount == 0)
{
BADCODE("endfilter outside filter");
}
op1 = impPopStack().val;
assertImp(op1->gtType == TYP_INT);
if (!bbInFilterILRange(block))
{
BADCODE("EndFilter outside a filter handler");
}
/* Mark current bb as end of filter */
assert(compCurBB->bbFlags & BBF_DONT_REMOVE);
assert(compCurBB->bbJumpKind == BBJ_EHFILTERRET);
/* Mark catch handler as successor */
op1 = gtNewOperNode(GT_RETFILT, op1->TypeGet(), op1);
if (verCurrentState.esStackDepth != 0)
{
verRaiseVerifyException(INDEBUG("stack must be 1 on end of filter") DEBUGARG(__FILE__)
DEBUGARG(__LINE__));
}
goto APPEND;
case CEE_RET:
prefixFlags &= ~PREFIX_TAILCALL; // ret without call before it
RET:
if (!impReturnInstruction(prefixFlags, opcode))
{
return; // abort
}
else
{
break;
}
case CEE_JMP:
assert(!compIsForInlining());
if ((info.compFlags & CORINFO_FLG_SYNCH) || block->hasTryIndex() || block->hasHndIndex())
{
/* CEE_JMP does not make sense in some "protected" regions. */
BADCODE("Jmp not allowed in protected region");
}
if (opts.IsReversePInvoke())
{
BADCODE("Jmp not allowed in reverse P/Invoke");
}
if (verCurrentState.esStackDepth != 0)
{
BADCODE("Stack must be empty after CEE_JMPs");
}
_impResolveToken(CORINFO_TOKENKIND_Method);
JITDUMP(" %08X", resolvedToken.token);
/* The signature of the target has to be identical to ours.
At least check that argCnt and returnType match */
eeGetMethodSig(resolvedToken.hMethod, &sig);
if (sig.numArgs != info.compMethodInfo->args.numArgs ||
sig.retType != info.compMethodInfo->args.retType ||
sig.callConv != info.compMethodInfo->args.callConv)
{
BADCODE("Incompatible target for CEE_JMPs");
}
op1 = new (this, GT_JMP) GenTreeVal(GT_JMP, TYP_VOID, (size_t)resolvedToken.hMethod);
/* Mark the basic block as being a JUMP instead of RETURN */
block->bbFlags |= BBF_HAS_JMP;
/* Set this flag to make sure register arguments have a location assigned
* even if we don't use them inside the method */
compJmpOpUsed = true;
fgNoStructPromotion = true;
goto APPEND;
case CEE_LDELEMA:
assertImp(sz == sizeof(unsigned));
_impResolveToken(CORINFO_TOKENKIND_Class);
JITDUMP(" %08X", resolvedToken.token);
ldelemClsHnd = resolvedToken.hClass;
// If it's a value class array we just do a simple address-of
if (eeIsValueClass(ldelemClsHnd))
{
CorInfoType cit = info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd);
if (cit == CORINFO_TYPE_UNDEF)
{
lclTyp = TYP_STRUCT;
}
else
{
lclTyp = JITtype2varType(cit);
}
goto ARR_LD_POST_VERIFY;
}
// Similarly, if its a readonly access, we can do a simple address-of
// without doing a runtime type-check
if (prefixFlags & PREFIX_READONLY)
{
lclTyp = TYP_REF;
goto ARR_LD_POST_VERIFY;
}
// Otherwise we need the full helper function with run-time type check
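            // (Illustrative note: because of array covariance -- e.g. an object[] that is
            // really a string[] -- the helper must verify the exact element type before
            // handing out a byref that could otherwise be used to store the wrong type.)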
op1 = impTokenToHandle(&resolvedToken);
if (op1 == nullptr)
{ // compDonotInline()
return;
}
{
GenTreeCall::Use* args = gtNewCallArgs(op1); // Type
args = gtPrependNewCallArg(impPopStack().val, args); // index
args = gtPrependNewCallArg(impPopStack().val, args); // array
op1 = gtNewHelperCallNode(CORINFO_HELP_LDELEMA_REF, TYP_BYREF, args);
}
impPushOnStack(op1, tiRetVal);
break;
// ldelem for reference and value types
case CEE_LDELEM:
assertImp(sz == sizeof(unsigned));
_impResolveToken(CORINFO_TOKENKIND_Class);
JITDUMP(" %08X", resolvedToken.token);
ldelemClsHnd = resolvedToken.hClass;
// If it's a reference type or generic variable type
// then just generate code as though it's a ldelem.ref instruction
if (!eeIsValueClass(ldelemClsHnd))
{
lclTyp = TYP_REF;
opcode = CEE_LDELEM_REF;
}
else
{
CorInfoType jitTyp = info.compCompHnd->asCorInfoType(ldelemClsHnd);
lclTyp = JITtype2varType(jitTyp);
tiRetVal = verMakeTypeInfo(ldelemClsHnd); // precise type always needed for struct
tiRetVal.NormaliseForStack();
}
goto ARR_LD_POST_VERIFY;
case CEE_LDELEM_I1:
lclTyp = TYP_BYTE;
goto ARR_LD;
case CEE_LDELEM_I2:
lclTyp = TYP_SHORT;
goto ARR_LD;
case CEE_LDELEM_I:
lclTyp = TYP_I_IMPL;
goto ARR_LD;
// Should be UINT, but since no platform widens 4->8 bytes it doesn't matter
// and treating it as TYP_INT avoids other asserts.
case CEE_LDELEM_U4:
lclTyp = TYP_INT;
goto ARR_LD;
case CEE_LDELEM_I4:
lclTyp = TYP_INT;
goto ARR_LD;
case CEE_LDELEM_I8:
lclTyp = TYP_LONG;
goto ARR_LD;
case CEE_LDELEM_REF:
lclTyp = TYP_REF;
goto ARR_LD;
case CEE_LDELEM_R4:
lclTyp = TYP_FLOAT;
goto ARR_LD;
case CEE_LDELEM_R8:
lclTyp = TYP_DOUBLE;
goto ARR_LD;
case CEE_LDELEM_U1:
lclTyp = TYP_UBYTE;
goto ARR_LD;
case CEE_LDELEM_U2:
lclTyp = TYP_USHORT;
goto ARR_LD;
ARR_LD:
ARR_LD_POST_VERIFY:
/* Pull the index value and array address */
op2 = impPopStack().val;
op1 = impPopStack().val;
assertImp(op1->gtType == TYP_REF);
/* Check for null pointer - in the inliner case we simply abort */
if (compIsForInlining())
{
if (op1->gtOper == GT_CNS_INT)
{
compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NULL_FOR_LDELEM);
return;
}
}
/* Mark the block as containing an index expression */
if (op1->gtOper == GT_LCL_VAR)
{
if (op2->gtOper == GT_LCL_VAR || op2->gtOper == GT_CNS_INT || op2->gtOper == GT_ADD)
{
block->bbFlags |= BBF_HAS_IDX_LEN;
optMethodFlags |= OMF_HAS_ARRAYREF;
}
}
/* Create the index node and push it on the stack */
op1 = gtNewIndexRef(lclTyp, op1, op2);
ldstruct = (opcode == CEE_LDELEM && lclTyp == TYP_STRUCT);
if ((opcode == CEE_LDELEMA) || ldstruct ||
(ldelemClsHnd != DUMMY_INIT(NULL) && eeIsValueClass(ldelemClsHnd)))
{
assert(ldelemClsHnd != DUMMY_INIT(NULL));
// remember the element size
if (lclTyp == TYP_REF)
{
op1->AsIndex()->gtIndElemSize = TARGET_POINTER_SIZE;
}
else
{
// If ldElemClass is precisely a primitive type, use that, otherwise, preserve the struct type.
if (info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd) == CORINFO_TYPE_UNDEF)
{
op1->AsIndex()->gtStructElemClass = ldelemClsHnd;
}
assert(lclTyp != TYP_STRUCT || op1->AsIndex()->gtStructElemClass != nullptr);
if (lclTyp == TYP_STRUCT)
{
size = info.compCompHnd->getClassSize(ldelemClsHnd);
op1->AsIndex()->gtIndElemSize = size;
op1->gtType = lclTyp;
}
}
if ((opcode == CEE_LDELEMA) || ldstruct)
{
// wrap it in a &
lclTyp = TYP_BYREF;
op1 = gtNewOperNode(GT_ADDR, lclTyp, op1);
}
else
{
assert(lclTyp != TYP_STRUCT);
}
}
if (ldstruct)
{
// Create an OBJ for the result
op1 = gtNewObjNode(ldelemClsHnd, op1);
op1->gtFlags |= GTF_EXCEPT;
}
impPushOnStack(op1, tiRetVal);
break;
// stelem for reference and value types
case CEE_STELEM:
assertImp(sz == sizeof(unsigned));
_impResolveToken(CORINFO_TOKENKIND_Class);
JITDUMP(" %08X", resolvedToken.token);
stelemClsHnd = resolvedToken.hClass;
// If it's a reference type just behave as though it's a stelem.ref instruction
if (!eeIsValueClass(stelemClsHnd))
{
goto STELEM_REF_POST_VERIFY;
}
// Otherwise extract the type
{
CorInfoType jitTyp = info.compCompHnd->asCorInfoType(stelemClsHnd);
lclTyp = JITtype2varType(jitTyp);
goto ARR_ST_POST_VERIFY;
}
case CEE_STELEM_REF:
STELEM_REF_POST_VERIFY:
if (opts.OptimizationEnabled())
{
GenTree* array = impStackTop(2).val;
GenTree* value = impStackTop().val;
// Is this a case where we can skip the covariant store check?
if (impCanSkipCovariantStoreCheck(value, array))
{
lclTyp = TYP_REF;
goto ARR_ST_POST_VERIFY;
}
}
// Else call a helper function to do the assignment
op1 = gtNewHelperCallNode(CORINFO_HELP_ARRADDR_ST, TYP_VOID, impPopCallArgs(3, nullptr));
goto SPILL_APPEND;
case CEE_STELEM_I1:
lclTyp = TYP_BYTE;
goto ARR_ST;
case CEE_STELEM_I2:
lclTyp = TYP_SHORT;
goto ARR_ST;
case CEE_STELEM_I:
lclTyp = TYP_I_IMPL;
goto ARR_ST;
case CEE_STELEM_I4:
lclTyp = TYP_INT;
goto ARR_ST;
case CEE_STELEM_I8:
lclTyp = TYP_LONG;
goto ARR_ST;
case CEE_STELEM_R4:
lclTyp = TYP_FLOAT;
goto ARR_ST;
case CEE_STELEM_R8:
lclTyp = TYP_DOUBLE;
goto ARR_ST;
ARR_ST:
ARR_ST_POST_VERIFY:
/* The strict order of evaluation is LHS-operands, RHS-operands,
range-check, and then assignment. However, codegen currently
                   does the range-check before evaluating the RHS-operands. So to
maintain strict ordering, we spill the stack. */
if (impStackTop().val->gtFlags & GTF_SIDE_EFFECT)
{
impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
"Strict ordering of exceptions for Array store"));
}
/* Pull the new value from the stack */
op2 = impPopStack().val;
/* Pull the index value */
op1 = impPopStack().val;
/* Pull the array address */
op3 = impPopStack().val;
assertImp(op3->gtType == TYP_REF);
if (op2->IsLocalAddrExpr() != nullptr)
{
op2->gtType = TYP_I_IMPL;
}
// Mark the block as containing an index expression
if (op3->gtOper == GT_LCL_VAR)
{
if (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_CNS_INT || op1->gtOper == GT_ADD)
{
block->bbFlags |= BBF_HAS_IDX_LEN;
optMethodFlags |= OMF_HAS_ARRAYREF;
}
}
/* Create the index node */
op1 = gtNewIndexRef(lclTyp, op3, op1);
/* Create the assignment node and append it */
if (lclTyp == TYP_STRUCT)
{
assert(stelemClsHnd != DUMMY_INIT(NULL));
op1->AsIndex()->gtStructElemClass = stelemClsHnd;
op1->AsIndex()->gtIndElemSize = info.compCompHnd->getClassSize(stelemClsHnd);
}
if (varTypeIsStruct(op1))
{
op1 = impAssignStruct(op1, op2, stelemClsHnd, (unsigned)CHECK_SPILL_ALL);
}
else
{
op2 = impImplicitR4orR8Cast(op2, op1->TypeGet());
op1 = gtNewAssignNode(op1, op2);
}
/* Mark the expression as containing an assignment */
op1->gtFlags |= GTF_ASG;
goto SPILL_APPEND;
case CEE_ADD:
oper = GT_ADD;
goto MATH_OP2;
case CEE_ADD_OVF:
uns = false;
goto ADD_OVF;
case CEE_ADD_OVF_UN:
uns = true;
goto ADD_OVF;
ADD_OVF:
ovfl = true;
callNode = false;
oper = GT_ADD;
goto MATH_OP2_FLAGS;
case CEE_SUB:
oper = GT_SUB;
goto MATH_OP2;
case CEE_SUB_OVF:
uns = false;
goto SUB_OVF;
case CEE_SUB_OVF_UN:
uns = true;
goto SUB_OVF;
SUB_OVF:
ovfl = true;
callNode = false;
oper = GT_SUB;
goto MATH_OP2_FLAGS;
case CEE_MUL:
oper = GT_MUL;
goto MATH_MAYBE_CALL_NO_OVF;
case CEE_MUL_OVF:
uns = false;
goto MUL_OVF;
case CEE_MUL_OVF_UN:
uns = true;
goto MUL_OVF;
MUL_OVF:
ovfl = true;
oper = GT_MUL;
goto MATH_MAYBE_CALL_OVF;
// Other binary math operations
case CEE_DIV:
oper = GT_DIV;
goto MATH_MAYBE_CALL_NO_OVF;
case CEE_DIV_UN:
oper = GT_UDIV;
goto MATH_MAYBE_CALL_NO_OVF;
case CEE_REM:
oper = GT_MOD;
goto MATH_MAYBE_CALL_NO_OVF;
case CEE_REM_UN:
oper = GT_UMOD;
goto MATH_MAYBE_CALL_NO_OVF;
MATH_MAYBE_CALL_NO_OVF:
ovfl = false;
MATH_MAYBE_CALL_OVF:
// Morpher has some complex logic about when to turn different
// typed nodes on different platforms into helper calls. We
// need to either duplicate that logic here, or just
// pessimistically make all the nodes large enough to become
// call nodes. Since call nodes aren't that much larger and
// these opcodes are infrequent enough I chose the latter.
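                // (For example, a 64-bit division or remainder is morphed into a helper call
                // on 32-bit targets, so the node may need to grow into a GT_CALL.)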
callNode = true;
goto MATH_OP2_FLAGS;
case CEE_AND:
oper = GT_AND;
goto MATH_OP2;
case CEE_OR:
oper = GT_OR;
goto MATH_OP2;
case CEE_XOR:
oper = GT_XOR;
goto MATH_OP2;
MATH_OP2: // For default values of 'ovfl' and 'callNode'
ovfl = false;
callNode = false;
MATH_OP2_FLAGS: // If 'ovfl' and 'callNode' have already been set
/* Pull two values and push back the result */
op2 = impPopStack().val;
op1 = impPopStack().val;
/* Can't do arithmetic with references */
assertImp(genActualType(op1->TypeGet()) != TYP_REF && genActualType(op2->TypeGet()) != TYP_REF);
                // Change both to TYP_I_IMPL (impBashVarAddrsToI won't change if it's a true byref, only
// if it is in the stack)
impBashVarAddrsToI(op1, op2);
type = impGetByRefResultType(oper, uns, &op1, &op2);
assert(!ovfl || !varTypeIsFloating(op1->gtType));
/* Special case: "int+0", "int-0", "int*1", "int/1" */
if (op2->gtOper == GT_CNS_INT)
{
if ((op2->IsIntegralConst(0) && (oper == GT_ADD || oper == GT_SUB)) ||
(op2->IsIntegralConst(1) && (oper == GT_MUL || oper == GT_DIV)))
{
impPushOnStack(op1, tiRetVal);
break;
}
}
// We can generate a TYP_FLOAT operation that has a TYP_DOUBLE operand
//
if (varTypeIsFloating(type) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType))
{
if (op1->TypeGet() != type)
{
// We insert a cast of op1 to 'type'
op1 = gtNewCastNode(type, op1, false, type);
}
if (op2->TypeGet() != type)
{
// We insert a cast of op2 to 'type'
op2 = gtNewCastNode(type, op2, false, type);
}
}
if (callNode)
{
/* These operators can later be transformed into 'GT_CALL' */
assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MUL]);
#ifndef TARGET_ARM
assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_DIV]);
assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UDIV]);
assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MOD]);
assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UMOD]);
#endif
// It's tempting to use LargeOpOpcode() here, but this logic is *not* saying
// that we'll need to transform into a general large node, but rather specifically
// to a call: by doing it this way, things keep working if there are multiple sizes,
// and a CALL is no longer the largest.
// That said, as of now it *is* a large node, so we'll do this with an assert rather
// than an "if".
assert(GenTree::s_gtNodeSizes[GT_CALL] == TREE_NODE_SZ_LARGE);
op1 = new (this, GT_CALL) GenTreeOp(oper, type, op1, op2 DEBUGARG(/*largeNode*/ true));
}
else
{
op1 = gtNewOperNode(oper, type, op1, op2);
}
/* Special case: integer/long division may throw an exception */
if (varTypeIsIntegral(op1->TypeGet()) && op1->OperMayThrow(this))
{
op1->gtFlags |= GTF_EXCEPT;
}
if (ovfl)
{
assert(oper == GT_ADD || oper == GT_SUB || oper == GT_MUL);
if (ovflType != TYP_UNKNOWN)
{
op1->gtType = ovflType;
}
op1->gtFlags |= (GTF_EXCEPT | GTF_OVERFLOW);
if (uns)
{
op1->gtFlags |= GTF_UNSIGNED;
}
}
impPushOnStack(op1, tiRetVal);
break;
case CEE_SHL:
oper = GT_LSH;
goto CEE_SH_OP2;
case CEE_SHR:
oper = GT_RSH;
goto CEE_SH_OP2;
case CEE_SHR_UN:
oper = GT_RSZ;
goto CEE_SH_OP2;
CEE_SH_OP2:
op2 = impPopStack().val;
op1 = impPopStack().val; // operand to be shifted
impBashVarAddrsToI(op1, op2);
type = genActualType(op1->TypeGet());
op1 = gtNewOperNode(oper, type, op1, op2);
impPushOnStack(op1, tiRetVal);
break;
case CEE_NOT:
op1 = impPopStack().val;
impBashVarAddrsToI(op1, nullptr);
type = genActualType(op1->TypeGet());
impPushOnStack(gtNewOperNode(GT_NOT, type, op1), tiRetVal);
break;
case CEE_CKFINITE:
op1 = impPopStack().val;
type = op1->TypeGet();
op1 = gtNewOperNode(GT_CKFINITE, type, op1);
op1->gtFlags |= GTF_EXCEPT;
impPushOnStack(op1, tiRetVal);
break;
case CEE_LEAVE:
val = getI4LittleEndian(codeAddr); // jump distance
jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int32)) + val);
goto LEAVE;
case CEE_LEAVE_S:
val = getI1LittleEndian(codeAddr); // jump distance
jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int8)) + val);
LEAVE:
if (compIsForInlining())
{
compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_LEAVE);
return;
}
JITDUMP(" %04X", jmpAddr);
if (block->bbJumpKind != BBJ_LEAVE)
{
impResetLeaveBlock(block, jmpAddr);
}
assert(jmpAddr == block->bbJumpDest->bbCodeOffs);
impImportLeave(block);
impNoteBranchOffs();
break;
case CEE_BR:
case CEE_BR_S:
jmpDist = (sz == 1) ? getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr);
if (compIsForInlining() && jmpDist == 0)
{
break; /* NOP */
}
impNoteBranchOffs();
break;
case CEE_BRTRUE:
case CEE_BRTRUE_S:
case CEE_BRFALSE:
case CEE_BRFALSE_S:
/* Pop the comparand (now there's a neat term) from the stack */
op1 = impPopStack().val;
type = op1->TypeGet();
                // Per ECMA-335, brfalse and brtrue are only specified for nint, ref, and byref.
//
// We've historically been a bit more permissive, so here we allow
// any type that gtNewZeroConNode can handle.
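                // (Per the check below that means any arithmetic type -- int, long, float,
                // double -- or a GC type -- ref or byref.)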
if (!varTypeIsArithmetic(type) && !varTypeIsGC(type))
{
BADCODE("invalid type for brtrue/brfalse");
}
if (opts.OptimizationEnabled() && (block->bbJumpDest == block->bbNext))
{
block->bbJumpKind = BBJ_NONE;
if (op1->gtFlags & GTF_GLOB_EFFECT)
{
op1 = gtUnusedValNode(op1);
goto SPILL_APPEND;
}
else
{
break;
}
}
if (op1->OperIsCompare())
{
if (opcode == CEE_BRFALSE || opcode == CEE_BRFALSE_S)
{
// Flip the sense of the compare
op1 = gtReverseCond(op1);
}
}
else
{
// We'll compare against an equally-sized integer 0
// For small types, we always compare against int
op2 = gtNewZeroConNode(genActualType(op1->gtType));
// Create the comparison operator and try to fold it
oper = (opcode == CEE_BRTRUE || opcode == CEE_BRTRUE_S) ? GT_NE : GT_EQ;
op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
}
// fall through
COND_JUMP:
/* Fold comparison if we can */
op1 = gtFoldExpr(op1);
/* Try to fold the really simple cases like 'iconst *, ifne/ifeq'*/
/* Don't make any blocks unreachable in import only mode */
if ((op1->gtOper == GT_CNS_INT) && !compIsForImportOnly())
{
/* gtFoldExpr() should prevent this as we don't want to make any blocks
unreachable under compDbgCode */
assert(!opts.compDbgCode);
BBjumpKinds foldedJumpKind = (BBjumpKinds)(op1->AsIntCon()->gtIconVal ? BBJ_ALWAYS : BBJ_NONE);
assertImp((block->bbJumpKind == BBJ_COND) // normal case
|| (block->bbJumpKind == foldedJumpKind)); // this can happen if we are reimporting the
// block for the second time
block->bbJumpKind = foldedJumpKind;
#ifdef DEBUG
if (verbose)
{
if (op1->AsIntCon()->gtIconVal)
{
printf("\nThe conditional jump becomes an unconditional jump to " FMT_BB "\n",
block->bbJumpDest->bbNum);
}
else
{
printf("\nThe block falls through into the next " FMT_BB "\n", block->bbNext->bbNum);
}
}
#endif
break;
}
op1 = gtNewOperNode(GT_JTRUE, TYP_VOID, op1);
/* GT_JTRUE is handled specially for non-empty stacks. See 'addStmt'
in impImportBlock(block). For correct line numbers, spill stack. */
if (opts.compDbgCode && impCurStmtDI.IsValid())
{
impSpillStackEnsure(true);
}
goto SPILL_APPEND;
case CEE_CEQ:
oper = GT_EQ;
uns = false;
goto CMP_2_OPs;
case CEE_CGT_UN:
oper = GT_GT;
uns = true;
goto CMP_2_OPs;
case CEE_CGT:
oper = GT_GT;
uns = false;
goto CMP_2_OPs;
case CEE_CLT_UN:
oper = GT_LT;
uns = true;
goto CMP_2_OPs;
case CEE_CLT:
oper = GT_LT;
uns = false;
goto CMP_2_OPs;
CMP_2_OPs:
op2 = impPopStack().val;
op1 = impPopStack().val;
// Recognize the IL idiom of CGT_UN(op1, 0) and normalize
// it so that downstream optimizations don't have to.
if ((opcode == CEE_CGT_UN) && op2->IsIntegralConst(0))
{
oper = GT_NE;
uns = false;
}
#ifdef TARGET_64BIT
// TODO-Casts: create a helper that upcasts int32 -> native int when necessary.
// See also identical code in impGetByRefResultType and STSFLD import.
if (varTypeIsI(op1) && (genActualType(op2) == TYP_INT))
{
op2 = gtNewCastNode(TYP_I_IMPL, op2, uns, TYP_I_IMPL);
}
else if (varTypeIsI(op2) && (genActualType(op1) == TYP_INT))
{
op1 = gtNewCastNode(TYP_I_IMPL, op1, uns, TYP_I_IMPL);
}
#endif // TARGET_64BIT
assertImp(genActualType(op1) == genActualType(op2) || (varTypeIsI(op1) && varTypeIsI(op2)) ||
(varTypeIsFloating(op1) && varTypeIsFloating(op2)));
// Create the comparison node.
op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
// TODO: setting both flags when only one is appropriate.
if (uns)
{
op1->gtFlags |= GTF_RELOP_NAN_UN | GTF_UNSIGNED;
}
// Fold result, if possible.
op1 = gtFoldExpr(op1);
impPushOnStack(op1, tiRetVal);
break;
case CEE_BEQ_S:
case CEE_BEQ:
oper = GT_EQ;
goto CMP_2_OPs_AND_BR;
case CEE_BGE_S:
case CEE_BGE:
oper = GT_GE;
goto CMP_2_OPs_AND_BR;
case CEE_BGE_UN_S:
case CEE_BGE_UN:
oper = GT_GE;
goto CMP_2_OPs_AND_BR_UN;
case CEE_BGT_S:
case CEE_BGT:
oper = GT_GT;
goto CMP_2_OPs_AND_BR;
case CEE_BGT_UN_S:
case CEE_BGT_UN:
oper = GT_GT;
goto CMP_2_OPs_AND_BR_UN;
case CEE_BLE_S:
case CEE_BLE:
oper = GT_LE;
goto CMP_2_OPs_AND_BR;
case CEE_BLE_UN_S:
case CEE_BLE_UN:
oper = GT_LE;
goto CMP_2_OPs_AND_BR_UN;
case CEE_BLT_S:
case CEE_BLT:
oper = GT_LT;
goto CMP_2_OPs_AND_BR;
case CEE_BLT_UN_S:
case CEE_BLT_UN:
oper = GT_LT;
goto CMP_2_OPs_AND_BR_UN;
case CEE_BNE_UN_S:
case CEE_BNE_UN:
oper = GT_NE;
goto CMP_2_OPs_AND_BR_UN;
CMP_2_OPs_AND_BR_UN:
uns = true;
unordered = true;
goto CMP_2_OPs_AND_BR_ALL;
CMP_2_OPs_AND_BR:
uns = false;
unordered = false;
goto CMP_2_OPs_AND_BR_ALL;
CMP_2_OPs_AND_BR_ALL:
/* Pull two values */
op2 = impPopStack().val;
op1 = impPopStack().val;
#ifdef TARGET_64BIT
if ((op1->TypeGet() == TYP_I_IMPL) && (genActualType(op2->TypeGet()) == TYP_INT))
{
op2 = gtNewCastNode(TYP_I_IMPL, op2, uns, uns ? TYP_U_IMPL : TYP_I_IMPL);
}
else if ((op2->TypeGet() == TYP_I_IMPL) && (genActualType(op1->TypeGet()) == TYP_INT))
{
op1 = gtNewCastNode(TYP_I_IMPL, op1, uns, uns ? TYP_U_IMPL : TYP_I_IMPL);
}
#endif // TARGET_64BIT
assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
(varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet())) ||
(varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType)));
if (opts.OptimizationEnabled() && (block->bbJumpDest == block->bbNext))
{
block->bbJumpKind = BBJ_NONE;
if (op1->gtFlags & GTF_GLOB_EFFECT)
{
impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
"Branch to next Optimization, op1 side effect"));
impAppendTree(gtUnusedValNode(op1), (unsigned)CHECK_SPILL_NONE, impCurStmtDI);
}
if (op2->gtFlags & GTF_GLOB_EFFECT)
{
impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
"Branch to next Optimization, op2 side effect"));
impAppendTree(gtUnusedValNode(op2), (unsigned)CHECK_SPILL_NONE, impCurStmtDI);
}
#ifdef DEBUG
if ((op1->gtFlags | op2->gtFlags) & GTF_GLOB_EFFECT)
{
impNoteLastILoffs();
}
#endif
break;
}
                // We can generate a compare of different-sized floating point op1 and op2
// We insert a cast
//
if (varTypeIsFloating(op1->TypeGet()))
{
if (op1->TypeGet() != op2->TypeGet())
{
assert(varTypeIsFloating(op2->TypeGet()));
// say op1=double, op2=float. To avoid loss of precision
// while comparing, op2 is converted to double and double
// comparison is done.
if (op1->TypeGet() == TYP_DOUBLE)
{
// We insert a cast of op2 to TYP_DOUBLE
op2 = gtNewCastNode(TYP_DOUBLE, op2, false, TYP_DOUBLE);
}
else if (op2->TypeGet() == TYP_DOUBLE)
{
// We insert a cast of op1 to TYP_DOUBLE
op1 = gtNewCastNode(TYP_DOUBLE, op1, false, TYP_DOUBLE);
}
}
}
/* Create and append the operator */
op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
if (uns)
{
op1->gtFlags |= GTF_UNSIGNED;
}
if (unordered)
{
op1->gtFlags |= GTF_RELOP_NAN_UN;
}
goto COND_JUMP;
case CEE_SWITCH:
/* Pop the switch value off the stack */
op1 = impPopStack().val;
assertImp(genActualTypeIsIntOrI(op1->TypeGet()));
/* We can create a switch node */
op1 = gtNewOperNode(GT_SWITCH, TYP_VOID, op1);
val = (int)getU4LittleEndian(codeAddr);
codeAddr += 4 + val * 4; // skip over the switch-table
goto SPILL_APPEND;
/************************** Casting OPCODES ***************************/
case CEE_CONV_OVF_I1:
lclTyp = TYP_BYTE;
goto CONV_OVF;
case CEE_CONV_OVF_I2:
lclTyp = TYP_SHORT;
goto CONV_OVF;
case CEE_CONV_OVF_I:
lclTyp = TYP_I_IMPL;
goto CONV_OVF;
case CEE_CONV_OVF_I4:
lclTyp = TYP_INT;
goto CONV_OVF;
case CEE_CONV_OVF_I8:
lclTyp = TYP_LONG;
goto CONV_OVF;
case CEE_CONV_OVF_U1:
lclTyp = TYP_UBYTE;
goto CONV_OVF;
case CEE_CONV_OVF_U2:
lclTyp = TYP_USHORT;
goto CONV_OVF;
case CEE_CONV_OVF_U:
lclTyp = TYP_U_IMPL;
goto CONV_OVF;
case CEE_CONV_OVF_U4:
lclTyp = TYP_UINT;
goto CONV_OVF;
case CEE_CONV_OVF_U8:
lclTyp = TYP_ULONG;
goto CONV_OVF;
case CEE_CONV_OVF_I1_UN:
lclTyp = TYP_BYTE;
goto CONV_OVF_UN;
case CEE_CONV_OVF_I2_UN:
lclTyp = TYP_SHORT;
goto CONV_OVF_UN;
case CEE_CONV_OVF_I_UN:
lclTyp = TYP_I_IMPL;
goto CONV_OVF_UN;
case CEE_CONV_OVF_I4_UN:
lclTyp = TYP_INT;
goto CONV_OVF_UN;
case CEE_CONV_OVF_I8_UN:
lclTyp = TYP_LONG;
goto CONV_OVF_UN;
case CEE_CONV_OVF_U1_UN:
lclTyp = TYP_UBYTE;
goto CONV_OVF_UN;
case CEE_CONV_OVF_U2_UN:
lclTyp = TYP_USHORT;
goto CONV_OVF_UN;
case CEE_CONV_OVF_U_UN:
lclTyp = TYP_U_IMPL;
goto CONV_OVF_UN;
case CEE_CONV_OVF_U4_UN:
lclTyp = TYP_UINT;
goto CONV_OVF_UN;
case CEE_CONV_OVF_U8_UN:
lclTyp = TYP_ULONG;
goto CONV_OVF_UN;
CONV_OVF_UN:
uns = true;
goto CONV_OVF_COMMON;
CONV_OVF:
uns = false;
goto CONV_OVF_COMMON;
CONV_OVF_COMMON:
ovfl = true;
goto _CONV;
case CEE_CONV_I1:
lclTyp = TYP_BYTE;
goto CONV;
case CEE_CONV_I2:
lclTyp = TYP_SHORT;
goto CONV;
case CEE_CONV_I:
lclTyp = TYP_I_IMPL;
goto CONV;
case CEE_CONV_I4:
lclTyp = TYP_INT;
goto CONV;
case CEE_CONV_I8:
lclTyp = TYP_LONG;
goto CONV;
case CEE_CONV_U1:
lclTyp = TYP_UBYTE;
goto CONV;
case CEE_CONV_U2:
lclTyp = TYP_USHORT;
goto CONV;
#if (REGSIZE_BYTES == 8)
case CEE_CONV_U:
lclTyp = TYP_U_IMPL;
goto CONV_UN;
#else
case CEE_CONV_U:
lclTyp = TYP_U_IMPL;
goto CONV;
#endif
case CEE_CONV_U4:
lclTyp = TYP_UINT;
goto CONV;
case CEE_CONV_U8:
lclTyp = TYP_ULONG;
goto CONV_UN;
case CEE_CONV_R4:
lclTyp = TYP_FLOAT;
goto CONV;
case CEE_CONV_R8:
lclTyp = TYP_DOUBLE;
goto CONV;
case CEE_CONV_R_UN:
lclTyp = TYP_DOUBLE;
goto CONV_UN;
CONV_UN:
uns = true;
ovfl = false;
goto _CONV;
CONV:
uns = false;
ovfl = false;
goto _CONV;
_CONV:
                // Only conversions from FLOAT or DOUBLE to an integer type, and
                // conversions from ULONG (or LONG on ARM) to DOUBLE, are morphed to calls
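                // (For example, conv.r8 with a ulong operand -- or with a long operand on
                // ARM -- is later turned into a helper call, hence the call-sized node.)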
if (varTypeIsFloating(lclTyp))
{
callNode = varTypeIsLong(impStackTop().val) || uns // uint->dbl gets turned into uint->long->dbl
#ifdef TARGET_64BIT
// TODO-ARM64-Bug?: This was AMD64; I enabled it for ARM64 also. OK?
// TYP_BYREF could be used as TYP_I_IMPL which is long.
// TODO-CQ: remove this when we lower casts long/ulong --> float/double
// and generate SSE2 code instead of going through helper calls.
|| (impStackTop().val->TypeGet() == TYP_BYREF)
#endif
;
}
else
{
callNode = varTypeIsFloating(impStackTop().val->TypeGet());
}
op1 = impPopStack().val;
impBashVarAddrsToI(op1);
// Casts from floating point types must not have GTF_UNSIGNED set.
if (varTypeIsFloating(op1))
{
uns = false;
}
// At this point uns, ovf, callNode are all set.
if (varTypeIsSmall(lclTyp) && !ovfl && op1->gtType == TYP_INT && op1->gtOper == GT_AND)
{
op2 = op1->AsOp()->gtOp2;
if (op2->gtOper == GT_CNS_INT)
{
ssize_t ival = op2->AsIntCon()->gtIconVal;
ssize_t mask, umask;
switch (lclTyp)
{
case TYP_BYTE:
case TYP_UBYTE:
mask = 0x00FF;
umask = 0x007F;
break;
case TYP_USHORT:
case TYP_SHORT:
mask = 0xFFFF;
umask = 0x7FFF;
break;
default:
assert(!"unexpected type");
return;
}
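                        // (Illustrative: for "(x & 0x7F)" the AND already confines the value to
                        // the target range, so the cast below is dropped.  For "(x & 0xFF)" with
                        // a signed conv.i1 the AND is the redundant part -- the narrowing cast
                        // works from the same low 8 bits -- so the mask is removed instead.)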
if (((ival & umask) == ival) || ((ival & mask) == ival && uns))
{
/* Toss the cast, it's a waste of time */
impPushOnStack(op1, tiRetVal);
break;
}
else if (ival == mask)
{
/* Toss the masking, it's a waste of time, since
                               we sign-extend from the small value anyway */
op1 = op1->AsOp()->gtOp1;
}
}
}
/* The 'op2' sub-operand of a cast is the 'real' type number,
since the result of a cast to one of the 'small' integer
types is an integer.
*/
type = genActualType(lclTyp);
// If this is a no-op cast, just use op1.
if (!ovfl && (type == op1->TypeGet()) && (genTypeSize(type) == genTypeSize(lclTyp)))
{
// Nothing needs to change
}
// Work is evidently required, add cast node
else
{
if (callNode)
{
op1 = gtNewCastNodeL(type, op1, uns, lclTyp);
}
else
{
op1 = gtNewCastNode(type, op1, uns, lclTyp);
}
if (ovfl)
{
op1->gtFlags |= (GTF_OVERFLOW | GTF_EXCEPT);
}
if (op1->gtGetOp1()->OperIsConst() && opts.OptimizationEnabled())
{
// Try and fold the introduced cast
op1 = gtFoldExprConst(op1);
}
}
impPushOnStack(op1, tiRetVal);
break;
case CEE_NEG:
op1 = impPopStack().val;
impBashVarAddrsToI(op1, nullptr);
impPushOnStack(gtNewOperNode(GT_NEG, genActualType(op1->gtType), op1), tiRetVal);
break;
case CEE_POP:
{
/* Pull the top value from the stack */
StackEntry se = impPopStack();
clsHnd = se.seTypeInfo.GetClassHandle();
op1 = se.val;
/* Get hold of the type of the value being duplicated */
lclTyp = genActualType(op1->gtType);
/* Does the value have any side effects? */
if ((op1->gtFlags & GTF_SIDE_EFFECT) || opts.compDbgCode)
{
// Since we are throwing away the value, just normalize
// it to its address. This is more efficient.
if (varTypeIsStruct(op1))
{
JITDUMP("\n ... CEE_POP struct ...\n");
DISPTREE(op1);
#ifdef UNIX_AMD64_ABI
// Non-calls, such as obj or ret_expr, have to go through this.
// Calls with large struct return value have to go through this.
// Helper calls with small struct return value also have to go
// through this since they do not follow Unix calling convention.
if (op1->gtOper != GT_CALL ||
!IsMultiRegReturnedType(clsHnd, op1->AsCall()->GetUnmanagedCallConv()) ||
op1->AsCall()->gtCallType == CT_HELPER)
#endif // UNIX_AMD64_ABI
{
// If the value being produced comes from loading
// via an underlying address, just null check the address.
if (op1->OperIs(GT_FIELD, GT_IND, GT_OBJ))
{
gtChangeOperToNullCheck(op1, block);
}
else
{
op1 = impGetStructAddr(op1, clsHnd, (unsigned)CHECK_SPILL_ALL, false);
}
JITDUMP("\n ... optimized to ...\n");
DISPTREE(op1);
}
}
// If op1 is non-overflow cast, throw it away since it is useless.
// Another reason for throwing away the useless cast is in the context of
// implicit tail calls when the operand of pop is GT_CAST(GT_CALL(..)).
// The cast gets added as part of importing GT_CALL, which gets in the way
// of fgMorphCall() on the forms of tail call nodes that we assert.
if ((op1->gtOper == GT_CAST) && !op1->gtOverflow())
{
op1 = op1->AsOp()->gtOp1;
}
if (op1->gtOper != GT_CALL)
{
if ((op1->gtFlags & GTF_SIDE_EFFECT) != 0)
{
op1 = gtUnusedValNode(op1);
}
else
{
                        // Can't bash to NOP here because op1 can be referenced from `currentBlock->bbEntryState`;
                        // if we ever need to reimport we need a valid LCL_VAR on it.
op1 = gtNewNothingNode();
}
}
/* Append the value to the tree list */
goto SPILL_APPEND;
}
/* No side effects - just throw the <BEEP> thing away */
}
break;
case CEE_DUP:
{
StackEntry se = impPopStack();
GenTree* tree = se.val;
tiRetVal = se.seTypeInfo;
op1 = tree;
// If the expression to dup is simple, just clone it.
// Otherwise spill it to a temp, and reload the temp twice.
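                // (Illustrative: an expression with side effects, e.g. a call, cannot simply be
                // cloned -- that would evaluate it twice -- so it is spilled to a temp and the
                // temp is read back twice instead.)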
bool cloneExpr = false;
if (!opts.compDbgCode)
{
// Duplicate 0 and +0.0
if (op1->IsIntegralConst(0) || op1->IsFloatPositiveZero())
{
cloneExpr = true;
}
// Duplicate locals and addresses of them
else if (op1->IsLocal())
{
cloneExpr = true;
}
else if (op1->TypeIs(TYP_BYREF) && op1->OperIs(GT_ADDR) && op1->gtGetOp1()->IsLocal() &&
(OPCODE)impGetNonPrefixOpcode(codeAddr + sz, codeEndp) != CEE_INITOBJ)
{
cloneExpr = true;
}
}
else
{
// Always clone for debug mode
cloneExpr = true;
}
if (!cloneExpr)
{
const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("dup spill"));
impAssignTempGen(tmpNum, op1, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL);
var_types type = genActualType(lvaTable[tmpNum].TypeGet());
op1 = gtNewLclvNode(tmpNum, type);
// Propagate type info to the temp from the stack and the original tree
if (type == TYP_REF)
{
assert(lvaTable[tmpNum].lvSingleDef == 0);
lvaTable[tmpNum].lvSingleDef = 1;
JITDUMP("Marked V%02u as a single def local\n", tmpNum);
lvaSetClass(tmpNum, tree, tiRetVal.GetClassHandle());
}
}
op1 = impCloneExpr(op1, &op2, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("DUP instruction"));
assert(!(op1->gtFlags & GTF_GLOB_EFFECT) && !(op2->gtFlags & GTF_GLOB_EFFECT));
impPushOnStack(op1, tiRetVal);
impPushOnStack(op2, tiRetVal);
}
break;
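// Store-indirect opcodes: record the type being stored, then share the common STIND handling below.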
case CEE_STIND_I1:
lclTyp = TYP_BYTE;
goto STIND;
case CEE_STIND_I2:
lclTyp = TYP_SHORT;
goto STIND;
case CEE_STIND_I4:
lclTyp = TYP_INT;
goto STIND;
case CEE_STIND_I8:
lclTyp = TYP_LONG;
goto STIND;
case CEE_STIND_I:
lclTyp = TYP_I_IMPL;
goto STIND;
case CEE_STIND_REF:
lclTyp = TYP_REF;
goto STIND;
case CEE_STIND_R4:
lclTyp = TYP_FLOAT;
goto STIND;
case CEE_STIND_R8:
lclTyp = TYP_DOUBLE;
goto STIND;
STIND:
op2 = impPopStack().val; // value to store
op1 = impPopStack().val; // address to store to
// you can indirect off of a TYP_I_IMPL (if we are in C) or a BYREF
assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
impBashVarAddrsToI(op1, op2);
op2 = impImplicitR4orR8Cast(op2, lclTyp);
#ifdef TARGET_64BIT
// Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
{
op2->gtType = TYP_I_IMPL;
}
else
{
// Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
//
if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
{
op2 = gtNewCastNode(TYP_INT, op2, false, TYP_INT);
}
// Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
//
if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
{
op2 = gtNewCastNode(TYP_I_IMPL, op2, false, TYP_I_IMPL);
}
}
#endif // TARGET_64BIT
if (opcode == CEE_STIND_REF)
{
// STIND_REF can be used to store TYP_INT, TYP_I_IMPL, TYP_REF, or TYP_BYREF
assertImp(varTypeIsIntOrI(op2->gtType) || varTypeIsGC(op2->gtType));
lclTyp = genActualType(op2->TypeGet());
}
// Check target type.
#ifdef DEBUG
if (op2->gtType == TYP_BYREF || lclTyp == TYP_BYREF)
{
if (op2->gtType == TYP_BYREF)
{
assertImp(lclTyp == TYP_BYREF || lclTyp == TYP_I_IMPL);
}
else if (lclTyp == TYP_BYREF)
{
assertImp(op2->gtType == TYP_BYREF || varTypeIsIntOrI(op2->gtType));
}
}
else
{
assertImp(genActualType(op2->gtType) == genActualType(lclTyp) ||
((lclTyp == TYP_I_IMPL) && (genActualType(op2->gtType) == TYP_INT)) ||
(varTypeIsFloating(op2->gtType) && varTypeIsFloating(lclTyp)));
}
#endif
op1 = gtNewOperNode(GT_IND, lclTyp, op1);
// stind could point anywhere, for example a boxed class static int
op1->gtFlags |= GTF_IND_TGTANYWHERE;
if (prefixFlags & PREFIX_VOLATILE)
{
assert(op1->OperGet() == GT_IND);
op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
op1->gtFlags |= GTF_IND_VOLATILE;
}
if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
{
assert(op1->OperGet() == GT_IND);
op1->gtFlags |= GTF_IND_UNALIGNED;
}
op1 = gtNewAssignNode(op1, op2);
op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
// Spill side-effects AND global-data-accesses
if (verCurrentState.esStackDepth > 0)
{
impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STIND"));
}
goto APPEND;
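// Load-indirect opcodes: record the type being loaded, then share the common LDIND handling below.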
case CEE_LDIND_I1:
lclTyp = TYP_BYTE;
goto LDIND;
case CEE_LDIND_I2:
lclTyp = TYP_SHORT;
goto LDIND;
case CEE_LDIND_U4:
case CEE_LDIND_I4:
lclTyp = TYP_INT;
goto LDIND;
case CEE_LDIND_I8:
lclTyp = TYP_LONG;
goto LDIND;
case CEE_LDIND_REF:
lclTyp = TYP_REF;
goto LDIND;
case CEE_LDIND_I:
lclTyp = TYP_I_IMPL;
goto LDIND;
case CEE_LDIND_R4:
lclTyp = TYP_FLOAT;
goto LDIND;
case CEE_LDIND_R8:
lclTyp = TYP_DOUBLE;
goto LDIND;
case CEE_LDIND_U1:
lclTyp = TYP_UBYTE;
goto LDIND;
case CEE_LDIND_U2:
lclTyp = TYP_USHORT;
goto LDIND;
LDIND:
op1 = impPopStack().val; // address to load from
impBashVarAddrsToI(op1);
#ifdef TARGET_64BIT
// Allow an upcast of op1 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
//
if (genActualType(op1->gtType) == TYP_INT)
{
op1 = gtNewCastNode(TYP_I_IMPL, op1, false, TYP_I_IMPL);
}
#endif
assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
op1 = gtNewOperNode(GT_IND, lclTyp, op1);
// ldind could point anywhere, for example a boxed class static int
op1->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
if (prefixFlags & PREFIX_VOLATILE)
{
assert(op1->OperGet() == GT_IND);
op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
op1->gtFlags |= GTF_IND_VOLATILE;
}
if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
{
assert(op1->OperGet() == GT_IND);
op1->gtFlags |= GTF_IND_UNALIGNED;
}
impPushOnStack(op1, tiRetVal);
break;
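// 'unaligned.' prefix: read the alignment operand and record the prefix for the memory access that follows.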
case CEE_UNALIGNED:
assert(sz == 1);
val = getU1LittleEndian(codeAddr);
++codeAddr;
JITDUMP(" %u", val);
if ((val != 1) && (val != 2) && (val != 4))
{
BADCODE("Alignment unaligned. must be 1, 2, or 4");
}
Verify(!(prefixFlags & PREFIX_UNALIGNED), "Multiple unaligned. prefixes");
prefixFlags |= PREFIX_UNALIGNED;
impValidateMemoryAccessOpcode(codeAddr, codeEndp, false);
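// Common prefix handling: fetch the opcode that follows the prefix and decode it.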
PREFIX:
opcode = (OPCODE)getU1LittleEndian(codeAddr);
opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
codeAddr += sizeof(__int8);
goto DECODE_OPCODE;
case CEE_VOLATILE:
Verify(!(prefixFlags & PREFIX_VOLATILE), "Multiple volatile. prefixes");
prefixFlags |= PREFIX_VOLATILE;
impValidateMemoryAccessOpcode(codeAddr, codeEndp, true);
assert(sz == 0);
goto PREFIX;
case CEE_LDFTN:
{
// Need to do a lookup here so that we perform an access check
// and do a NOWAY if protections are violated
_impResolveToken(CORINFO_TOKENKIND_Method);
JITDUMP(" %08X", resolvedToken.token);
eeGetCallInfo(&resolvedToken, (prefixFlags & PREFIX_CONSTRAINED) ? &constrainedResolvedToken : nullptr,
combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN), &callInfo);
// This check really only applies to intrinsic Array.Address methods
if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
{
NO_WAY("Currently do not support LDFTN of Parameterized functions");
}
// Do this before DO_LDFTN since CEE_LDVIRTFN does it on its own.
impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
DO_LDFTN:
op1 = impMethodPointer(&resolvedToken, &callInfo);
if (compDonotInline())
{
return;
}
// Call info may have more precise information about the function than
// the resolved token.
mdToken constrainedToken = prefixFlags & PREFIX_CONSTRAINED ? constrainedResolvedToken.token : 0;
methodPointerInfo* heapToken = impAllocateMethodPointerInfo(resolvedToken, constrainedToken);
assert(callInfo.hMethod != nullptr);
heapToken->m_token.hMethod = callInfo.hMethod;
impPushOnStack(op1, typeInfo(heapToken));
break;
}
case CEE_LDVIRTFTN:
{
/* Get the method token */
_impResolveToken(CORINFO_TOKENKIND_Method);
JITDUMP(" %08X", resolvedToken.token);
eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef */,
combine(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN),
CORINFO_CALLINFO_CALLVIRT),
&callInfo);
// This check really only applies to intrinsic Array.Address methods
if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
{
NO_WAY("Currently do not support LDFTN of Parameterized functions");
}
mflags = callInfo.methodFlags;
impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
if (compIsForInlining())
{
if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
{
compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDVIRTFN_ON_NON_VIRTUAL);
return;
}
}
CORINFO_SIG_INFO& ftnSig = callInfo.sig;
/* Get the object-ref */
op1 = impPopStack().val;
assertImp(op1->gtType == TYP_REF);
if (opts.IsReadyToRun())
{
if (callInfo.kind != CORINFO_VIRTUALCALL_LDVIRTFTN)
{
if (op1->gtFlags & GTF_SIDE_EFFECT)
{
op1 = gtUnusedValNode(op1);
impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI);
}
goto DO_LDFTN;
}
}
else if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
{
if (op1->gtFlags & GTF_SIDE_EFFECT)
{
op1 = gtUnusedValNode(op1);
impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI);
}
goto DO_LDFTN;
}
GenTree* fptr = impImportLdvirtftn(op1, &resolvedToken, &callInfo);
if (compDonotInline())
{
return;
}
methodPointerInfo* heapToken = impAllocateMethodPointerInfo(resolvedToken, 0);
assert(heapToken->m_token.tokenType == CORINFO_TOKENKIND_Method);
assert(callInfo.hMethod != nullptr);
heapToken->m_token.tokenType = CORINFO_TOKENKIND_Ldvirtftn;
heapToken->m_token.hMethod = callInfo.hMethod;
impPushOnStack(fptr, typeInfo(heapToken));
break;
}
case CEE_CONSTRAINED:
assertImp(sz == sizeof(unsigned));
impResolveToken(codeAddr, &constrainedResolvedToken, CORINFO_TOKENKIND_Constrained);
codeAddr += sizeof(unsigned); // prefix instructions must increment codeAddr manually
JITDUMP(" (%08X) ", constrainedResolvedToken.token);
Verify(!(prefixFlags & PREFIX_CONSTRAINED), "Multiple constrained. prefixes");
prefixFlags |= PREFIX_CONSTRAINED;
{
OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
if (actualOpcode != CEE_CALLVIRT && actualOpcode != CEE_CALL && actualOpcode != CEE_LDFTN)
{
BADCODE("constrained. has to be followed by callvirt, call or ldftn");
}
}
goto PREFIX;
case CEE_READONLY:
JITDUMP(" readonly.");
Verify(!(prefixFlags & PREFIX_READONLY), "Multiple readonly. prefixes");
prefixFlags |= PREFIX_READONLY;
{
OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
if (actualOpcode != CEE_LDELEMA && !impOpcodeIsCallOpcode(actualOpcode))
{
BADCODE("readonly. has to be followed by ldelema or call");
}
}
assert(sz == 0);
goto PREFIX;
case CEE_TAILCALL:
JITDUMP(" tail.");
Verify(!(prefixFlags & PREFIX_TAILCALL_EXPLICIT), "Multiple tailcall. prefixes");
prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
{
OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
if (!impOpcodeIsCallOpcode(actualOpcode))
{
BADCODE("tailcall. has to be followed by call, callvirt or calli");
}
}
assert(sz == 0);
goto PREFIX;
case CEE_NEWOBJ:
/* Since we will implicitly insert newObjThisPtr at the start of the
argument list, spill any GTF_ORDER_SIDEEFF */
impSpillSpecialSideEff();
/* NEWOBJ does not respond to TAIL */
prefixFlags &= ~PREFIX_TAILCALL_EXPLICIT;
/* NEWOBJ does not respond to CONSTRAINED */
prefixFlags &= ~PREFIX_CONSTRAINED;
_impResolveToken(CORINFO_TOKENKIND_NewObj);
eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_ALLOWINSTPARAM), &callInfo);
mflags = callInfo.methodFlags;
if ((mflags & (CORINFO_FLG_STATIC | CORINFO_FLG_ABSTRACT)) != 0)
{
BADCODE("newobj on static or abstract method");
}
// Insert the security callout before any actual code is generated
impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
// There are three different cases for new
// Object size is variable (depends on arguments)
// 1) Object is an array (arrays treated specially by the EE)
// 2) Object is some other variable sized object (e.g. String)
// 3) Class Size can be determined beforehand (normal case)
// In the first case, we need to call a NEWOBJ helper (multinewarray)
// in the second case we call the constructor with a '0' this pointer
// In the third case we alloc the memory, then call the constructor
clsFlags = callInfo.classFlags;
if (clsFlags & CORINFO_FLG_ARRAY)
{
// Arrays need to call the NEWOBJ helper.
assertImp(clsFlags & CORINFO_FLG_VAROBJSIZE);
impImportNewObjArray(&resolvedToken, &callInfo);
if (compDonotInline())
{
return;
}
callTyp = TYP_REF;
break;
}
// At present this can only be String
else if (clsFlags & CORINFO_FLG_VAROBJSIZE)
{
// Skip this thisPtr argument
newObjThisPtr = nullptr;
/* Remember that this basic block contains 'new' of an object */
block->bbFlags |= BBF_HAS_NEWOBJ;
optMethodFlags |= OMF_HAS_NEWOBJ;
}
else
{
// This is the normal case where the size of the object is
// fixed. Allocate the memory and call the constructor.
// Note: We cannot add a peep to avoid use of temp here
// because we don't have enough interference info to detect when
// sources and destination interfere, example: s = new S(ref);
// TODO: Find the correct place to introduce a general
// reverse copy prop for struct return values from newobj or
// any function returning structs.
/* get a temporary for the new object */
lclNum = lvaGrabTemp(true DEBUGARG("NewObj constructor temp"));
if (compDonotInline())
{
// Fail fast if lvaGrabTemp fails with CALLSITE_TOO_MANY_LOCALS.
assert(compInlineResult->GetObservation() == InlineObservation::CALLSITE_TOO_MANY_LOCALS);
return;
}
// In the value class case we only need clsHnd for size calcs.
//
// The lookup of the code pointer will be handled by CALL in this case
if (clsFlags & CORINFO_FLG_VALUECLASS)
{
if (compIsForInlining())
{
// If value class has GC fields, inform the inliner. It may choose to
// bail out on the inline.
DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
{
compInlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
if (compInlineResult->IsFailure())
{
return;
}
// Do further notification in the case where the call site is rare;
// some policies do not track the relative hotness of call sites for
// "always" inline cases.
if (impInlineInfo->iciBlock->isRunRarely())
{
compInlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
if (compInlineResult->IsFailure())
{
return;
}
}
}
}
CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
if (impIsPrimitive(jitTyp))
{
lvaTable[lclNum].lvType = JITtype2varType(jitTyp);
}
else
{
// The local variable itself is the allocated space.
// Here we need the unsafe value cls check, since the address of the struct is taken for further use
// and is potentially exploitable.
lvaSetStruct(lclNum, resolvedToken.hClass, true /* unsafe value cls check */);
}
bool bbInALoop = impBlockIsInALoop(block);
bool bbIsReturn = (block->bbJumpKind == BBJ_RETURN) &&
(!compIsForInlining() || (impInlineInfo->iciBlock->bbJumpKind == BBJ_RETURN));
LclVarDsc* const lclDsc = lvaGetDesc(lclNum);
if (fgVarNeedsExplicitZeroInit(lclNum, bbInALoop, bbIsReturn))
{
// Append a tree to zero-out the temp
newObjThisPtr = gtNewLclvNode(lclNum, lclDsc->TypeGet());
newObjThisPtr = gtNewBlkOpNode(newObjThisPtr, // Dest
gtNewIconNode(0), // Value
false, // isVolatile
false); // not copyBlock
impAppendTree(newObjThisPtr, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);
}
else
{
JITDUMP("\nSuppressing zero-init for V%02u -- expect to zero in prolog\n", lclNum);
lclDsc->lvSuppressedZeroInit = 1;
compSuppressedZeroInit = true;
}
// Obtain the address of the temp
newObjThisPtr =
gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet()));
}
else
{
// If we're newing up a finalizable object, spill anything that can cause exceptions.
//
bool hasSideEffects = false;
CorInfoHelpFunc newHelper =
info.compCompHnd->getNewHelper(&resolvedToken, info.compMethodHnd, &hasSideEffects);
if (hasSideEffects)
{
JITDUMP("\nSpilling stack for finalizable newobj\n");
impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("finalizable newobj spill"));
}
const bool useParent = true;
op1 = gtNewAllocObjNode(&resolvedToken, useParent);
if (op1 == nullptr)
{
return;
}
// Remember that this basic block contains 'new' of an object
block->bbFlags |= BBF_HAS_NEWOBJ;
optMethodFlags |= OMF_HAS_NEWOBJ;
// Append the assignment to the temp/local. Don't need to spill
// at all as we are just calling an EE-Jit helper which can only
// cause an (async) OutOfMemoryException.
// We assign the newly allocated object (by a GT_ALLOCOBJ node)
// to a temp. Note that the pattern "temp = allocObj" is required
// by ObjectAllocator phase to be able to determine GT_ALLOCOBJ nodes
// without exhaustive walk over all expressions.
impAssignTempGen(lclNum, op1, (unsigned)CHECK_SPILL_NONE);
assert(lvaTable[lclNum].lvSingleDef == 0);
lvaTable[lclNum].lvSingleDef = 1;
JITDUMP("Marked V%02u as a single def local\n", lclNum);
lvaSetClass(lclNum, resolvedToken.hClass, true /* is Exact */);
newObjThisPtr = gtNewLclvNode(lclNum, TYP_REF);
}
}
goto CALL;
case CEE_CALLI:
/* CALLI does not respond to CONSTRAINED */
prefixFlags &= ~PREFIX_CONSTRAINED;
FALLTHROUGH;
case CEE_CALLVIRT:
case CEE_CALL:
// We can't call getCallInfo on the token from a CALLI, but we need it in
// many other places. We unfortunately embed that knowledge here.
if (opcode != CEE_CALLI)
{
_impResolveToken(CORINFO_TOKENKIND_Method);
eeGetCallInfo(&resolvedToken,
(prefixFlags & PREFIX_CONSTRAINED) ? &constrainedResolvedToken : nullptr,
// this is how impImportCall invokes getCallInfo
combine(combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS),
(opcode == CEE_CALLVIRT) ? CORINFO_CALLINFO_CALLVIRT : CORINFO_CALLINFO_NONE),
&callInfo);
}
else
{
// Suppress uninitialized use warning.
memset(&resolvedToken, 0, sizeof(resolvedToken));
memset(&callInfo, 0, sizeof(callInfo));
resolvedToken.token = getU4LittleEndian(codeAddr);
resolvedToken.tokenContext = impTokenLookupContextHandle;
resolvedToken.tokenScope = info.compScopeHnd;
}
CALL: // memberRef should be set.
// newObjThisPtr should be set for CEE_NEWOBJ
JITDUMP(" %08X", resolvedToken.token);
constraintCall = (prefixFlags & PREFIX_CONSTRAINED) != 0;
bool newBBcreatedForTailcallStress;
bool passedStressModeValidation;
newBBcreatedForTailcallStress = false;
passedStressModeValidation = true;
if (compIsForInlining())
{
if (compDonotInline())
{
return;
}
// We rule out inlinees with explicit tail calls in fgMakeBasicBlocks.
assert((prefixFlags & PREFIX_TAILCALL_EXPLICIT) == 0);
}
else
{
if (compTailCallStress())
{
// Have we created a new BB after the "call" instruction in fgMakeBasicBlocks()?
// Tail call stress only recognizes call+ret patterns and forces them to be
// explicit tail prefixed calls. Also fgMakeBasicBlocks() under tail call stress
// doesn't import the 'ret' opcode following the call into the basic block containing
// the call; instead it imports it into a new basic block. Note that fgMakeBasicBlocks()
// is already checking that there is an opcode following call and hence it is
// safe here to read next opcode without bounds check.
newBBcreatedForTailcallStress =
impOpcodeIsCallOpcode(opcode) && // Current opcode is a CALL, (not a CEE_NEWOBJ). So, don't
// make it jump to RET.
(OPCODE)getU1LittleEndian(codeAddr + sz) == CEE_RET; // Next opcode is a CEE_RET
bool hasTailPrefix = (prefixFlags & PREFIX_TAILCALL_EXPLICIT);
if (newBBcreatedForTailcallStress && !hasTailPrefix)
{
// Do a more detailed evaluation of legality
const bool returnFalseIfInvalid = true;
const bool passedConstraintCheck =
verCheckTailCallConstraint(opcode, &resolvedToken,
constraintCall ? &constrainedResolvedToken : nullptr,
returnFalseIfInvalid);
if (passedConstraintCheck)
{
// Now check with the runtime
CORINFO_METHOD_HANDLE declaredCalleeHnd = callInfo.hMethod;
bool isVirtual = (callInfo.kind == CORINFO_VIRTUALCALL_STUB) ||
(callInfo.kind == CORINFO_VIRTUALCALL_VTABLE);
CORINFO_METHOD_HANDLE exactCalleeHnd = isVirtual ? nullptr : declaredCalleeHnd;
if (info.compCompHnd->canTailCall(info.compMethodHnd, declaredCalleeHnd, exactCalleeHnd,
hasTailPrefix)) // Is it legal to do tailcall?
{
// Stress the tailcall.
JITDUMP(" (Tailcall stress: prefixFlags |= PREFIX_TAILCALL_EXPLICIT)");
prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
prefixFlags |= PREFIX_TAILCALL_STRESS;
}
else
{
// Runtime disallows this tail call
JITDUMP(" (Tailcall stress: runtime preventing tailcall)");
passedStressModeValidation = false;
}
}
else
{
// Constraints disallow this tail call
JITDUMP(" (Tailcall stress: constraint check failed)");
passedStressModeValidation = false;
}
}
}
}
// This is split up to avoid goto flow warnings.
bool isRecursive;
isRecursive = !compIsForInlining() && (callInfo.hMethod == info.compMethodHnd);
// If we've already disqualified this call as a tail call under tail call stress,
// don't consider it for implicit tail calling either.
//
// When not running under tail call stress, we may mark this call as an implicit
// tail call candidate. We'll do an "equivalent" validation during impImportCall.
//
// Note that when running under tail call stress, a call marked as explicit
// tail prefixed will not be considered for implicit tail calling.
if (passedStressModeValidation &&
impIsImplicitTailCallCandidate(opcode, codeAddr + sz, codeEndp, prefixFlags, isRecursive))
{
if (compIsForInlining())
{
#if FEATURE_TAILCALL_OPT_SHARED_RETURN
// Are we inlining at an implicit tail call site? If so then we can flag
// implicit tail call sites in the inline body. These call sites
// often end up in non BBJ_RETURN blocks, so only flag them when
// we're able to handle shared returns.
if (impInlineInfo->iciCall->IsImplicitTailCall())
{
JITDUMP("\n (Inline Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
}
#endif // FEATURE_TAILCALL_OPT_SHARED_RETURN
}
else
{
JITDUMP("\n (Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
}
}
// Treat this call as tail call for verification only if "tail" prefixed (i.e. explicit tail call).
explicitTailCall = (prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0;
readonlyCall = (prefixFlags & PREFIX_READONLY) != 0;
if (opcode != CEE_CALLI && opcode != CEE_NEWOBJ)
{
// All calls and delegates need a security callout.
// For delegates, this is the call to the delegate constructor, not the access check on the
// LD(virt)FTN.
impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
}
callTyp = impImportCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
newObjThisPtr, prefixFlags, &callInfo, opcodeOffs);
if (compDonotInline())
{
// We do not check for failures after lvaGrabTemp. This is covered by issue CoreCLR_13272.
assert((callTyp == TYP_UNDEF) ||
(compInlineResult->GetObservation() == InlineObservation::CALLSITE_TOO_MANY_LOCALS));
return;
}
if (explicitTailCall || newBBcreatedForTailcallStress) // If newBBcreatedForTailcallStress is true, we
// have created a new BB after the "call"
// instruction in fgMakeBasicBlocks(). So we need to jump to RET regardless.
{
assert(!compIsForInlining());
goto RET;
}
break;
case CEE_LDFLD:
case CEE_LDSFLD:
case CEE_LDFLDA:
case CEE_LDSFLDA:
{
bool isLoadAddress = (opcode == CEE_LDFLDA || opcode == CEE_LDSFLDA);
bool isLoadStatic = (opcode == CEE_LDSFLD || opcode == CEE_LDSFLDA);
/* Get the CP_Fieldref index */
assertImp(sz == sizeof(unsigned));
_impResolveToken(CORINFO_TOKENKIND_Field);
JITDUMP(" %08X", resolvedToken.token);
int aflags = isLoadAddress ? CORINFO_ACCESS_ADDRESS : CORINFO_ACCESS_GET;
GenTree* obj = nullptr;
typeInfo* tiObj = nullptr;
CORINFO_CLASS_HANDLE objType = nullptr; // used for fields
if (opcode == CEE_LDFLD || opcode == CEE_LDFLDA)
{
tiObj = &impStackTop().seTypeInfo;
StackEntry se = impPopStack();
objType = se.seTypeInfo.GetClassHandle();
obj = se.val;
if (impIsThis(obj))
{
aflags |= CORINFO_ACCESS_THIS;
}
}
eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
// Figure out the type of the member. We always call canAccessField, so you always need this
// handle
CorInfoType ciType = fieldInfo.fieldType;
clsHnd = fieldInfo.structType;
lclTyp = JITtype2varType(ciType);
if (compIsForInlining())
{
switch (fieldInfo.fieldAccessor)
{
case CORINFO_FIELD_INSTANCE_HELPER:
case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
case CORINFO_FIELD_STATIC_ADDR_HELPER:
case CORINFO_FIELD_STATIC_TLS:
compInlineResult->NoteFatal(InlineObservation::CALLEE_LDFLD_NEEDS_HELPER);
return;
case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
/* We may be able to inline the field accessors in specific instantiations of generic
* methods */
compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDFLD_NEEDS_HELPER);
return;
default:
break;
}
if (!isLoadAddress && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && lclTyp == TYP_STRUCT &&
clsHnd)
{
if ((info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd) == CORINFO_TYPE_UNDEF) &&
!(info.compFlags & CORINFO_FLG_FORCEINLINE))
{
// Loading a static valuetype field usually will cause a JitHelper to be called
// for the static base. This will bloat the code.
compInlineResult->Note(InlineObservation::CALLEE_LDFLD_STATIC_VALUECLASS);
if (compInlineResult->IsFailure())
{
return;
}
}
}
}
tiRetVal = verMakeTypeInfo(ciType, clsHnd);
if (isLoadAddress)
{
tiRetVal.MakeByRef();
}
else
{
tiRetVal.NormaliseForStack();
}
// Perform this check always to ensure that we get field access exceptions even with
// SkipVerification.
impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
// Raise InvalidProgramException if static load accesses non-static field
if (isLoadStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
{
BADCODE("static access on an instance field");
}
// We are using ldfld/a on a static field. We allow it, but need to evaluate any side effects from obj.
if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
{
if (obj->gtFlags & GTF_SIDE_EFFECT)
{
obj = gtUnusedValNode(obj);
impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtDI);
}
obj = nullptr;
}
/* Preserve 'small' int types */
if (!varTypeIsSmall(lclTyp))
{
lclTyp = genActualType(lclTyp);
}
bool usesHelper = false;
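// Build the field access according to how the EE reports this field must be accessed.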
switch (fieldInfo.fieldAccessor)
{
case CORINFO_FIELD_INSTANCE:
#ifdef FEATURE_READYTORUN
case CORINFO_FIELD_INSTANCE_WITH_BASE:
#endif
{
// If the object is a struct, what we really want is
// for the field to operate on the address of the struct.
if (!varTypeGCtype(obj->TypeGet()) && impIsValueType(tiObj))
{
assert(opcode == CEE_LDFLD && objType != nullptr);
obj = impGetStructAddr(obj, objType, (unsigned)CHECK_SPILL_ALL, true);
}
/* Create the data member node */
op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset);
#ifdef FEATURE_READYTORUN
if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
{
op1->AsField()->gtFieldLookup = fieldInfo.fieldLookup;
}
#endif
op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
if (fgAddrCouldBeNull(obj))
{
op1->gtFlags |= GTF_EXCEPT;
}
// If the object is a BYREF then our target is a value class and
// it could point anywhere, for example a boxed class static int
if (obj->gtType == TYP_BYREF)
{
op1->gtFlags |= GTF_IND_TGTANYWHERE;
}
DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
if (StructHasOverlappingFields(typeFlags))
{
op1->AsField()->gtFldMayOverlap = true;
}
// Wrap it in an address-of operator if necessary
if (isLoadAddress)
{
op1 = gtNewOperNode(GT_ADDR,
(var_types)(varTypeIsGC(obj->TypeGet()) ? TYP_BYREF : TYP_I_IMPL), op1);
}
else
{
if (compIsForInlining() &&
impInlineIsGuaranteedThisDerefBeforeAnySideEffects(nullptr, nullptr, obj,
impInlineInfo->inlArgInfo))
{
impInlineInfo->thisDereferencedFirst = true;
}
}
}
break;
case CORINFO_FIELD_STATIC_TLS:
#ifdef TARGET_X86
// Legacy TLS access is implemented as intrinsic on x86 only
/* Create the data member node */
op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
if (isLoadAddress)
{
op1 = gtNewOperNode(GT_ADDR, (var_types)TYP_I_IMPL, op1);
}
break;
#else
fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
FALLTHROUGH;
#endif
case CORINFO_FIELD_STATIC_ADDR_HELPER:
case CORINFO_FIELD_INSTANCE_HELPER:
case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
clsHnd, nullptr);
usesHelper = true;
break;
case CORINFO_FIELD_STATIC_ADDRESS:
// Replace static read-only fields with constant if possible
if ((aflags & CORINFO_ACCESS_GET) && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_FINAL) &&
!(fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP) &&
(varTypeIsIntegral(lclTyp) || varTypeIsFloating(lclTyp)))
{
CorInfoInitClassResult initClassResult =
info.compCompHnd->initClass(resolvedToken.hField, info.compMethodHnd,
impTokenLookupContextHandle);
if (initClassResult & CORINFO_INITCLASS_INITIALIZED)
{
void** pFldAddr = nullptr;
void* fldAddr =
info.compCompHnd->getFieldAddress(resolvedToken.hField, (void**)&pFldAddr);
// We should always be able to access this static's address directly
//
assert(pFldAddr == nullptr);
op1 = impImportStaticReadOnlyField(fldAddr, lclTyp);
// Widen small types since we're propagating the value
// instead of producing an indir.
//
op1->gtType = genActualType(lclTyp);
goto FIELD_DONE;
}
}
FALLTHROUGH;
case CORINFO_FIELD_STATIC_RVA_ADDRESS:
case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
lclTyp);
break;
case CORINFO_FIELD_INTRINSIC_ZERO:
{
assert(aflags & CORINFO_ACCESS_GET);
// Widen to stack type
lclTyp = genActualType(lclTyp);
op1 = gtNewIconNode(0, lclTyp);
goto FIELD_DONE;
}
break;
case CORINFO_FIELD_INTRINSIC_EMPTY_STRING:
{
assert(aflags & CORINFO_ACCESS_GET);
// Import String.Empty as "" (GT_CNS_STR with a fake SconCPX = 0)
op1 = gtNewSconNode(EMPTY_STRING_SCON, nullptr);
goto FIELD_DONE;
}
break;
case CORINFO_FIELD_INTRINSIC_ISLITTLEENDIAN:
{
assert(aflags & CORINFO_ACCESS_GET);
// Widen to stack type
lclTyp = genActualType(lclTyp);
#if BIGENDIAN
op1 = gtNewIconNode(0, lclTyp);
#else
op1 = gtNewIconNode(1, lclTyp);
#endif
goto FIELD_DONE;
}
break;
default:
assert(!"Unexpected fieldAccessor");
}
if (!isLoadAddress)
{
if (prefixFlags & PREFIX_VOLATILE)
{
op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
if (!usesHelper)
{
assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
(op1->OperGet() == GT_OBJ));
op1->gtFlags |= GTF_IND_VOLATILE;
}
}
if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
{
if (!usesHelper)
{
assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
(op1->OperGet() == GT_OBJ));
op1->gtFlags |= GTF_IND_UNALIGNED;
}
}
}
/* Check if the class needs explicit initialization */
if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
{
GenTree* helperNode = impInitClass(&resolvedToken);
if (compDonotInline())
{
return;
}
if (helperNode != nullptr)
{
op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
}
}
FIELD_DONE:
impPushOnStack(op1, tiRetVal);
}
break;
case CEE_STFLD:
case CEE_STSFLD:
{
bool isStoreStatic = (opcode == CEE_STSFLD);
CORINFO_CLASS_HANDLE fieldClsHnd; // class of the field (if it's a ref type)
/* Get the CP_Fieldref index */
assertImp(sz == sizeof(unsigned));
_impResolveToken(CORINFO_TOKENKIND_Field);
JITDUMP(" %08X", resolvedToken.token);
int aflags = CORINFO_ACCESS_SET;
GenTree* obj = nullptr;
typeInfo* tiObj = nullptr;
typeInfo tiVal;
/* Pull the value from the stack */
StackEntry se = impPopStack();
op2 = se.val;
tiVal = se.seTypeInfo;
clsHnd = tiVal.GetClassHandle();
if (opcode == CEE_STFLD)
{
tiObj = &impStackTop().seTypeInfo;
obj = impPopStack().val;
if (impIsThis(obj))
{
aflags |= CORINFO_ACCESS_THIS;
}
}
eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
// Figure out the type of the member. We always call canAccessField, so you always need this
// handle
CorInfoType ciType = fieldInfo.fieldType;
fieldClsHnd = fieldInfo.structType;
lclTyp = JITtype2varType(ciType);
if (compIsForInlining())
{
/* Is this a 'special' (COM) field, a TLS ref static field, a field stored in the GC heap, or a
 * per-instantiation static? */
switch (fieldInfo.fieldAccessor)
{
case CORINFO_FIELD_INSTANCE_HELPER:
case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
case CORINFO_FIELD_STATIC_ADDR_HELPER:
case CORINFO_FIELD_STATIC_TLS:
compInlineResult->NoteFatal(InlineObservation::CALLEE_STFLD_NEEDS_HELPER);
return;
case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
/* We may be able to inline the field accessors in specific instantiations of generic
* methods */
compInlineResult->NoteFatal(InlineObservation::CALLSITE_STFLD_NEEDS_HELPER);
return;
default:
break;
}
}
impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
// Raise InvalidProgramException if static store accesses non-static field
if (isStoreStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
{
BADCODE("static access on an instance field");
}
// We are using stfld on a static field.
// We allow it, but need to evaluate any side effects from obj
if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
{
if (obj->gtFlags & GTF_SIDE_EFFECT)
{
obj = gtUnusedValNode(obj);
impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtDI);
}
obj = nullptr;
}
/* Preserve 'small' int types */
if (!varTypeIsSmall(lclTyp))
{
lclTyp = genActualType(lclTyp);
}
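// Build the store destination according to how the EE reports this field must be accessed.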
switch (fieldInfo.fieldAccessor)
{
case CORINFO_FIELD_INSTANCE:
#ifdef FEATURE_READYTORUN
case CORINFO_FIELD_INSTANCE_WITH_BASE:
#endif
{
/* Create the data member node */
op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset);
DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
if (StructHasOverlappingFields(typeFlags))
{
op1->AsField()->gtFldMayOverlap = true;
}
#ifdef FEATURE_READYTORUN
if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
{
op1->AsField()->gtFieldLookup = fieldInfo.fieldLookup;
}
#endif
op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
if (fgAddrCouldBeNull(obj))
{
op1->gtFlags |= GTF_EXCEPT;
}
// If the object is a BYREF then our target is a value class and
// it could point anywhere, for example a boxed class static int
if (obj->gtType == TYP_BYREF)
{
op1->gtFlags |= GTF_IND_TGTANYWHERE;
}
if (compIsForInlining() &&
impInlineIsGuaranteedThisDerefBeforeAnySideEffects(op2, nullptr, obj,
impInlineInfo->inlArgInfo))
{
impInlineInfo->thisDereferencedFirst = true;
}
}
break;
case CORINFO_FIELD_STATIC_TLS:
#ifdef TARGET_X86
// Legacy TLS access is implemented as intrinsic on x86 only
/* Create the data member node */
op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
break;
#else
fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
FALLTHROUGH;
#endif
case CORINFO_FIELD_STATIC_ADDR_HELPER:
case CORINFO_FIELD_INSTANCE_HELPER:
case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
clsHnd, op2);
goto SPILL_APPEND;
case CORINFO_FIELD_STATIC_ADDRESS:
case CORINFO_FIELD_STATIC_RVA_ADDRESS:
case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
lclTyp);
break;
default:
assert(!"Unexpected fieldAccessor");
}
// Create the member assignment, unless we have a TYP_STRUCT.
bool deferStructAssign = (lclTyp == TYP_STRUCT);
if (!deferStructAssign)
{
if (prefixFlags & PREFIX_VOLATILE)
{
assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
op1->gtFlags |= GTF_IND_VOLATILE;
}
if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
{
assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
op1->gtFlags |= GTF_IND_UNALIGNED;
}
/* V4.0 allows assignment of i4 constant values to i8 type vars when IL verifier is bypassed (full
trust apps). The reason this works is that the JIT stores an i4 constant in the GenTree union during
importation and reads from the union as if it were a long during code generation. Though this
can potentially read garbage, one can get lucky and have this work correctly.
This code pattern is generated by Dev10 MC++ compiler while storing to fields when compiled with
/O2 switch (default when compiling retail configs in Dev10) and a customer app has taken a
dependency on it. To be backward compatible, we will explicitly add an upward cast here so that
it always works correctly.
Note that this is limited to x86 alone as there is no back compat to be addressed for Arm JIT
for V4.0.
*/
CLANG_FORMAT_COMMENT_ANCHOR;
#ifndef TARGET_64BIT
// In UWP6.0 and beyond (post-.NET Core 2.0), we decided to let this cast from int to long be
// generated for ARM as well as x86, so the following IR will be accepted:
// STMTx (IL 0x... ???)
// * ASG long
// +--* CLS_VAR long
// \--* CNS_INT int 2
if ((op1->TypeGet() != op2->TypeGet()) && op2->OperIsConst() && varTypeIsIntOrI(op2->TypeGet()) &&
varTypeIsLong(op1->TypeGet()))
{
op2 = gtNewCastNode(op1->TypeGet(), op2, false, op1->TypeGet());
}
#endif
#ifdef TARGET_64BIT
// Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
{
op2->gtType = TYP_I_IMPL;
}
else
{
// Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
//
if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
{
op2 = gtNewCastNode(TYP_INT, op2, false, TYP_INT);
}
// Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
//
if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
{
op2 = gtNewCastNode(TYP_I_IMPL, op2, false, TYP_I_IMPL);
}
}
#endif
// We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
// We insert a cast to the dest 'op1' type
//
if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
varTypeIsFloating(op2->gtType))
{
op2 = gtNewCastNode(op1->TypeGet(), op2, false, op1->TypeGet());
}
op1 = gtNewAssignNode(op1, op2);
/* Mark the expression as containing an assignment */
op1->gtFlags |= GTF_ASG;
}
/* Check if the class needs explicit initialization */
if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
{
GenTree* helperNode = impInitClass(&resolvedToken);
if (compDonotInline())
{
return;
}
if (helperNode != nullptr)
{
op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
}
}
/* stfld can interfere with value classes (consider the sequence
ldloc, ldloca, ..., stfld, stloc). We will be conservative and
spill all value class references from the stack. */
if (obj && ((obj->gtType == TYP_BYREF) || (obj->gtType == TYP_I_IMPL)))
{
assert(tiObj);
// If we can resolve the field to be within some local,
// then just spill that local.
//
GenTreeLclVarCommon* const lcl = obj->IsLocalAddrExpr();
if (lcl != nullptr)
{
impSpillLclRefs(lcl->GetLclNum());
}
else if (impIsValueType(tiObj))
{
impSpillEvalStack();
}
else
{
impSpillValueClasses();
}
}
/* Spill any refs to the same member from the stack */
impSpillLclRefs((ssize_t)resolvedToken.hField);
/* stsfld also interferes with indirect accesses (for aliased
statics) and calls. But don't need to spill other statics
as we have explicitly spilled this particular static field. */
impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STFLD"));
if (deferStructAssign)
{
op1 = impAssignStruct(op1, op2, clsHnd, (unsigned)CHECK_SPILL_ALL);
}
}
goto APPEND;
case CEE_NEWARR:
{
/* Get the class type index operand */
_impResolveToken(CORINFO_TOKENKIND_Newarr);
JITDUMP(" %08X", resolvedToken.token);
if (!opts.IsReadyToRun())
{
// Need to restore array classes before creating array objects on the heap
op1 = impTokenToHandle(&resolvedToken, nullptr, true /*mustRestoreHandle*/);
if (op1 == nullptr)
{ // compDonotInline()
return;
}
}
tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
accessAllowedResult =
info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
/* Form the arglist: array class handle, size */
op2 = impPopStack().val;
assertImp(genActualTypeIsIntOrI(op2->gtType));
#ifdef TARGET_64BIT
// The array helper takes a native int for array length.
// So if we have an int, explicitly extend it to be a native int.
if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
{
if (op2->IsIntegralConst())
{
op2->gtType = TYP_I_IMPL;
}
else
{
bool isUnsigned = false;
op2 = gtNewCastNode(TYP_I_IMPL, op2, isUnsigned, TYP_I_IMPL);
}
}
#endif // TARGET_64BIT
#ifdef FEATURE_READYTORUN
if (opts.IsReadyToRun())
{
op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEWARR_1, TYP_REF,
gtNewCallArgs(op2));
usingReadyToRunHelper = (op1 != nullptr);
if (!usingReadyToRunHelper)
{
// TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
// and the newarr call with a single call to a dynamic R2R cell that will:
// 1) Load the context
// 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
// 3) Allocate the new array
// Reason: performance (today, we'll always use the slow helper for the R2R generics case)
// Need to restore array classes before creating array objects on the heap
op1 = impTokenToHandle(&resolvedToken, nullptr, true /*mustRestoreHandle*/);
if (op1 == nullptr)
{ // compDonotInline()
return;
}
}
}
if (!usingReadyToRunHelper)
#endif
{
GenTreeCall::Use* args = gtNewCallArgs(op1, op2);
/* Create a call to 'new' */
// Note that this only works for shared generic code because the same helper is used for all
// reference array types
op1 = gtNewHelperCallNode(info.compCompHnd->getNewArrHelper(resolvedToken.hClass), TYP_REF, args);
}
op1->AsCall()->compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)resolvedToken.hClass;
/* Remember that this basic block contains 'new' of a single-dimension (SD) array */
block->bbFlags |= BBF_HAS_NEWARRAY;
optMethodFlags |= OMF_HAS_NEWARRAY;
/* Push the result of the call on the stack */
impPushOnStack(op1, tiRetVal);
callTyp = TYP_REF;
}
break;
case CEE_LOCALLOC:
// We don't allow locallocs inside handlers
if (block->hasHndIndex())
{
BADCODE("Localloc can't be inside handler");
}
// Get the size to allocate
op2 = impPopStack().val;
assertImp(genActualTypeIsIntOrI(op2->gtType));
if (verCurrentState.esStackDepth != 0)
{
BADCODE("Localloc can only be used when the stack is empty");
}
// If the localloc is not in a loop and its size is a small constant,
// create a new local var of TYP_BLK and return its address.
{
bool convertedToLocal = false;
// Need to aggressively fold here, as even fixed-size locallocs
// will have casts in the way.
op2 = gtFoldExpr(op2);
if (op2->IsIntegralConst())
{
const ssize_t allocSize = op2->AsIntCon()->IconValue();
bool bbInALoop = impBlockIsInALoop(block);
if (allocSize == 0)
{
// Result is nullptr
JITDUMP("Converting stackalloc of 0 bytes to push null unmanaged pointer\n");
op1 = gtNewIconNode(0, TYP_I_IMPL);
convertedToLocal = true;
}
else if ((allocSize > 0) && !bbInALoop)
{
// Get the size threshold for local conversion
ssize_t maxSize = DEFAULT_MAX_LOCALLOC_TO_LOCAL_SIZE;
#ifdef DEBUG
// Optionally allow this to be modified
maxSize = JitConfig.JitStackAllocToLocalSize();
#endif // DEBUG
if (allocSize <= maxSize)
{
const unsigned stackallocAsLocal = lvaGrabTemp(false DEBUGARG("stackallocLocal"));
JITDUMP("Converting stackalloc of %zd bytes to new local V%02u\n", allocSize,
stackallocAsLocal);
lvaTable[stackallocAsLocal].lvType = TYP_BLK;
lvaTable[stackallocAsLocal].lvExactSize = (unsigned)allocSize;
lvaTable[stackallocAsLocal].lvIsUnsafeBuffer = true;
op1 = gtNewLclvNode(stackallocAsLocal, TYP_BLK);
op1 = gtNewOperNode(GT_ADDR, TYP_I_IMPL, op1);
convertedToLocal = true;
if (!this->opts.compDbgEnC)
{
// Ensure we have stack security for this method.
// Reorder layout since the converted localloc is treated as an unsafe buffer.
setNeedsGSSecurityCookie();
compGSReorderStackLayout = true;
}
}
}
}
if (!convertedToLocal)
{
// Bail out if inlining and the localloc was not converted.
//
// Note we might consider allowing the inline, if the call
// site is not in a loop.
if (compIsForInlining())
{
InlineObservation obs = op2->IsIntegralConst()
? InlineObservation::CALLEE_LOCALLOC_TOO_LARGE
: InlineObservation::CALLSITE_LOCALLOC_SIZE_UNKNOWN;
compInlineResult->NoteFatal(obs);
return;
}
op1 = gtNewOperNode(GT_LCLHEAP, TYP_I_IMPL, op2);
// May throw a stack overflow exception. Obviously, we don't want locallocs to be CSE'd.
op1->gtFlags |= (GTF_EXCEPT | GTF_DONT_CSE);
// Ensure we have stack security for this method.
setNeedsGSSecurityCookie();
/* The FP register may not be back to the original value at the end
of the method, even if the frame size is 0, as localloc may
have modified it. So we will HAVE to reset it */
compLocallocUsed = true;
}
else
{
compLocallocOptimized = true;
}
}
impPushOnStack(op1, tiRetVal);
break;
case CEE_ISINST:
{
/* Get the type token */
assertImp(sz == sizeof(unsigned));
_impResolveToken(CORINFO_TOKENKIND_Casting);
JITDUMP(" %08X", resolvedToken.token);
if (!opts.IsReadyToRun())
{
op2 = impTokenToHandle(&resolvedToken, nullptr, false);
if (op2 == nullptr)
{ // compDonotInline()
return;
}
}
accessAllowedResult =
info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
op1 = impPopStack().val;
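// Try to resolve the type test at jit time; otherwise expand it via the isinst helpers below.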
GenTree* optTree = impOptimizeCastClassOrIsInst(op1, &resolvedToken, false);
if (optTree != nullptr)
{
impPushOnStack(optTree, tiRetVal);
}
else
{
#ifdef FEATURE_READYTORUN
if (opts.IsReadyToRun())
{
GenTreeCall* opLookup =
impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_ISINSTANCEOF, TYP_REF,
gtNewCallArgs(op1));
usingReadyToRunHelper = (opLookup != nullptr);
op1 = (usingReadyToRunHelper ? opLookup : op1);
if (!usingReadyToRunHelper)
{
// TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
// and the isinstanceof_any call with a single call to a dynamic R2R cell that will:
// 1) Load the context
// 2) Perform the generic dictionary lookup and caching, and generate the appropriate
// stub
// 3) Perform the 'is instance' check on the input object
// Reason: performance (today, we'll always use the slow helper for the R2R generics case)
op2 = impTokenToHandle(&resolvedToken, nullptr, false);
if (op2 == nullptr)
{ // compDonotInline()
return;
}
}
}
if (!usingReadyToRunHelper)
#endif
{
op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, false, opcodeOffs);
}
if (compDonotInline())
{
return;
}
impPushOnStack(op1, tiRetVal);
}
break;
}
case CEE_REFANYVAL:
// Get the class handle and make an ICON node out of it
_impResolveToken(CORINFO_TOKENKIND_Class);
JITDUMP(" %08X", resolvedToken.token);
op2 = impTokenToHandle(&resolvedToken);
if (op2 == nullptr)
{ // compDonotInline()
return;
}
op1 = impPopStack().val;
// Make certain it is normalized
op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
// Call helper GETREFANY(classHandle, op1);
op1 = gtNewHelperCallNode(CORINFO_HELP_GETREFANY, TYP_BYREF, gtNewCallArgs(op2, op1));
impPushOnStack(op1, tiRetVal);
break;
case CEE_REFANYTYPE:
op1 = impPopStack().val;
// Make certain it is normalized
op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
if (op1->gtOper == GT_OBJ)
{
// Get the address of the refany
op1 = op1->AsOp()->gtOp1;
// Fetch the type from the correct slot
op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
gtNewIconNode(OFFSETOF__CORINFO_TypedReference__type, TYP_I_IMPL));
op1 = gtNewOperNode(GT_IND, TYP_BYREF, op1);
}
else
{
assertImp(op1->gtOper == GT_MKREFANY);
// The pointer may have side-effects
if (op1->AsOp()->gtOp1->gtFlags & GTF_SIDE_EFFECT)
{
impAppendTree(op1->AsOp()->gtOp1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI);
#ifdef DEBUG
impNoteLastILoffs();
#endif
}
// We already have the class handle
op1 = op1->AsOp()->gtOp2;
}
// convert native TypeHandle to RuntimeTypeHandle
{
GenTreeCall::Use* helperArgs = gtNewCallArgs(op1);
op1 = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE_MAYBENULL, TYP_STRUCT,
helperArgs);
CORINFO_CLASS_HANDLE classHandle = impGetTypeHandleClass();
// The handle struct is returned in register
op1->AsCall()->gtReturnType = GetRuntimeHandleUnderlyingType();
op1->AsCall()->gtRetClsHnd = classHandle;
#if FEATURE_MULTIREG_RET
op1->AsCall()->InitializeStructReturnType(this, classHandle, op1->AsCall()->GetUnmanagedCallConv());
#endif
tiRetVal = typeInfo(TI_STRUCT, classHandle);
}
impPushOnStack(op1, tiRetVal);
break;
case CEE_LDTOKEN:
{
/* Get the Class index */
assertImp(sz == sizeof(unsigned));
lastLoadToken = codeAddr;
_impResolveToken(CORINFO_TOKENKIND_Ldtoken);
tokenType = info.compCompHnd->getTokenTypeAsHandle(&resolvedToken);
op1 = impTokenToHandle(&resolvedToken, nullptr, true);
if (op1 == nullptr)
{ // compDonotInline()
return;
}
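// Select the conversion helper based on whether the token resolved to a type, a method, or a field.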
helper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE;
assert(resolvedToken.hClass != nullptr);
if (resolvedToken.hMethod != nullptr)
{
helper = CORINFO_HELP_METHODDESC_TO_STUBRUNTIMEMETHOD;
}
else if (resolvedToken.hField != nullptr)
{
helper = CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD;
}
GenTreeCall::Use* helperArgs = gtNewCallArgs(op1);
op1 = gtNewHelperCallNode(helper, TYP_STRUCT, helperArgs);
// The handle struct is returned in register and
// it could be consumed both as `TYP_STRUCT` and `TYP_REF`.
op1->AsCall()->gtReturnType = GetRuntimeHandleUnderlyingType();
#if FEATURE_MULTIREG_RET
op1->AsCall()->InitializeStructReturnType(this, tokenType, op1->AsCall()->GetUnmanagedCallConv());
#endif
op1->AsCall()->gtRetClsHnd = tokenType;
tiRetVal = verMakeTypeInfo(tokenType);
impPushOnStack(op1, tiRetVal);
}
break;
case CEE_UNBOX:
case CEE_UNBOX_ANY:
{
/* Get the Class index */
assertImp(sz == sizeof(unsigned));
_impResolveToken(CORINFO_TOKENKIND_Class);
JITDUMP(" %08X", resolvedToken.token);
bool runtimeLookup;
op2 = impTokenToHandle(&resolvedToken, &runtimeLookup);
if (op2 == nullptr)
{
assert(compDonotInline());
return;
}
// Run this always so we can get access exceptions even with SkipVerification.
accessAllowedResult =
info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
if (opcode == CEE_UNBOX_ANY && !eeIsValueClass(resolvedToken.hClass))
{
JITDUMP("\n Importing UNBOX.ANY(refClass) as CASTCLASS\n");
op1 = impPopStack().val;
goto CASTCLASS;
}
/* Pop the object and create the unbox helper call */
/* You might think that for UNBOX_ANY we need to push a different */
/* (non-byref) type, but here we're making the tiRetVal that is used */
/* for the intermediate pointer which we then transfer onto the OBJ */
/* instruction. OBJ then creates the appropriate tiRetVal. */
op1 = impPopStack().val;
assertImp(op1->gtType == TYP_REF);
helper = info.compCompHnd->getUnBoxHelper(resolvedToken.hClass);
assert(helper == CORINFO_HELP_UNBOX || helper == CORINFO_HELP_UNBOX_NULLABLE);
// Check legality and profitability of inline expansion for unboxing.
const bool canExpandInline = (helper == CORINFO_HELP_UNBOX);
const bool shouldExpandInline = !compCurBB->isRunRarely() && opts.OptimizationEnabled();
if (canExpandInline && shouldExpandInline)
{
// See if we know anything about the type of op1, the object being unboxed.
bool isExact = false;
bool isNonNull = false;
CORINFO_CLASS_HANDLE clsHnd = gtGetClassHandle(op1, &isExact, &isNonNull);
// We can skip the "exact" bit here as we are comparing to a value class.
// compareTypesForEquality should bail on comparisons for shared value classes.
if (clsHnd != NO_CLASS_HANDLE)
{
const TypeCompareState compare =
info.compCompHnd->compareTypesForEquality(resolvedToken.hClass, clsHnd);
if (compare == TypeCompareState::Must)
{
JITDUMP("\nOptimizing %s (%s) -- type test will succeed\n",
opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY", eeGetClassName(clsHnd));
// For UNBOX, null check (if necessary), and then leave the box payload byref on the stack.
if (opcode == CEE_UNBOX)
{
GenTree* cloneOperand;
op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("optimized unbox clone"));
GenTree* boxPayloadOffset = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL);
GenTree* boxPayloadAddress =
gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, boxPayloadOffset);
GenTree* nullcheck = gtNewNullCheck(op1, block);
GenTree* result = gtNewOperNode(GT_COMMA, TYP_BYREF, nullcheck, boxPayloadAddress);
impPushOnStack(result, tiRetVal);
break;
}
// For UNBOX.ANY load the struct from the box payload byref (the load will nullcheck)
assert(opcode == CEE_UNBOX_ANY);
GenTree* boxPayloadOffset = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL);
GenTree* boxPayloadAddress = gtNewOperNode(GT_ADD, TYP_BYREF, op1, boxPayloadOffset);
impPushOnStack(boxPayloadAddress, tiRetVal);
oper = GT_OBJ;
goto OBJ;
}
else
{
JITDUMP("\nUnable to optimize %s -- can't resolve type comparison\n",
opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY");
}
}
else
{
JITDUMP("\nUnable to optimize %s -- class for [%06u] not known\n",
opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY", dspTreeID(op1));
}
JITDUMP("\n Importing %s as inline sequence\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY");
// we are doing normal unboxing
// inline the common case of the unbox helper
// UNBOX(exp) morphs into
// clone = pop(exp);
// ((*clone == typeToken) ? nop : helper(clone, typeToken));
// push(clone + TARGET_POINTER_SIZE)
//
GenTree* cloneOperand;
op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("inline UNBOX clone1"));
op1 = gtNewMethodTableLookup(op1);
GenTree* condBox = gtNewOperNode(GT_EQ, TYP_INT, op1, op2);
op1 = impCloneExpr(cloneOperand, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("inline UNBOX clone2"));
op2 = impTokenToHandle(&resolvedToken);
if (op2 == nullptr)
{ // compDonotInline()
return;
}
op1 = gtNewHelperCallNode(helper, TYP_VOID, gtNewCallArgs(op2, op1));
op1 = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), op1);
op1 = gtNewQmarkNode(TYP_VOID, condBox, op1->AsColon());
// QMARK nodes cannot reside on the evaluation stack. Because there
// may be other trees on the evaluation stack that side-effect the
// sources of the UNBOX operation we must spill the stack.
impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI);
// Create the address-expression to reference past the object header
// to the beginning of the value-type. Today this means adjusting
// past the base of the object's vtable field, which is pointer sized.
op2 = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL);
op1 = gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, op2);
}
else
{
JITDUMP("\n Importing %s as helper call because %s\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY",
canExpandInline ? "want smaller code or faster jitting" : "inline expansion not legal");
// Don't optimize, just call the helper and be done with it
op1 = gtNewHelperCallNode(helper,
(var_types)((helper == CORINFO_HELP_UNBOX) ? TYP_BYREF : TYP_STRUCT),
gtNewCallArgs(op2, op1));
if (op1->gtType == TYP_STRUCT)
{
op1->AsCall()->gtRetClsHnd = resolvedToken.hClass;
}
}
assert((helper == CORINFO_HELP_UNBOX && op1->gtType == TYP_BYREF) || // Unbox helper returns a byref.
(helper == CORINFO_HELP_UNBOX_NULLABLE &&
varTypeIsStruct(op1)) // UnboxNullable helper returns a struct.
);
/*
----------------------------------------------------------------------
| \ helper | | |
| \ | | |
| \ | CORINFO_HELP_UNBOX | CORINFO_HELP_UNBOX_NULLABLE |
|       \   | (which returns a BYREF) | (which returns a STRUCT)     |
| opcode \ | | |
|---------------------------------------------------------------------
| UNBOX | push the BYREF | spill the STRUCT to a local, |
| | | push the BYREF to this local |
|---------------------------------------------------------------------
| UNBOX_ANY | push a GT_OBJ of | push the STRUCT |
| | the BYREF | For Linux when the |
| | | struct is returned in two |
| | | registers create a temp |
| | | which address is passed to |
| | | the unbox_nullable helper. |
|---------------------------------------------------------------------
*/
if (opcode == CEE_UNBOX)
{
if (helper == CORINFO_HELP_UNBOX_NULLABLE)
{
// Unbox nullable helper returns a struct type.
// We need to spill it to a temp so that we can take its address.
// Here we need the unsafe value cls check, since the address of the struct is taken to be used
// further along and is potentially exploitable.
unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a nullable"));
lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
op2 = gtNewLclvNode(tmp, TYP_STRUCT);
op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
op2 = gtNewLclvNode(tmp, TYP_STRUCT);
op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
}
assert(op1->gtType == TYP_BYREF);
}
else
{
assert(opcode == CEE_UNBOX_ANY);
if (helper == CORINFO_HELP_UNBOX)
{
// Normal unbox helper returns a TYP_BYREF.
impPushOnStack(op1, tiRetVal);
oper = GT_OBJ;
goto OBJ;
}
assert(helper == CORINFO_HELP_UNBOX_NULLABLE && "Make sure the helper is nullable!");
#if FEATURE_MULTIREG_RET
if (varTypeIsStruct(op1) &&
IsMultiRegReturnedType(resolvedToken.hClass, CorInfoCallConvExtension::Managed))
{
// Unbox nullable helper returns a TYP_STRUCT.
// For the multi-reg case we need to spill it to a temp so that
// we can pass the address to the unbox_nullable jit helper.
unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a register returnable nullable"));
lvaTable[tmp].lvIsMultiRegArg = true;
lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
op2 = gtNewLclvNode(tmp, TYP_STRUCT);
op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
op2 = gtNewLclvNode(tmp, TYP_STRUCT);
op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
// In this case the return value of the unbox helper is TYP_BYREF.
// Make sure the right type is placed on the operand type stack.
impPushOnStack(op1, tiRetVal);
// Load the struct.
oper = GT_OBJ;
assert(op1->gtType == TYP_BYREF);
goto OBJ;
}
else
#endif // !FEATURE_MULTIREG_RET
{
// If the struct is not returnable in registers, it has been materialized in the RetBuf.
assert(op1->gtType == TYP_STRUCT);
tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
assert(tiRetVal.IsValueClass());
}
}
impPushOnStack(op1, tiRetVal);
}
break;
case CEE_BOX:
{
/* Get the Class index */
assertImp(sz == sizeof(unsigned));
_impResolveToken(CORINFO_TOKENKIND_Box);
JITDUMP(" %08X", resolvedToken.token);
accessAllowedResult =
info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
// Note BOX can be used on things that are not value classes, in which
// case we get a NOP. However the verifier's view of the type on the
// stack changes (in generic code a 'T' becomes a 'boxed T')
if (!eeIsValueClass(resolvedToken.hClass))
{
JITDUMP("\n Importing BOX(refClass) as NOP\n");
verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo = tiRetVal;
break;
}
// Look ahead for box idioms
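// (Typically sequences such as "box; unbox.any" or "box; isinst; brtrue/brfalse", which
// impBoxPatternMatch can import without materializing the box; this list is illustrative,
// not exhaustive.)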
int matched = impBoxPatternMatch(&resolvedToken, codeAddr + sz, codeEndp);
if (matched >= 0)
{
// Skip the matched IL instructions
sz += matched;
break;
}
impImportAndPushBox(&resolvedToken);
if (compDonotInline())
{
return;
}
}
break;
case CEE_SIZEOF:
/* Get the Class index */
assertImp(sz == sizeof(unsigned));
_impResolveToken(CORINFO_TOKENKIND_Class);
JITDUMP(" %08X", resolvedToken.token);
op1 = gtNewIconNode(info.compCompHnd->getClassSize(resolvedToken.hClass));
impPushOnStack(op1, tiRetVal);
break;
case CEE_CASTCLASS:
/* Get the Class index */
assertImp(sz == sizeof(unsigned));
_impResolveToken(CORINFO_TOKENKIND_Casting);
JITDUMP(" %08X", resolvedToken.token);
if (!opts.IsReadyToRun())
{
op2 = impTokenToHandle(&resolvedToken, nullptr, false);
if (op2 == nullptr)
{ // compDonotInline()
return;
}
}
accessAllowedResult =
info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
op1 = impPopStack().val;
/* Pop the address and create the 'checked cast' helper call */
// At this point we expect typeRef to contain the token, op1 to contain the value being cast,
// and op2 to contain code that creates the type handle corresponding to typeRef
CASTCLASS:
{
GenTree* optTree = impOptimizeCastClassOrIsInst(op1, &resolvedToken, true);
if (optTree != nullptr)
{
impPushOnStack(optTree, tiRetVal);
}
else
{
#ifdef FEATURE_READYTORUN
if (opts.IsReadyToRun())
{
GenTreeCall* opLookup =
impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_CHKCAST, TYP_REF,
gtNewCallArgs(op1));
usingReadyToRunHelper = (opLookup != nullptr);
op1 = (usingReadyToRunHelper ? opLookup : op1);
if (!usingReadyToRunHelper)
{
// TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
// and the chkcastany call with a single call to a dynamic R2R cell that will:
// 1) Load the context
// 2) Perform the generic dictionary lookup and caching, and generate the appropriate
// stub
// 3) Check the object on the stack for the type-cast
// Reason: performance (today, we'll always use the slow helper for the R2R generics case)
op2 = impTokenToHandle(&resolvedToken, nullptr, false);
if (op2 == nullptr)
{ // compDonotInline()
return;
}
}
}
if (!usingReadyToRunHelper)
#endif
{
op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, true, opcodeOffs);
}
if (compDonotInline())
{
return;
}
/* Push the result back on the stack */
impPushOnStack(op1, tiRetVal);
}
}
break;
case CEE_THROW:
// Any block with a throw is rarely executed.
block->bbSetRunRarely();
// Pop the exception object and create the 'throw' helper call
op1 = gtNewHelperCallNode(CORINFO_HELP_THROW, TYP_VOID, gtNewCallArgs(impPopStack().val));
// Fall through to clear out the eval stack.
EVAL_APPEND:
if (verCurrentState.esStackDepth > 0)
{
impEvalSideEffects();
}
assert(verCurrentState.esStackDepth == 0);
goto APPEND;
case CEE_RETHROW:
assert(!compIsForInlining());
if (info.compXcptnsCount == 0)
{
BADCODE("rethrow outside catch");
}
/* Create the 'rethrow' helper call */
op1 = gtNewHelperCallNode(CORINFO_HELP_RETHROW, TYP_VOID);
goto EVAL_APPEND;
case CEE_INITOBJ:
assertImp(sz == sizeof(unsigned));
_impResolveToken(CORINFO_TOKENKIND_Class);
JITDUMP(" %08X", resolvedToken.token);
op2 = gtNewIconNode(0); // Value
op1 = impPopStack().val; // Dest
if (eeIsValueClass(resolvedToken.hClass))
{
op1 = gtNewStructVal(resolvedToken.hClass, op1);
if (op1->OperIs(GT_OBJ))
{
gtSetObjGcInfo(op1->AsObj());
}
}
else
{
size = info.compCompHnd->getClassSize(resolvedToken.hClass);
assert(size == TARGET_POINTER_SIZE);
op1 = gtNewBlockVal(op1, size);
}
op1 = gtNewBlkOpNode(op1, op2, (prefixFlags & PREFIX_VOLATILE) != 0, false);
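// Net effect: a zero-init of the destination: a struct-sized block init for value classes,
// or a pointer-sized store of zero (null) when the token names a reference type.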
goto SPILL_APPEND;
case CEE_INITBLK:
op3 = impPopStack().val; // Size
op2 = impPopStack().val; // Value
op1 = impPopStack().val; // Dst addr
if (op3->IsCnsIntOrI())
{
size = (unsigned)op3->AsIntConCommon()->IconValue();
op1 = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, typGetBlkLayout(size));
op1 = gtNewBlkOpNode(op1, op2, (prefixFlags & PREFIX_VOLATILE) != 0, false);
}
else
{
if (!op2->IsIntegralConst(0))
{
op2 = gtNewOperNode(GT_INIT_VAL, TYP_INT, op2);
}
op1 = new (this, GT_STORE_DYN_BLK) GenTreeStoreDynBlk(op1, op2, op3);
size = 0;
if ((prefixFlags & PREFIX_VOLATILE) != 0)
{
op1->gtFlags |= GTF_BLK_VOLATILE;
}
}
goto SPILL_APPEND;
case CEE_CPBLK:
op3 = impPopStack().val; // Size
op2 = impPopStack().val; // Src addr
op1 = impPopStack().val; // Dst addr
if (op2->OperGet() == GT_ADDR)
{
op2 = op2->AsOp()->gtOp1;
}
else
{
op2 = gtNewOperNode(GT_IND, TYP_STRUCT, op2);
}
if (op3->IsCnsIntOrI())
{
size = (unsigned)op3->AsIntConCommon()->IconValue();
op1 = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, typGetBlkLayout(size));
op1 = gtNewBlkOpNode(op1, op2, (prefixFlags & PREFIX_VOLATILE) != 0, true);
}
else
{
op1 = new (this, GT_STORE_DYN_BLK) GenTreeStoreDynBlk(op1, op2, op3);
size = 0;
if ((prefixFlags & PREFIX_VOLATILE) != 0)
{
op1->gtFlags |= GTF_BLK_VOLATILE;
}
}
goto SPILL_APPEND;
case CEE_CPOBJ:
assertImp(sz == sizeof(unsigned));
_impResolveToken(CORINFO_TOKENKIND_Class);
JITDUMP(" %08X", resolvedToken.token);
if (!eeIsValueClass(resolvedToken.hClass))
{
op1 = impPopStack().val; // address to load from
impBashVarAddrsToI(op1);
assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
impPushOnStack(op1, typeInfo());
opcode = CEE_STIND_REF;
lclTyp = TYP_REF;
goto STIND;
}
op2 = impPopStack().val; // Src
op1 = impPopStack().val; // Dest
op1 = gtNewCpObjNode(op1, op2, resolvedToken.hClass, ((prefixFlags & PREFIX_VOLATILE) != 0));
goto SPILL_APPEND;
case CEE_STOBJ:
{
assertImp(sz == sizeof(unsigned));
_impResolveToken(CORINFO_TOKENKIND_Class);
JITDUMP(" %08X", resolvedToken.token);
if (eeIsValueClass(resolvedToken.hClass))
{
lclTyp = TYP_STRUCT;
}
else
{
lclTyp = TYP_REF;
}
if (lclTyp == TYP_REF)
{
opcode = CEE_STIND_REF;
goto STIND;
}
CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
if (impIsPrimitive(jitTyp))
{
lclTyp = JITtype2varType(jitTyp);
goto STIND;
}
op2 = impPopStack().val; // Value
op1 = impPopStack().val; // Ptr
assertImp(varTypeIsStruct(op2));
op1 = impAssignStructPtr(op1, op2, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
if (op1->OperIsBlkOp() && (prefixFlags & PREFIX_UNALIGNED))
{
op1->gtFlags |= GTF_BLK_UNALIGNED;
}
goto SPILL_APPEND;
}
case CEE_MKREFANY:
assert(!compIsForInlining());
// Being lazy here. Refanys are tricky in terms of gc tracking.
// Since it is uncommon, just don't perform struct promotion in any method that contains mkrefany.
JITDUMP("disabling struct promotion because of mkrefany\n");
fgNoStructPromotion = true;
oper = GT_MKREFANY;
assertImp(sz == sizeof(unsigned));
_impResolveToken(CORINFO_TOKENKIND_Class);
JITDUMP(" %08X", resolvedToken.token);
op2 = impTokenToHandle(&resolvedToken, nullptr, true);
if (op2 == nullptr)
{ // compDonotInline()
return;
}
accessAllowedResult =
info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
op1 = impPopStack().val;
// @SPECVIOLATION: TYP_INT should not be allowed here by a strict reading of the spec.
// But JIT32 allowed it, so we continue to allow it.
assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL || op1->TypeGet() == TYP_INT);
// MKREFANY returns a struct. op2 is the class token.
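// Conceptually the result is a TypedReference-like pair { byref, type handle }: op1 supplies the
// pointer and op2 the type handle (a conceptual sketch, not a statement about field layout).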
op1 = gtNewOperNode(oper, TYP_STRUCT, op1, op2);
impPushOnStack(op1, verMakeTypeInfo(impGetRefAnyClass()));
break;
case CEE_LDOBJ:
{
oper = GT_OBJ;
assertImp(sz == sizeof(unsigned));
_impResolveToken(CORINFO_TOKENKIND_Class);
JITDUMP(" %08X", resolvedToken.token);
OBJ:
tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
if (eeIsValueClass(resolvedToken.hClass))
{
lclTyp = TYP_STRUCT;
}
else
{
lclTyp = TYP_REF;
opcode = CEE_LDIND_REF;
goto LDIND;
}
op1 = impPopStack().val;
assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL);
CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
if (impIsPrimitive(jitTyp))
{
op1 = gtNewOperNode(GT_IND, JITtype2varType(jitTyp), op1);
// Could point anywhere, for example a boxed class static int
op1->gtFlags |= GTF_IND_TGTANYWHERE | GTF_GLOB_REF;
assertImp(varTypeIsArithmetic(op1->gtType));
}
else
{
// OBJ returns a struct
// and an inline argument which is the class token of the loaded obj
op1 = gtNewObjNode(resolvedToken.hClass, op1);
}
op1->gtFlags |= GTF_EXCEPT;
if (prefixFlags & PREFIX_UNALIGNED)
{
op1->gtFlags |= GTF_IND_UNALIGNED;
}
impPushOnStack(op1, tiRetVal);
break;
}
case CEE_LDLEN:
op1 = impPopStack().val;
if (opts.OptimizationEnabled())
{
/* Use GT_ARR_LENGTH operator so rng check opts see this */
GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, op1, OFFSETOF__CORINFO_Array__length, block);
op1 = arrLen;
}
else
{
/* Create the expression "*(array_addr + ArrLenOffs)" */
op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
gtNewIconNode(OFFSETOF__CORINFO_Array__length, TYP_I_IMPL));
op1 = gtNewIndir(TYP_INT, op1);
}
/* Push the result back on the stack */
impPushOnStack(op1, tiRetVal);
break;
case CEE_BREAK:
op1 = gtNewHelperCallNode(CORINFO_HELP_USER_BREAKPOINT, TYP_VOID);
goto SPILL_APPEND;
case CEE_NOP:
if (opts.compDbgCode)
{
op1 = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
goto SPILL_APPEND;
}
break;
/******************************** NYI *******************************/
case 0xCC:
OutputDebugStringA("CLR: Invalid x86 breakpoint in IL stream\n");
FALLTHROUGH;
case CEE_ILLEGAL:
case CEE_MACRO_END:
default:
if (compIsForInlining())
{
compInlineResult->NoteFatal(InlineObservation::CALLEE_COMPILATION_ERROR);
return;
}
BADCODE3("unknown opcode", ": %02X", (int)opcode);
}
codeAddr += sz;
prevOpcode = opcode;
prefixFlags = 0;
}
return;
#undef _impResolveToken
}
#ifdef _PREFAST_
#pragma warning(pop)
#endif
// Push a local/argument tree on the operand stack
void Compiler::impPushVar(GenTree* op, typeInfo tiRetVal)
{
tiRetVal.NormaliseForStack();
if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init) && tiRetVal.IsThisPtr())
{
tiRetVal.SetUninitialisedObjRef();
}
impPushOnStack(op, tiRetVal);
}
//------------------------------------------------------------------------
// impCreateLocal: create a GT_LCL_VAR node to access a local that might need to be normalized on load
//
// Arguments:
// lclNum -- The index into lvaTable
// offset -- The offset to associate with the node
//
// Returns:
// The node
//
GenTreeLclVar* Compiler::impCreateLocalNode(unsigned lclNum DEBUGARG(IL_OFFSET offset))
{
var_types lclTyp;
if (lvaTable[lclNum].lvNormalizeOnLoad())
{
lclTyp = lvaGetRealType(lclNum);
}
else
{
lclTyp = lvaGetActualType(lclNum);
}
return gtNewLclvNode(lclNum, lclTyp DEBUGARG(offset));
}
// Load a local/argument on the operand stack
// lclNum is an index into lvaTable *NOT* the arg/lcl index in the IL
void Compiler::impLoadVar(unsigned lclNum, IL_OFFSET offset, const typeInfo& tiRetVal)
{
impPushVar(impCreateLocalNode(lclNum DEBUGARG(offset)), tiRetVal);
}
// Load an argument on the operand stack
// Shared by the various CEE_LDARG opcodes
// ilArgNum is the argument index as specified in IL.
// It will be mapped to the correct lvaTable index
void Compiler::impLoadArg(unsigned ilArgNum, IL_OFFSET offset)
{
Verify(ilArgNum < info.compILargsCount, "bad arg num");
if (compIsForInlining())
{
if (ilArgNum >= info.compArgsCount)
{
compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_ARGUMENT_NUMBER);
return;
}
impPushVar(impInlineFetchArg(ilArgNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo),
impInlineInfo->lclVarInfo[ilArgNum].lclVerTypeInfo);
}
else
{
if (ilArgNum >= info.compArgsCount)
{
BADCODE("Bad IL");
}
unsigned lclNum = compMapILargNum(ilArgNum); // account for possible hidden param
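// (For example, a hidden return-buffer argument, if present, shifts lvaTable indices relative to
// IL argument numbers; compMapILargNum accounts for such hidden parameters.)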
if (lclNum == info.compThisArg)
{
lclNum = lvaArg0Var;
}
impLoadVar(lclNum, offset);
}
}
// Load a local on the operand stack
// Shared by the various CEE_LDLOC opcodes
// ilLclNum is the local index as specified in IL.
// It will be mapped to the correct lvaTable index
void Compiler::impLoadLoc(unsigned ilLclNum, IL_OFFSET offset)
{
if (compIsForInlining())
{
if (ilLclNum >= info.compMethodInfo->locals.numArgs)
{
compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_LOCAL_NUMBER);
return;
}
// Get the local type
var_types lclTyp = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclTypeInfo;
typeInfo tiRetVal = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclVerTypeInfo;
/* Have we allocated a temp for this local? */
unsigned lclNum = impInlineFetchLocal(ilLclNum DEBUGARG("Inline ldloc first use temp"));
// All vars of inlined methods should be !lvNormalizeOnLoad()
assert(!lvaTable[lclNum].lvNormalizeOnLoad());
lclTyp = genActualType(lclTyp);
impPushVar(gtNewLclvNode(lclNum, lclTyp), tiRetVal);
}
else
{
if (ilLclNum >= info.compMethodInfo->locals.numArgs)
{
BADCODE("Bad IL");
}
unsigned lclNum = info.compArgsCount + ilLclNum;
impLoadVar(lclNum, offset);
}
}
#ifdef TARGET_ARM
/**************************************************************************************
*
* When assigning a vararg call src to a HFA lcl dest, mark that we cannot promote the
* dst struct, because struct promotion will turn it into a float/double variable while
* the rhs will be an int/long variable. We do not generate code for assigning an int into
* a float, yet nothing would prevent such a tree from being created. The tree, however,
* would look like: (=, (typ_float, typ_int)) or (GT_TRANSFER, (typ_float, typ_int))
*
* tmpNum - the lcl dst variable num that is a struct.
* src - the src tree assigned to the dest that is a struct/int (when varargs call.)
* hClass - the type handle for the struct variable.
*
* TODO-ARM-CQ: [301608] This is a rare scenario with varargs and struct promotion coming into play,
* however, we could do a codegen of transferring from int to float registers
* (transfer, not a cast.)
*
*/
void Compiler::impMarkLclDstNotPromotable(unsigned tmpNum, GenTree* src, CORINFO_CLASS_HANDLE hClass)
{
if (src->gtOper == GT_CALL && src->AsCall()->IsVarargs() && IsHfa(hClass))
{
int hfaSlots = GetHfaCount(hClass);
var_types hfaType = GetHfaType(hClass);
// If we have varargs, we morph the method's return type to be "int" at import time, irrespective of its
// original type (struct/float), because the ABI calls for the return to be in integer registers.
// We don't want struct promotion to replace an expression like this:
// lclFld_int = callvar_int() into lclFld_float = callvar_int();
// This means an int is getting assigned to a float without a cast. Prevent the promotion.
if ((hfaType == TYP_DOUBLE && hfaSlots == sizeof(double) / REGSIZE_BYTES) ||
(hfaType == TYP_FLOAT && hfaSlots == sizeof(float) / REGSIZE_BYTES))
{
// Make sure this struct type stays as struct so we can receive the call in a struct.
lvaTable[tmpNum].lvIsMultiRegRet = true;
}
}
}
#endif // TARGET_ARM
#if FEATURE_MULTIREG_RET
//------------------------------------------------------------------------
// impAssignMultiRegTypeToVar: ensure calls that return structs in multiple
// registers return values to suitable temps.
//
// Arguments:
// op -- call returning a struct in registers
// hClass -- class handle for struct
//
// Returns:
// Tree with reference to struct local to use as call return value.
GenTree* Compiler::impAssignMultiRegTypeToVar(GenTree* op,
CORINFO_CLASS_HANDLE hClass DEBUGARG(CorInfoCallConvExtension callConv))
{
unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Return value temp for multireg return"));
impAssignTempGen(tmpNum, op, hClass, (unsigned)CHECK_SPILL_ALL);
GenTree* ret = gtNewLclvNode(tmpNum, lvaTable[tmpNum].lvType);
// TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
ret->gtFlags |= GTF_DONT_CSE;
assert(IsMultiRegReturnedType(hClass, callConv));
// Mark the var so that fields are not promoted and stay together.
lvaTable[tmpNum].lvIsMultiRegRet = true;
return ret;
}
#endif // FEATURE_MULTIREG_RET
//------------------------------------------------------------------------
// impReturnInstruction: import a return or an explicit tail call
//
// Arguments:
// prefixFlags -- active IL prefixes
// opcode -- [in, out] IL opcode
//
// Returns:
// True if import was successful (may fail for some inlinees)
//
bool Compiler::impReturnInstruction(int prefixFlags, OPCODE& opcode)
{
const bool isTailCall = (prefixFlags & PREFIX_TAILCALL) != 0;
#ifdef DEBUG
// If we are importing an inlinee and have GC ref locals we always
// need to have a spill temp for the return value. This temp
// should have been set up in advance, over in fgFindBasicBlocks.
if (compIsForInlining() && impInlineInfo->HasGcRefLocals() && (info.compRetType != TYP_VOID))
{
assert(lvaInlineeReturnSpillTemp != BAD_VAR_NUM);
}
#endif // DEBUG
GenTree* op2 = nullptr;
GenTree* op1 = nullptr;
CORINFO_CLASS_HANDLE retClsHnd = nullptr;
if (info.compRetType != TYP_VOID)
{
StackEntry se = impPopStack();
retClsHnd = se.seTypeInfo.GetClassHandle();
op2 = se.val;
if (!compIsForInlining())
{
impBashVarAddrsToI(op2);
op2 = impImplicitIorI4Cast(op2, info.compRetType);
op2 = impImplicitR4orR8Cast(op2, info.compRetType);
// Note that we allow TYP_I_IMPL<->TYP_BYREF transformation, but only TYP_I_IMPL<-TYP_REF.
assertImp((genActualType(op2->TypeGet()) == genActualType(info.compRetType)) ||
((op2->TypeGet() == TYP_I_IMPL) && TypeIs(info.compRetType, TYP_BYREF)) ||
(op2->TypeIs(TYP_BYREF, TYP_REF) && (info.compRetType == TYP_I_IMPL)) ||
(varTypeIsFloating(op2->gtType) && varTypeIsFloating(info.compRetType)) ||
(varTypeIsStruct(op2) && varTypeIsStruct(info.compRetType)));
#ifdef DEBUG
if (!isTailCall && opts.compGcChecks && (info.compRetType == TYP_REF))
{
// DDB 3483 : JIT Stress: early termination of GC ref's life time in exception code path
// VSW 440513: Incorrect gcinfo on the return value under COMPlus_JitGCChecks=1 for methods with
// one-return BB.
assert(op2->gtType == TYP_REF);
// confirm that the argument is a GC pointer (for debugging (GC stress))
GenTreeCall::Use* args = gtNewCallArgs(op2);
op2 = gtNewHelperCallNode(CORINFO_HELP_CHECK_OBJ, TYP_REF, args);
if (verbose)
{
printf("\ncompGcChecks tree:\n");
gtDispTree(op2);
}
}
#endif
}
else
{
if (verCurrentState.esStackDepth != 0)
{
assert(compIsForInlining());
JITDUMP("CALLSITE_COMPILATION_ERROR: inlinee's stack is not empty.");
compInlineResult->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR);
return false;
}
#ifdef DEBUG
if (verbose)
{
printf("\n\n Inlinee Return expression (before normalization) =>\n");
gtDispTree(op2);
}
#endif
// Make sure the type matches the original call.
var_types returnType = genActualType(op2->gtType);
var_types originalCallType = impInlineInfo->inlineCandidateInfo->fncRetType;
if ((returnType != originalCallType) && (originalCallType == TYP_STRUCT))
{
originalCallType = impNormStructType(impInlineInfo->inlineCandidateInfo->methInfo.args.retTypeClass);
}
if (returnType != originalCallType)
{
// Allow TYP_BYREF to be returned as TYP_I_IMPL and vice versa.
// Allow TYP_REF to be returned as TYP_I_IMPL but NOT vice versa.
if ((TypeIs(returnType, TYP_BYREF, TYP_REF) && (originalCallType == TYP_I_IMPL)) ||
((returnType == TYP_I_IMPL) && TypeIs(originalCallType, TYP_BYREF)))
{
JITDUMP("Allowing return type mismatch: have %s, needed %s\n", varTypeName(returnType),
varTypeName(originalCallType));
}
else
{
JITDUMP("Return type mismatch: have %s, needed %s\n", varTypeName(returnType),
varTypeName(originalCallType));
compInlineResult->NoteFatal(InlineObservation::CALLSITE_RETURN_TYPE_MISMATCH);
return false;
}
}
// Below, we are going to set impInlineInfo->retExpr to the tree with the return
// expression. At this point, retExpr could already be set if there are multiple
// return blocks (meaning fgNeedReturnSpillTemp() == true) and one of
// the other blocks already set it. If there is only a single return block,
// retExpr shouldn't be set. However, this is not true if we reimport a block
// with a return. In that case, retExpr will be set, then the block will be
// reimported, but retExpr won't get cleared as part of setting the block to
// be reimported. The reimported retExpr value should be the same, so even if
// we don't unconditionally overwrite it, it shouldn't matter.
if (info.compRetNativeType != TYP_STRUCT)
{
// compRetNativeType is not TYP_STRUCT.
// This implies it could be either a scalar type or SIMD vector type or
// a struct type that can be normalized to a scalar type.
if (varTypeIsStruct(info.compRetType))
{
noway_assert(info.compRetBuffArg == BAD_VAR_NUM);
// adjust the type away from struct to integral
// and no normalizing
op2 = impFixupStructReturnType(op2, retClsHnd, info.compCallConv);
}
else
{
// Do we have to normalize?
var_types fncRealRetType = JITtype2varType(info.compMethodInfo->args.retType);
if ((varTypeIsSmall(op2->TypeGet()) || varTypeIsSmall(fncRealRetType)) &&
fgCastNeeded(op2, fncRealRetType))
{
// Small-typed return values are normalized by the callee
op2 = gtNewCastNode(TYP_INT, op2, false, fncRealRetType);
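// E.g. a method declared to return 'byte' gets an explicit narrowing cast of its INT-typed
// value here, since callers rely on the callee to normalize small return values.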
}
}
if (fgNeedReturnSpillTemp())
{
assert(info.compRetNativeType != TYP_VOID &&
(fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals()));
// If this method returns a ref type, track the actual types seen
// in the returns.
if (info.compRetType == TYP_REF)
{
bool isExact = false;
bool isNonNull = false;
CORINFO_CLASS_HANDLE returnClsHnd = gtGetClassHandle(op2, &isExact, &isNonNull);
if (impInlineInfo->retExpr == nullptr)
{
// This is the first return, so best known type is the type
// of this return value.
impInlineInfo->retExprClassHnd = returnClsHnd;
impInlineInfo->retExprClassHndIsExact = isExact;
}
else if (impInlineInfo->retExprClassHnd != returnClsHnd)
{
// This return site type differs from earlier seen sites,
// so reset the info and we'll fall back to using the method's
// declared return type for the return spill temp.
impInlineInfo->retExprClassHnd = nullptr;
impInlineInfo->retExprClassHndIsExact = false;
}
}
impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
(unsigned)CHECK_SPILL_ALL);
var_types lclRetType = lvaGetDesc(lvaInlineeReturnSpillTemp)->lvType;
GenTree* tmpOp2 = gtNewLclvNode(lvaInlineeReturnSpillTemp, lclRetType);
op2 = tmpOp2;
#ifdef DEBUG
if (impInlineInfo->retExpr)
{
// Some other block(s) have seen the CEE_RET first.
// Better they spilled to the same temp.
assert(impInlineInfo->retExpr->gtOper == GT_LCL_VAR);
assert(impInlineInfo->retExpr->AsLclVarCommon()->GetLclNum() ==
op2->AsLclVarCommon()->GetLclNum());
}
#endif
}
#ifdef DEBUG
if (verbose)
{
printf("\n\n Inlinee Return expression (after normalization) =>\n");
gtDispTree(op2);
}
#endif
// Report the return expression
impInlineInfo->retExpr = op2;
}
else
{
// compRetNativeType is TYP_STRUCT.
// This implies that struct return via RetBuf arg or multi-reg struct return
GenTreeCall* iciCall = impInlineInfo->iciCall->AsCall();
// Assign the inlinee return into a spill temp.
// spill temp only exists if there are multiple return points
if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
{
// in this case we have to insert multiple struct copies to the temp
// and the retexpr is just the temp.
assert(info.compRetNativeType != TYP_VOID);
assert(fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals());
impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
(unsigned)CHECK_SPILL_ALL);
}
#if defined(TARGET_ARM) || defined(UNIX_AMD64_ABI)
#if defined(TARGET_ARM)
// TODO-ARM64-NYI: HFA
// TODO-AMD64-Unix and TODO-ARM: once the ARM64 functionality is implemented, the
// next ifdefs could be refactored into a single method with the ifdef inside.
if (IsHfa(retClsHnd))
{
// Same as !IsHfa but just don't bother with impAssignStructPtr.
#else // defined(UNIX_AMD64_ABI)
ReturnTypeDesc retTypeDesc;
retTypeDesc.InitializeStructReturnType(this, retClsHnd, info.compCallConv);
unsigned retRegCount = retTypeDesc.GetReturnRegCount();
if (retRegCount != 0)
{
// If single eightbyte, the return type would have been normalized and there won't be a temp var.
// This code will be called only if the struct return has not been normalized (i.e. 2 eightbytes -
// max allowed.)
assert(retRegCount == MAX_RET_REG_COUNT);
// Same as !structDesc.passedInRegisters but just don't bother with impAssignStructPtr.
CLANG_FORMAT_COMMENT_ANCHOR;
#endif // defined(UNIX_AMD64_ABI)
if (fgNeedReturnSpillTemp())
{
if (!impInlineInfo->retExpr)
{
#if defined(TARGET_ARM)
impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType);
#else // defined(UNIX_AMD64_ABI)
// The inlinee compiler has figured out the type of the temp already. Use it here.
impInlineInfo->retExpr =
gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
#endif // defined(UNIX_AMD64_ABI)
}
}
else
{
impInlineInfo->retExpr = op2;
}
}
else
#elif defined(TARGET_ARM64)
ReturnTypeDesc retTypeDesc;
retTypeDesc.InitializeStructReturnType(this, retClsHnd, info.compCallConv);
unsigned retRegCount = retTypeDesc.GetReturnRegCount();
if (retRegCount != 0)
{
assert(!iciCall->HasRetBufArg());
assert(retRegCount >= 2);
if (fgNeedReturnSpillTemp())
{
if (!impInlineInfo->retExpr)
{
// The inlinee compiler has figured out the type of the temp already. Use it here.
impInlineInfo->retExpr =
gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
}
}
else
{
impInlineInfo->retExpr = op2;
}
}
else
#elif defined(TARGET_X86)
ReturnTypeDesc retTypeDesc;
retTypeDesc.InitializeStructReturnType(this, retClsHnd, info.compCallConv);
unsigned retRegCount = retTypeDesc.GetReturnRegCount();
if (retRegCount != 0)
{
assert(!iciCall->HasRetBufArg());
assert(retRegCount == MAX_RET_REG_COUNT);
if (fgNeedReturnSpillTemp())
{
if (!impInlineInfo->retExpr)
{
// The inlinee compiler has figured out the type of the temp already. Use it here.
impInlineInfo->retExpr =
gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
}
}
else
{
impInlineInfo->retExpr = op2;
}
}
else
#endif // defined(TARGET_ARM64)
{
assert(iciCall->HasRetBufArg());
GenTree* dest = gtCloneExpr(iciCall->gtCallArgs->GetNode());
// spill temp only exists if there are multiple return points
if (fgNeedReturnSpillTemp())
{
// if this is the first return we have seen set the retExpr
if (!impInlineInfo->retExpr)
{
impInlineInfo->retExpr =
impAssignStructPtr(dest, gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType),
retClsHnd, (unsigned)CHECK_SPILL_ALL);
}
}
else
{
impInlineInfo->retExpr = impAssignStructPtr(dest, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
}
}
}
if (impInlineInfo->retExpr != nullptr)
{
impInlineInfo->retBB = compCurBB;
}
}
}
if (compIsForInlining())
{
return true;
}
if (info.compRetType == TYP_VOID)
{
// return void
op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
}
else if (info.compRetBuffArg != BAD_VAR_NUM)
{
// Assign value to return buff (first param)
GenTree* retBuffAddr =
gtNewLclvNode(info.compRetBuffArg, TYP_BYREF DEBUGARG(impCurStmtDI.GetLocation().GetOffset()));
op2 = impAssignStructPtr(retBuffAddr, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
impAppendTree(op2, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);
// There are cases where the address of the implicit RetBuf should be returned explicitly (in RAX).
CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_AMD64)
// The x64 (System V and Win64) calling convention requires the implicit
// return buffer to be returned explicitly (in RAX).
// Change the return type to be BYREF.
op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
#else // !defined(TARGET_AMD64)
// On non-AMD64 targets the profiler hook requires us to return the implicit RetBuf explicitly (in RAX).
// In that case the return value of the function is changed to BYREF.
// If the profiler hook is not needed, the return type of the function is TYP_VOID.
if (compIsProfilerHookNeeded())
{
op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
}
#if defined(TARGET_ARM64)
// On ARM64, the native instance calling convention variant
// requires the implicit ByRef to be explicitly returned.
else if (TargetOS::IsWindows && callConvIsInstanceMethodCallConv(info.compCallConv))
{
op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
}
#endif
#if defined(TARGET_X86)
else if (info.compCallConv != CorInfoCallConvExtension::Managed)
{
op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
}
#endif
else
{
// return void
op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
}
#endif // !defined(TARGET_AMD64)
}
else if (varTypeIsStruct(info.compRetType))
{
#if !FEATURE_MULTIREG_RET
// For both ARM architectures the HFA native types are maintained as structs.
// Also on System V AMD64 the multireg structs returns are also left as structs.
noway_assert(info.compRetNativeType != TYP_STRUCT);
#endif
op2 = impFixupStructReturnType(op2, retClsHnd, info.compCallConv);
// return op2
var_types returnType = info.compRetType;
op1 = gtNewOperNode(GT_RETURN, genActualType(returnType), op2);
}
else
{
// return op2
op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetType), op2);
}
// We must have imported a tailcall and jumped to RET
if (isTailCall)
{
assert(verCurrentState.esStackDepth == 0 && impOpcodeIsCallOpcode(opcode));
opcode = CEE_RET; // To prevent trying to spill if CALL_SITE_BOUNDARIES
// impImportCall() would have already appended TYP_VOID calls
if (info.compRetType == TYP_VOID)
{
return true;
}
}
impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);
#ifdef DEBUG
// Remember at which BC offset the tree was finished
impNoteLastILoffs();
#endif
return true;
}
/*****************************************************************************
* Mark the block as unimported.
* Note that the caller is responsible for calling impImportBlockPending(),
* with the appropriate stack-state
*/
inline void Compiler::impReimportMarkBlock(BasicBlock* block)
{
#ifdef DEBUG
if (verbose && (block->bbFlags & BBF_IMPORTED))
{
printf("\n" FMT_BB " will be reimported\n", block->bbNum);
}
#endif
block->bbFlags &= ~BBF_IMPORTED;
}
/*****************************************************************************
* Mark the successors of the given block as unimported.
* Note that the caller is responsible for calling impImportBlockPending()
* for all the successors, with the appropriate stack-state.
*/
void Compiler::impReimportMarkSuccessors(BasicBlock* block)
{
for (BasicBlock* const succBlock : block->Succs())
{
impReimportMarkBlock(succBlock);
}
}
/*****************************************************************************
*
* Filter wrapper that handles only the verification exception code passed in;
* any other exception continues the search.
*/
LONG FilterVerificationExceptions(PEXCEPTION_POINTERS pExceptionPointers, LPVOID lpvParam)
{
if (pExceptionPointers->ExceptionRecord->ExceptionCode == SEH_VERIFICATION_EXCEPTION)
{
return EXCEPTION_EXECUTE_HANDLER;
}
return EXCEPTION_CONTINUE_SEARCH;
}
void Compiler::impVerifyEHBlock(BasicBlock* block, bool isTryStart)
{
assert(block->hasTryIndex());
assert(!compIsForInlining());
unsigned tryIndex = block->getTryIndex();
EHblkDsc* HBtab = ehGetDsc(tryIndex);
if (isTryStart)
{
assert(block->bbFlags & BBF_TRY_BEG);
// The Stack must be empty
//
if (block->bbStkDepth != 0)
{
BADCODE("Evaluation stack must be empty on entry into a try block");
}
}
// Save the stack contents, we'll need to restore it later
//
SavedStack blockState;
impSaveStackState(&blockState, false);
while (HBtab != nullptr)
{
if (isTryStart)
{
// Are we verifying that an instance constructor properly initializes its 'this' pointer once?
// We do not allow the 'this' pointer to be uninitialized when entering most kinds of try regions
//
if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
{
// We trigger an invalid program exception here unless we have a try/fault region.
//
if (HBtab->HasCatchHandler() || HBtab->HasFinallyHandler() || HBtab->HasFilter())
{
BADCODE(
"The 'this' pointer of an instance constructor is not intialized upon entry to a try region");
}
else
{
// Allow a try/fault region to proceed.
assert(HBtab->HasFaultHandler());
}
}
}
// Recursively process the handler block, if we haven't already done so.
BasicBlock* hndBegBB = HBtab->ebdHndBeg;
if (((hndBegBB->bbFlags & BBF_IMPORTED) == 0) && (impGetPendingBlockMember(hndBegBB) == 0))
{
// Construct the proper verification stack state
// either empty or one that contains just
// the Exception Object that we are dealing with
//
verCurrentState.esStackDepth = 0;
if (handlerGetsXcptnObj(hndBegBB->bbCatchTyp))
{
CORINFO_CLASS_HANDLE clsHnd;
if (HBtab->HasFilter())
{
clsHnd = impGetObjectClass();
}
else
{
CORINFO_RESOLVED_TOKEN resolvedToken;
resolvedToken.tokenContext = impTokenLookupContextHandle;
resolvedToken.tokenScope = info.compScopeHnd;
resolvedToken.token = HBtab->ebdTyp;
resolvedToken.tokenType = CORINFO_TOKENKIND_Class;
info.compCompHnd->resolveToken(&resolvedToken);
clsHnd = resolvedToken.hClass;
}
// push the catch arg on the stack, spill to a temp if necessary
// Note: can update HBtab->ebdHndBeg!
hndBegBB = impPushCatchArgOnStack(hndBegBB, clsHnd, false);
}
// Queue up the handler for importing
//
impImportBlockPending(hndBegBB);
}
// Process the filter block, if we haven't already done so.
if (HBtab->HasFilter())
{
/* @VERIFICATION : Ideally the end of filter state should get
propagated to the catch handler; this is an incompleteness,
but it is not a security/compliance issue, since the only
interesting state is the 'thisInit' state.
*/
BasicBlock* filterBB = HBtab->ebdFilter;
if (((filterBB->bbFlags & BBF_IMPORTED) == 0) && (impGetPendingBlockMember(filterBB) == 0))
{
verCurrentState.esStackDepth = 0;
// push the catch arg on the stack, spill to a temp if necessary
// Note: can update HBtab->ebdFilter!
const bool isSingleBlockFilter = (filterBB->bbNext == hndBegBB);
filterBB = impPushCatchArgOnStack(filterBB, impGetObjectClass(), isSingleBlockFilter);
impImportBlockPending(filterBB);
}
}
// This seems redundant ....??
if (verTrackObjCtorInitState && HBtab->HasFaultHandler())
{
/* Recursively process the handler block */
verCurrentState.esStackDepth = 0;
// Queue up the fault handler for importing
//
impImportBlockPending(HBtab->ebdHndBeg);
}
// Now process our enclosing try index (if any)
//
tryIndex = HBtab->ebdEnclosingTryIndex;
if (tryIndex == EHblkDsc::NO_ENCLOSING_INDEX)
{
HBtab = nullptr;
}
else
{
HBtab = ehGetDsc(tryIndex);
}
}
// Restore the stack contents
impRestoreStackState(&blockState);
}
//***************************************************************
// Import the instructions for the given basic block. Perform
// verification, throwing an exception on failure. Push any successor blocks that are enabled for the first
// time, or whose verification pre-state is changed.
#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
void Compiler::impImportBlock(BasicBlock* block)
{
// BBF_INTERNAL blocks only exist during importation due to EH canonicalization. We need to
// handle them specially. In particular, there is no IL to import for them, but we do need
// to mark them as imported and put their successors on the pending import list.
if (block->bbFlags & BBF_INTERNAL)
{
JITDUMP("Marking BBF_INTERNAL block " FMT_BB " as BBF_IMPORTED\n", block->bbNum);
block->bbFlags |= BBF_IMPORTED;
for (BasicBlock* const succBlock : block->Succs())
{
impImportBlockPending(succBlock);
}
return;
}
bool markImport;
assert(block);
/* Make the block globally available */
compCurBB = block;
#ifdef DEBUG
/* Initialize the debug variables */
impCurOpcName = "unknown";
impCurOpcOffs = block->bbCodeOffs;
#endif
/* Set the current stack state to the merged result */
verResetCurrentState(block, &verCurrentState);
/* Now walk the code and import the IL into GenTrees */
struct FilterVerificationExceptionsParam
{
Compiler* pThis;
BasicBlock* block;
};
FilterVerificationExceptionsParam param;
param.pThis = this;
param.block = block;
PAL_TRY(FilterVerificationExceptionsParam*, pParam, ¶m)
{
/* @VERIFICATION : For now, the only state propagation from try
to its handler is the "thisInit" state (stack is empty at start of try).
In general, for state that we track in verification, we need to
model the possibility that an exception might happen at any IL
instruction, so we really need to merge all states that obtain
between IL instructions in a try block into the start states of
all handlers.
However we do not allow the 'this' pointer to be uninitialized when
entering most kinds of try regions (only try/fault are allowed to have
an uninitialized this pointer on entry to the try)
Fortunately, the stack is thrown away when an exception
leads to a handler, so we don't have to worry about that.
We DO, however, have to worry about the "thisInit" state.
But only for the try/fault case.
The only allowed transition is from TIS_Uninit to TIS_Init.
So for a try/fault region for the fault handler block
we will merge the start state of the try begin
and the post-state of each block that is part of this try region
*/
// merge the start state of the try begin
//
if (pParam->block->bbFlags & BBF_TRY_BEG)
{
pParam->pThis->impVerifyEHBlock(pParam->block, true);
}
pParam->pThis->impImportBlockCode(pParam->block);
// As discussed above:
// merge the post-state of each block that is part of this try region
//
if (pParam->block->hasTryIndex())
{
pParam->pThis->impVerifyEHBlock(pParam->block, false);
}
}
PAL_EXCEPT_FILTER(FilterVerificationExceptions)
{
verHandleVerificationFailure(block DEBUGARG(false));
}
PAL_ENDTRY
if (compDonotInline())
{
return;
}
assert(!compDonotInline());
markImport = false;
SPILLSTACK:
unsigned baseTmp = NO_BASE_TMP; // input temps assigned to successor blocks
bool reimportSpillClique = false;
BasicBlock* tgtBlock = nullptr;
/* If the stack is non-empty, we might have to spill its contents */
if (verCurrentState.esStackDepth != 0)
{
impBoxTemp = BAD_VAR_NUM; // if a box temp is used in a block that leaves something
// on the stack, its lifetime is hard to determine, simply
// don't reuse such temps.
Statement* addStmt = nullptr;
/* Do the successors of 'block' have any other predecessors ?
We do not want to do some of the optimizations related to multiRef
if we can reimport blocks */
unsigned multRef = impCanReimport ? unsigned(~0) : 0;
switch (block->bbJumpKind)
{
case BBJ_COND:
addStmt = impExtractLastStmt();
assert(addStmt->GetRootNode()->gtOper == GT_JTRUE);
/* Note if the next block has more than one ancestor */
multRef |= block->bbNext->bbRefs;
/* Does the next block have temps assigned? */
baseTmp = block->bbNext->bbStkTempsIn;
tgtBlock = block->bbNext;
if (baseTmp != NO_BASE_TMP)
{
break;
}
/* Try the target of the jump then */
multRef |= block->bbJumpDest->bbRefs;
baseTmp = block->bbJumpDest->bbStkTempsIn;
tgtBlock = block->bbJumpDest;
break;
case BBJ_ALWAYS:
multRef |= block->bbJumpDest->bbRefs;
baseTmp = block->bbJumpDest->bbStkTempsIn;
tgtBlock = block->bbJumpDest;
break;
case BBJ_NONE:
multRef |= block->bbNext->bbRefs;
baseTmp = block->bbNext->bbStkTempsIn;
tgtBlock = block->bbNext;
break;
case BBJ_SWITCH:
addStmt = impExtractLastStmt();
assert(addStmt->GetRootNode()->gtOper == GT_SWITCH);
for (BasicBlock* const tgtBlock : block->SwitchTargets())
{
multRef |= tgtBlock->bbRefs;
// Thanks to spill cliques, we should have assigned all or none
assert((baseTmp == NO_BASE_TMP) || (baseTmp == tgtBlock->bbStkTempsIn));
baseTmp = tgtBlock->bbStkTempsIn;
if (multRef > 1)
{
break;
}
}
break;
case BBJ_CALLFINALLY:
case BBJ_EHCATCHRET:
case BBJ_RETURN:
case BBJ_EHFINALLYRET:
case BBJ_EHFILTERRET:
case BBJ_THROW:
NO_WAY("can't have 'unreached' end of BB with non-empty stack");
break;
default:
noway_assert(!"Unexpected bbJumpKind");
break;
}
assert(multRef >= 1);
/* Do we have a base temp number? */
bool newTemps = (baseTmp == NO_BASE_TMP);
if (newTemps)
{
/* Grab enough temps for the whole stack */
baseTmp = impGetSpillTmpBase(block);
}
/* Spill all stack entries into temps */
unsigned level, tempNum;
JITDUMP("\nSpilling stack entries into temps\n");
for (level = 0, tempNum = baseTmp; level < verCurrentState.esStackDepth; level++, tempNum++)
{
GenTree* tree = verCurrentState.esStack[level].val;
/* VC generates code where it pushes a byref from one branch, and an int (ldc.i4 0) from
the other. This should merge to a byref in unverifiable code.
However, if the branch which leaves the TYP_I_IMPL on the stack is imported first, the
successor would be imported assuming there was a TYP_I_IMPL on
the stack. Thus the value would not get GC-tracked. Hence,
change the temp to TYP_BYREF and reimport the successors.
Note: We should only allow this in unverifiable code.
*/
if (tree->gtType == TYP_BYREF && lvaTable[tempNum].lvType == TYP_I_IMPL)
{
lvaTable[tempNum].lvType = TYP_BYREF;
impReimportMarkSuccessors(block);
markImport = true;
}
#ifdef TARGET_64BIT
if (genActualType(tree->gtType) == TYP_I_IMPL && lvaTable[tempNum].lvType == TYP_INT)
{
// Some other block in the spill clique set this to "int", but now we have "native int".
// Change the type and go back to re-import any blocks that used the wrong type.
lvaTable[tempNum].lvType = TYP_I_IMPL;
reimportSpillClique = true;
}
else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_I_IMPL)
{
// Spill clique has decided this should be "native int", but this block only pushes an "int".
// Insert a sign-extension to "native int" so we match the clique.
verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL);
}
// Consider the case where one branch left a 'byref' on the stack and the other leaves
// an 'int'. On 32-bit, this is allowed (in non-verifiable code) since they are the same
// size. JIT64 managed to make this work on 64-bit. For compatibility, we support JIT64
// behavior instead of asserting and then generating bad code (where we save/restore the
// low 32 bits of a byref pointer to an 'int' sized local). If the 'int' side has been
// imported already, we need to change the type of the local and reimport the spill clique.
// If the 'byref' side has imported, we insert a cast from int to 'native int' to match
// the 'byref' size.
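// Rough (unverifiable) IL sketch of the scenario described above:
//          ldarg.0
//          brtrue.s PUSH_BYREF
//          ldc.i4.0          // one predecessor pushes an int
//          br.s     JOIN
//   PUSH_BYREF:
//          ldloca.s 0        // the other predecessor pushes a byref
//   JOIN:                    // both edges feed the same spill temp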
if (genActualType(tree->gtType) == TYP_BYREF && lvaTable[tempNum].lvType == TYP_INT)
{
// Some other block in the spill clique set this to "int", but now we have "byref".
// Change the type and go back to re-import any blocks that used the wrong type.
lvaTable[tempNum].lvType = TYP_BYREF;
reimportSpillClique = true;
}
else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_BYREF)
{
// Spill clique has decided this should be "byref", but this block only pushes an "int".
// Insert a sign-extension to "native int" so we match the clique size.
verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL);
}
#endif // TARGET_64BIT
if (tree->gtType == TYP_DOUBLE && lvaTable[tempNum].lvType == TYP_FLOAT)
{
// Some other block in the spill clique set this to "float", but now we have "double".
// Change the type and go back to re-import any blocks that used the wrong type.
lvaTable[tempNum].lvType = TYP_DOUBLE;
reimportSpillClique = true;
}
else if (tree->gtType == TYP_FLOAT && lvaTable[tempNum].lvType == TYP_DOUBLE)
{
// Spill clique has decided this should be "double", but this block only pushes a "float".
// Insert a cast to "double" so we match the clique.
verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, false, TYP_DOUBLE);
}
/* If addStmt has a reference to tempNum (can only happen if we
are spilling to the temps already used by a previous block),
we need to spill addStmt */
if (addStmt != nullptr && !newTemps && gtHasRef(addStmt->GetRootNode(), tempNum))
{
GenTree* addTree = addStmt->GetRootNode();
if (addTree->gtOper == GT_JTRUE)
{
GenTree* relOp = addTree->AsOp()->gtOp1;
assert(relOp->OperIsCompare());
var_types type = genActualType(relOp->AsOp()->gtOp1->TypeGet());
if (gtHasRef(relOp->AsOp()->gtOp1, tempNum))
{
unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op1"));
impAssignTempGen(temp, relOp->AsOp()->gtOp1, level);
type = genActualType(lvaTable[temp].TypeGet());
relOp->AsOp()->gtOp1 = gtNewLclvNode(temp, type);
}
if (gtHasRef(relOp->AsOp()->gtOp2, tempNum))
{
unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op2"));
impAssignTempGen(temp, relOp->AsOp()->gtOp2, level);
type = genActualType(lvaTable[temp].TypeGet());
relOp->AsOp()->gtOp2 = gtNewLclvNode(temp, type);
}
}
else
{
assert(addTree->gtOper == GT_SWITCH && genActualTypeIsIntOrI(addTree->AsOp()->gtOp1->TypeGet()));
unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt SWITCH"));
impAssignTempGen(temp, addTree->AsOp()->gtOp1, level);
addTree->AsOp()->gtOp1 = gtNewLclvNode(temp, genActualType(addTree->AsOp()->gtOp1->TypeGet()));
}
}
/* Spill the stack entry, and replace with the temp */
if (!impSpillStackEntry(level, tempNum
#ifdef DEBUG
,
true, "Spill Stack Entry"
#endif
))
{
if (markImport)
{
BADCODE("bad stack state");
}
// Oops. Something went wrong when spilling. Bad code.
verHandleVerificationFailure(block DEBUGARG(true));
goto SPILLSTACK;
}
}
/* Put back the 'jtrue'/'switch' if we removed it earlier */
if (addStmt != nullptr)
{
impAppendStmt(addStmt, (unsigned)CHECK_SPILL_NONE);
}
}
// Some of the append/spill logic works on compCurBB
assert(compCurBB == block);
/* Save the tree list in the block */
impEndTreeList(block);
// impEndTreeList sets BBF_IMPORTED on the block
// We do *NOT* want to set it later than this because
// impReimportSpillClique might clear it if this block is both a
// predecessor and successor in the current spill clique
assert(block->bbFlags & BBF_IMPORTED);
// If we had a int/native int, or float/double collision, we need to re-import
if (reimportSpillClique)
{
// This will re-import all the successors of block (as well as each of their predecessors)
impReimportSpillClique(block);
// For blocks that haven't been imported yet, we still need to mark them as pending import.
for (BasicBlock* const succ : block->Succs())
{
if ((succ->bbFlags & BBF_IMPORTED) == 0)
{
impImportBlockPending(succ);
}
}
}
else // the normal case
{
// otherwise just import the successors of block
/* Does this block jump to any other blocks? */
for (BasicBlock* const succ : block->Succs())
{
impImportBlockPending(succ);
}
}
}
#ifdef _PREFAST_
#pragma warning(pop)
#endif
/*****************************************************************************/
//
// Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
// necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in
// impPendingBlockMembers). Merges the current verification state into the verification state of "block"
// (its "pre-state").
void Compiler::impImportBlockPending(BasicBlock* block)
{
#ifdef DEBUG
if (verbose)
{
printf("\nimpImportBlockPending for " FMT_BB "\n", block->bbNum);
}
#endif
// We will add a block to the pending set if it has not already been imported (or needs to be re-imported),
// or if it has, but merging in a predecessor's post-state changes the block's pre-state.
// (When we're doing verification, we always attempt the merge to detect verification errors.)
// If the block has not been imported, add to pending set.
bool addToPending = ((block->bbFlags & BBF_IMPORTED) == 0);
// Initialize bbEntryState just the first time we try to add this block to the pending list
// Just because bbEntryState is NULL, doesn't mean the pre-state wasn't previously set
// We use NULL to indicate the 'common' state to avoid memory allocation
if ((block->bbEntryState == nullptr) && ((block->bbFlags & (BBF_IMPORTED | BBF_FAILED_VERIFICATION)) == 0) &&
(impGetPendingBlockMember(block) == 0))
{
verInitBBEntryState(block, &verCurrentState);
assert(block->bbStkDepth == 0);
block->bbStkDepth = static_cast<unsigned short>(verCurrentState.esStackDepth);
assert(addToPending);
assert(impGetPendingBlockMember(block) == 0);
}
else
{
// The stack should have the same height on entry to the block from all its predecessors.
if (block->bbStkDepth != verCurrentState.esStackDepth)
{
#ifdef DEBUG
char buffer[400];
sprintf_s(buffer, sizeof(buffer),
"Block at offset %4.4x to %4.4x in %0.200s entered with different stack depths.\n"
"Previous depth was %d, current depth is %d",
block->bbCodeOffs, block->bbCodeOffsEnd, info.compFullName, block->bbStkDepth,
verCurrentState.esStackDepth);
buffer[400 - 1] = 0;
NO_WAY(buffer);
#else
NO_WAY("Block entered with different stack depths");
#endif
}
if (!addToPending)
{
return;
}
if (block->bbStkDepth > 0)
{
// We need to fix the types of any spill temps that might have changed:
// int->native int, float->double, int->byref, etc.
impRetypeEntryStateTemps(block);
}
// OK, we must add to the pending list, if it's not already in it.
if (impGetPendingBlockMember(block) != 0)
{
return;
}
}
// Get an entry to add to the pending list
PendingDsc* dsc;
if (impPendingFree)
{
// We can reuse one of the freed up dscs.
dsc = impPendingFree;
impPendingFree = dsc->pdNext;
}
else
{
// We have to create a new dsc
dsc = new (this, CMK_Unknown) PendingDsc;
}
dsc->pdBB = block;
dsc->pdSavedStack.ssDepth = verCurrentState.esStackDepth;
dsc->pdThisPtrInit = verCurrentState.thisInitialized;
// Save the stack trees for later
if (verCurrentState.esStackDepth)
{
impSaveStackState(&dsc->pdSavedStack, false);
}
// Add the entry to the pending list
dsc->pdNext = impPendingList;
impPendingList = dsc;
impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
// Various assertions require us to now consider the block as not imported (at least for
// the final time...)
block->bbFlags &= ~BBF_IMPORTED;
#ifdef DEBUG
if (verbose && 0)
{
printf("Added PendingDsc - %08p for " FMT_BB "\n", dspPtr(dsc), block->bbNum);
}
#endif
}
/*****************************************************************************/
//
// Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
// necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in
// impPendingBlockMembers). Does *NOT* change the existing "pre-state" of the block.
void Compiler::impReimportBlockPending(BasicBlock* block)
{
JITDUMP("\nimpReimportBlockPending for " FMT_BB, block->bbNum);
assert(block->bbFlags & BBF_IMPORTED);
// OK, we must add to the pending list, if it's not already in it.
if (impGetPendingBlockMember(block) != 0)
{
return;
}
// Get an entry to add to the pending list
PendingDsc* dsc;
if (impPendingFree)
{
// We can reuse one of the freed up dscs.
dsc = impPendingFree;
impPendingFree = dsc->pdNext;
}
else
{
// We have to create a new dsc
dsc = new (this, CMK_ImpStack) PendingDsc;
}
dsc->pdBB = block;
if (block->bbEntryState)
{
dsc->pdThisPtrInit = block->bbEntryState->thisInitialized;
dsc->pdSavedStack.ssDepth = block->bbEntryState->esStackDepth;
dsc->pdSavedStack.ssTrees = block->bbEntryState->esStack;
}
else
{
dsc->pdThisPtrInit = TIS_Bottom;
dsc->pdSavedStack.ssDepth = 0;
dsc->pdSavedStack.ssTrees = nullptr;
}
// Add the entry to the pending list
dsc->pdNext = impPendingList;
impPendingList = dsc;
impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
// Various assertions require us to now consider the block as not imported (at least for
// the final time...)
block->bbFlags &= ~BBF_IMPORTED;
#ifdef DEBUG
if (verbose && 0)
{
printf("Added PendingDsc - %08p for " FMT_BB "\n", dspPtr(dsc), block->bbNum);
}
#endif
}
void* Compiler::BlockListNode::operator new(size_t sz, Compiler* comp)
{
if (comp->impBlockListNodeFreeList == nullptr)
{
return comp->getAllocator(CMK_BasicBlock).allocate<BlockListNode>(1);
}
else
{
BlockListNode* res = comp->impBlockListNodeFreeList;
comp->impBlockListNodeFreeList = res->m_next;
return res;
}
}
void Compiler::FreeBlockListNode(Compiler::BlockListNode* node)
{
node->m_next = impBlockListNodeFreeList;
impBlockListNodeFreeList = node;
}
void Compiler::impWalkSpillCliqueFromPred(BasicBlock* block, SpillCliqueWalker* callback)
{
bool toDo = true;
noway_assert(!fgComputePredsDone);
if (!fgCheapPredsValid)
{
fgComputeCheapPreds();
}
BlockListNode* succCliqueToDo = nullptr;
BlockListNode* predCliqueToDo = new (this) BlockListNode(block);
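// Alternate between the two worklists until a fixed point is reached: successors of everything on
// the predecessor list join the clique as successors, and predecessors of everything on the
// successor list join as predecessors. Starting from 'block' as a predecessor closes over the
// whole spill clique.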
while (toDo)
{
toDo = false;
// Look at the successors of every member of the predecessor to-do list.
while (predCliqueToDo != nullptr)
{
BlockListNode* node = predCliqueToDo;
predCliqueToDo = node->m_next;
BasicBlock* blk = node->m_blk;
FreeBlockListNode(node);
for (BasicBlock* const succ : blk->Succs())
{
// If it's not already in the clique, add it, and also add it
// as a member of the successor "toDo" set.
if (impSpillCliqueGetMember(SpillCliqueSucc, succ) == 0)
{
callback->Visit(SpillCliqueSucc, succ);
impSpillCliqueSetMember(SpillCliqueSucc, succ, 1);
succCliqueToDo = new (this) BlockListNode(succ, succCliqueToDo);
toDo = true;
}
}
}
// Look at the predecessors of every member of the successor to-do list.
while (succCliqueToDo != nullptr)
{
BlockListNode* node = succCliqueToDo;
succCliqueToDo = node->m_next;
BasicBlock* blk = node->m_blk;
FreeBlockListNode(node);
for (BasicBlockList* pred = blk->bbCheapPreds; pred != nullptr; pred = pred->next)
{
BasicBlock* predBlock = pred->block;
// If it's not already in the clique, add it, and also add it
// as a member of the predecessor "toDo" set.
if (impSpillCliqueGetMember(SpillCliquePred, predBlock) == 0)
{
callback->Visit(SpillCliquePred, predBlock);
impSpillCliqueSetMember(SpillCliquePred, predBlock, 1);
predCliqueToDo = new (this) BlockListNode(predBlock, predCliqueToDo);
toDo = true;
}
}
}
}
// If this fails, it means we didn't walk the spill clique properly and somehow managed
// to miss walking back to include the predecessor we started from.
// The most likely cause: missing or out-of-date bbPreds.
assert(impSpillCliqueGetMember(SpillCliquePred, block) != 0);
}
void Compiler::SetSpillTempsBase::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
{
if (predOrSucc == SpillCliqueSucc)
{
assert(blk->bbStkTempsIn == NO_BASE_TMP); // Should not already be a member of a clique as a successor.
blk->bbStkTempsIn = m_baseTmp;
}
else
{
assert(predOrSucc == SpillCliquePred);
assert(blk->bbStkTempsOut == NO_BASE_TMP); // Should not already be a member of a clique as a predecessor.
blk->bbStkTempsOut = m_baseTmp;
}
}
void Compiler::ReimportSpillClique::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
{
// For Preds we could be a little smarter and just find the existing store
// and re-type it/add a cast, but that is complicated and hopefully very rare, so
// just re-import the whole block (just like we do for successors)
if (((blk->bbFlags & BBF_IMPORTED) == 0) && (m_pComp->impGetPendingBlockMember(blk) == 0))
{
// If we haven't imported this block and we're not going to (because it isn't on
// the pending list) then just ignore it for now.
// This block has either never been imported (EntryState == NULL) or it failed
// verification. Neither state requires us to force it to be imported now.
assert((blk->bbEntryState == nullptr) || (blk->bbFlags & BBF_FAILED_VERIFICATION));
return;
}
// For successors we have a valid verCurrentState, so just mark them for reimport
    // in the 'normal' way.
// Unlike predecessors, we *DO* need to reimport the current block because the
// initial import had the wrong entry state types.
// Similarly, blocks that are currently on the pending list, still need to call
// impImportBlockPending to fixup their entry state.
if (predOrSucc == SpillCliqueSucc)
{
m_pComp->impReimportMarkBlock(blk);
// Set the current stack state to that of the blk->bbEntryState
m_pComp->verResetCurrentState(blk, &m_pComp->verCurrentState);
assert(m_pComp->verCurrentState.thisInitialized == blk->bbThisOnEntry());
m_pComp->impImportBlockPending(blk);
}
else if ((blk != m_pComp->compCurBB) && ((blk->bbFlags & BBF_IMPORTED) != 0))
{
        // As described above, we are only visiting predecessors so they can
        // add the appropriate casts. Since we have already done that for the current
        // block, it does not need to be reimported.
// Nor do we need to reimport blocks that are still pending, but not yet
// imported.
//
// For predecessors, we have no state to seed the EntryState, so we just have
// to assume the existing one is correct.
// If the block is also a successor, it will get the EntryState properly
// updated when it is visited as a successor in the above "if" block.
assert(predOrSucc == SpillCliquePred);
m_pComp->impReimportBlockPending(blk);
}
}
// Re-type the incoming lclVar nodes to match the varDsc.
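// (Illustrative scenario: a stack entry recorded as TYP_INT whose backing spill temp was
// later widened to native int by a spill-clique re-import gets re-typed here; the code
// simply copies each lclVar's current type onto the corresponding stack entry node.)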
void Compiler::impRetypeEntryStateTemps(BasicBlock* blk)
{
if (blk->bbEntryState != nullptr)
{
EntryState* es = blk->bbEntryState;
for (unsigned level = 0; level < es->esStackDepth; level++)
{
GenTree* tree = es->esStack[level].val;
if ((tree->gtOper == GT_LCL_VAR) || (tree->gtOper == GT_LCL_FLD))
{
es->esStack[level].val->gtType = lvaGetDesc(tree->AsLclVarCommon())->TypeGet();
}
}
}
}
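// Returns the base local number of the temps used to spill the IL stack for this block's
// spill clique. On the first request for a clique this grabs enough temps for the whole
// stack and propagates the base to every clique member via SetSpillTempsBase.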
unsigned Compiler::impGetSpillTmpBase(BasicBlock* block)
{
if (block->bbStkTempsOut != NO_BASE_TMP)
{
return block->bbStkTempsOut;
}
#ifdef DEBUG
if (verbose)
{
printf("\n*************** In impGetSpillTmpBase(" FMT_BB ")\n", block->bbNum);
}
#endif // DEBUG
// Otherwise, choose one, and propagate to all members of the spill clique.
// Grab enough temps for the whole stack.
unsigned baseTmp = lvaGrabTemps(verCurrentState.esStackDepth DEBUGARG("IL Stack Entries"));
SetSpillTempsBase callback(baseTmp);
// We do *NOT* need to reset the SpillClique*Members because a block can only be the predecessor
// to one spill clique, and similarly can only be the successor to one spill clique
impWalkSpillCliqueFromPred(block, &callback);
return baseTmp;
}
void Compiler::impReimportSpillClique(BasicBlock* block)
{
#ifdef DEBUG
if (verbose)
{
printf("\n*************** In impReimportSpillClique(" FMT_BB ")\n", block->bbNum);
}
#endif // DEBUG
// If we get here, it is because this block is already part of a spill clique
// and one predecessor had an outgoing live stack slot of type int, and this
// block has an outgoing live stack slot of type native int.
// We need to reset these before traversal because they have already been set
// by the previous walk to determine all the members of the spill clique.
impInlineRoot()->impSpillCliquePredMembers.Reset();
impInlineRoot()->impSpillCliqueSuccMembers.Reset();
ReimportSpillClique callback(this);
impWalkSpillCliqueFromPred(block, &callback);
}
// Set the pre-state of "block" (which should not have a pre-state allocated) to
// a copy of "srcState", cloning tree pointers as required.
void Compiler::verInitBBEntryState(BasicBlock* block, EntryState* srcState)
{
if (srcState->esStackDepth == 0 && srcState->thisInitialized == TIS_Bottom)
{
block->bbEntryState = nullptr;
return;
}
block->bbEntryState = getAllocator(CMK_Unknown).allocate<EntryState>(1);
// block->bbEntryState.esRefcount = 1;
block->bbEntryState->esStackDepth = srcState->esStackDepth;
block->bbEntryState->thisInitialized = TIS_Bottom;
if (srcState->esStackDepth > 0)
{
block->bbSetStack(new (this, CMK_Unknown) StackEntry[srcState->esStackDepth]);
unsigned stackSize = srcState->esStackDepth * sizeof(StackEntry);
memcpy(block->bbEntryState->esStack, srcState->esStack, stackSize);
for (unsigned level = 0; level < srcState->esStackDepth; level++)
{
GenTree* tree = srcState->esStack[level].val;
block->bbEntryState->esStack[level].val = gtCloneExpr(tree);
}
}
if (verTrackObjCtorInitState)
{
verSetThisInit(block, srcState->thisInitialized);
}
return;
}
void Compiler::verSetThisInit(BasicBlock* block, ThisInitState tis)
{
assert(tis != TIS_Bottom); // Precondition.
if (block->bbEntryState == nullptr)
{
block->bbEntryState = new (this, CMK_Unknown) EntryState();
}
block->bbEntryState->thisInitialized = tis;
}
/*
* Resets the current state to the state at the start of the basic block
*/
void Compiler::verResetCurrentState(BasicBlock* block, EntryState* destState)
{
if (block->bbEntryState == nullptr)
{
destState->esStackDepth = 0;
destState->thisInitialized = TIS_Bottom;
return;
}
destState->esStackDepth = block->bbEntryState->esStackDepth;
if (destState->esStackDepth > 0)
{
unsigned stackSize = destState->esStackDepth * sizeof(StackEntry);
memcpy(destState->esStack, block->bbStackOnEntry(), stackSize);
}
destState->thisInitialized = block->bbThisOnEntry();
return;
}
ThisInitState BasicBlock::bbThisOnEntry() const
{
return bbEntryState ? bbEntryState->thisInitialized : TIS_Bottom;
}
unsigned BasicBlock::bbStackDepthOnEntry() const
{
return (bbEntryState ? bbEntryState->esStackDepth : 0);
}
void BasicBlock::bbSetStack(void* stackBuffer)
{
assert(bbEntryState);
assert(stackBuffer);
bbEntryState->esStack = (StackEntry*)stackBuffer;
}
StackEntry* BasicBlock::bbStackOnEntry() const
{
assert(bbEntryState);
return bbEntryState->esStack;
}
void Compiler::verInitCurrentState()
{
verTrackObjCtorInitState = false;
verCurrentState.thisInitialized = TIS_Bottom;
// initialize stack info
verCurrentState.esStackDepth = 0;
assert(verCurrentState.esStack != nullptr);
// copy current state to entry state of first BB
verInitBBEntryState(fgFirstBB, &verCurrentState);
}
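// Returns the root compiler of the current inlining tree: 'this' when compiling the
// actual (root) method, or the inliner's root compiler when compiling an inlinee.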
Compiler* Compiler::impInlineRoot()
{
if (impInlineInfo == nullptr)
{
return this;
}
else
{
return impInlineInfo->InlineRoot;
}
}
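// Gets (and, in the Set variant below, sets) the spill-clique membership byte for 'blk'
// in the given direction; membership state always lives on the inline root compiler.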
BYTE Compiler::impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk)
{
if (predOrSucc == SpillCliquePred)
{
return impInlineRoot()->impSpillCliquePredMembers.Get(blk->bbInd());
}
else
{
assert(predOrSucc == SpillCliqueSucc);
return impInlineRoot()->impSpillCliqueSuccMembers.Get(blk->bbInd());
}
}
void Compiler::impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val)
{
if (predOrSucc == SpillCliquePred)
{
impInlineRoot()->impSpillCliquePredMembers.Set(blk->bbInd(), val);
}
else
{
assert(predOrSucc == SpillCliqueSucc);
impInlineRoot()->impSpillCliqueSuccMembers.Set(blk->bbInd(), val);
}
}
/*****************************************************************************
*
* Convert the instrs ("import") into our internal format (trees). The
* basic flowgraph has already been constructed and is passed in.
*/
void Compiler::impImport()
{
#ifdef DEBUG
if (verbose)
{
printf("*************** In impImport() for %s\n", info.compFullName);
}
#endif
Compiler* inlineRoot = impInlineRoot();
if (info.compMaxStack <= SMALL_STACK_SIZE)
{
impStkSize = SMALL_STACK_SIZE;
}
else
{
impStkSize = info.compMaxStack;
}
if (this == inlineRoot)
{
// Allocate the stack contents
verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize];
}
else
{
// This is the inlinee compiler, steal the stack from the inliner compiler
// (after ensuring that it is large enough).
if (inlineRoot->impStkSize < impStkSize)
{
inlineRoot->impStkSize = impStkSize;
inlineRoot->verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize];
}
verCurrentState.esStack = inlineRoot->verCurrentState.esStack;
}
// initialize the entry state at start of method
verInitCurrentState();
// Initialize stuff related to figuring "spill cliques" (see spec comment for impGetSpillTmpBase).
if (this == inlineRoot) // These are only used on the root of the inlining tree.
{
// We have initialized these previously, but to size 0. Make them larger.
impPendingBlockMembers.Init(getAllocator(), fgBBNumMax * 2);
impSpillCliquePredMembers.Init(getAllocator(), fgBBNumMax * 2);
impSpillCliqueSuccMembers.Init(getAllocator(), fgBBNumMax * 2);
}
inlineRoot->impPendingBlockMembers.Reset(fgBBNumMax * 2);
inlineRoot->impSpillCliquePredMembers.Reset(fgBBNumMax * 2);
inlineRoot->impSpillCliqueSuccMembers.Reset(fgBBNumMax * 2);
impBlockListNodeFreeList = nullptr;
#ifdef DEBUG
impLastILoffsStmt = nullptr;
impNestedStackSpill = false;
#endif
impBoxTemp = BAD_VAR_NUM;
impPendingList = impPendingFree = nullptr;
// Skip leading internal blocks.
// These can arise from needing a leading scratch BB, from EH normalization, and from OSR entry redirects.
//
BasicBlock* entryBlock = fgFirstBB;
while (entryBlock->bbFlags & BBF_INTERNAL)
{
JITDUMP("Marking leading BBF_INTERNAL block " FMT_BB " as BBF_IMPORTED\n", entryBlock->bbNum);
entryBlock->bbFlags |= BBF_IMPORTED;
if (entryBlock->bbJumpKind == BBJ_NONE)
{
entryBlock = entryBlock->bbNext;
}
else if (opts.IsOSR() && (entryBlock->bbJumpKind == BBJ_ALWAYS))
{
entryBlock = entryBlock->bbJumpDest;
}
else
{
assert(!"unexpected bbJumpKind in entry sequence");
}
}
// Note for OSR we'd like to be able to verify this block must be
// stack empty, but won't know that until we've imported...so instead
// we'll BADCODE out if we mess up.
//
// (the concern here is that the runtime asks us to OSR a
// different IL version than the one that matched the method that
// triggered OSR). This should not happen but I might have the
// IL versioning stuff wrong.
//
    // TODO: we also currently expect this block to be a join point,
    // which we should verify when we find jump targets.
impImportBlockPending(entryBlock);
/* Import blocks in the worker-list until there are no more */
while (impPendingList)
{
/* Remove the entry at the front of the list */
PendingDsc* dsc = impPendingList;
impPendingList = impPendingList->pdNext;
impSetPendingBlockMember(dsc->pdBB, 0);
/* Restore the stack state */
verCurrentState.thisInitialized = dsc->pdThisPtrInit;
verCurrentState.esStackDepth = dsc->pdSavedStack.ssDepth;
if (verCurrentState.esStackDepth)
{
impRestoreStackState(&dsc->pdSavedStack);
}
/* Add the entry to the free list for reuse */
dsc->pdNext = impPendingFree;
impPendingFree = dsc;
/* Now import the block */
if (dsc->pdBB->bbFlags & BBF_FAILED_VERIFICATION)
{
verConvertBBToThrowVerificationException(dsc->pdBB DEBUGARG(true));
impEndTreeList(dsc->pdBB);
}
else
{
impImportBlock(dsc->pdBB);
if (compDonotInline())
{
return;
}
if (compIsForImportOnly())
{
return;
}
}
}
#ifdef DEBUG
if (verbose && info.compXcptnsCount)
{
printf("\nAfter impImport() added block for try,catch,finally");
fgDispBasicBlocks();
printf("\n");
}
// Used in impImportBlockPending() for STRESS_CHK_REIMPORT
for (BasicBlock* const block : Blocks())
{
block->bbFlags &= ~BBF_VISITED;
}
#endif
}
// Checks if a typeinfo (usually stored in the type stack) is a struct.
// The invariant here is that if it's not a ref or a method and has a class handle,
// it's a valuetype.
bool Compiler::impIsValueType(typeInfo* pTypeInfo)
{
if (pTypeInfo && pTypeInfo->IsValueClassWithClsHnd())
{
return true;
}
else
{
return false;
}
}
/*****************************************************************************
* Check to see if the tree is the address of a local or
the address of a field in a local.
*lclVarTreeOut will contain the GT_LCL_VAR tree when it returns true.
*/
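// For illustration: this returns true for ADDR(LCL_VAR), or for an ADDR over a chain of
// GT_FIELD nodes whose object operands are ADDRs leading down to a LCL_VAR, e.g.
// ADDR(FIELD(ADDR(LCL_VAR))); a static field in the chain (null FldObj) yields false.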
bool Compiler::impIsAddressInLocal(const GenTree* tree, GenTree** lclVarTreeOut)
{
if (tree->gtOper != GT_ADDR)
{
return false;
}
GenTree* op = tree->AsOp()->gtOp1;
while (op->gtOper == GT_FIELD)
{
op = op->AsField()->GetFldObj();
if (op && op->gtOper == GT_ADDR) // Skip static fields where op will be NULL.
{
op = op->AsOp()->gtOp1;
}
else
{
return false;
}
}
if (op->gtOper == GT_LCL_VAR)
{
if (lclVarTreeOut != nullptr)
{
*lclVarTreeOut = op;
}
return true;
}
else
{
return false;
}
}
//------------------------------------------------------------------------
// impMakeDiscretionaryInlineObservations: make observations that help
// determine the profitability of a discretionary inline
//
// Arguments:
// pInlineInfo -- InlineInfo for the inline, or null for the prejit root
// inlineResult -- InlineResult accumulating information about this inline
//
// Notes:
// If inlining or prejitting the root, this method also makes
// various observations about the method that factor into inline
// decisions. It sets `compNativeSizeEstimate` as a side effect.
void Compiler::impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult)
{
assert((pInlineInfo != nullptr && compIsForInlining()) || // Perform the actual inlining.
(pInlineInfo == nullptr && !compIsForInlining()) // Calculate the static inlining hint for ngen.
);
// If we're really inlining, we should just have one result in play.
assert((pInlineInfo == nullptr) || (inlineResult == pInlineInfo->inlineResult));
// If this is a "forceinline" method, the JIT probably shouldn't have gone
// to the trouble of estimating the native code size. Even if it did, it
// shouldn't be relying on the result of this method.
assert(inlineResult->GetObservation() == InlineObservation::CALLEE_IS_DISCRETIONARY_INLINE);
// Note if the caller contains NEWOBJ or NEWARR.
Compiler* rootCompiler = impInlineRoot();
if ((rootCompiler->optMethodFlags & OMF_HAS_NEWARRAY) != 0)
{
inlineResult->Note(InlineObservation::CALLER_HAS_NEWARRAY);
}
if ((rootCompiler->optMethodFlags & OMF_HAS_NEWOBJ) != 0)
{
inlineResult->Note(InlineObservation::CALLER_HAS_NEWOBJ);
}
bool calleeIsStatic = (info.compFlags & CORINFO_FLG_STATIC) != 0;
bool isSpecialMethod = (info.compFlags & CORINFO_FLG_CONSTRUCTOR) != 0;
if (isSpecialMethod)
{
if (calleeIsStatic)
{
inlineResult->Note(InlineObservation::CALLEE_IS_CLASS_CTOR);
}
else
{
inlineResult->Note(InlineObservation::CALLEE_IS_INSTANCE_CTOR);
}
}
else if (!calleeIsStatic)
{
// Callee is an instance method.
//
// Check if the callee has the same 'this' as the root.
if (pInlineInfo != nullptr)
{
GenTree* thisArg = pInlineInfo->iciCall->AsCall()->gtCallThisArg->GetNode();
assert(thisArg);
bool isSameThis = impIsThis(thisArg);
inlineResult->NoteBool(InlineObservation::CALLSITE_IS_SAME_THIS, isSameThis);
}
}
bool callsiteIsGeneric = (rootCompiler->info.compMethodInfo->args.sigInst.methInstCount != 0) ||
(rootCompiler->info.compMethodInfo->args.sigInst.classInstCount != 0);
bool calleeIsGeneric = (info.compMethodInfo->args.sigInst.methInstCount != 0) ||
(info.compMethodInfo->args.sigInst.classInstCount != 0);
if (!callsiteIsGeneric && calleeIsGeneric)
{
inlineResult->Note(InlineObservation::CALLSITE_NONGENERIC_CALLS_GENERIC);
}
// Inspect callee's arguments (and the actual values at the callsite for them)
CORINFO_SIG_INFO sig = info.compMethodInfo->args;
CORINFO_ARG_LIST_HANDLE sigArg = sig.args;
GenTreeCall::Use* argUse = pInlineInfo == nullptr ? nullptr : pInlineInfo->iciCall->AsCall()->gtCallArgs;
for (unsigned i = 0; i < info.compMethodInfo->args.numArgs; i++)
{
CORINFO_CLASS_HANDLE sigClass;
CorInfoType corType = strip(info.compCompHnd->getArgType(&sig, sigArg, &sigClass));
GenTree* argNode = argUse == nullptr ? nullptr : argUse->GetNode()->gtSkipPutArgType();
if (corType == CORINFO_TYPE_CLASS)
{
sigClass = info.compCompHnd->getArgClass(&sig, sigArg);
}
else if (corType == CORINFO_TYPE_VALUECLASS)
{
inlineResult->Note(InlineObservation::CALLEE_ARG_STRUCT);
}
else if (corType == CORINFO_TYPE_BYREF)
{
sigClass = info.compCompHnd->getArgClass(&sig, sigArg);
corType = info.compCompHnd->getChildType(sigClass, &sigClass);
}
if (argNode != nullptr)
{
bool isExact = false;
bool isNonNull = false;
CORINFO_CLASS_HANDLE argCls = gtGetClassHandle(argNode, &isExact, &isNonNull);
if (argCls != nullptr)
{
const bool isArgValueType = eeIsValueClass(argCls);
// Exact class of the arg is known
if (isExact && !isArgValueType)
{
inlineResult->Note(InlineObservation::CALLSITE_ARG_EXACT_CLS);
if ((argCls != sigClass) && (sigClass != nullptr))
{
// .. but the signature accepts a less concrete type.
inlineResult->Note(InlineObservation::CALLSITE_ARG_EXACT_CLS_SIG_IS_NOT);
}
}
// Arg is a reference type in the signature and a boxed value type was passed.
else if (isArgValueType && (corType == CORINFO_TYPE_CLASS))
{
inlineResult->Note(InlineObservation::CALLSITE_ARG_BOXED);
}
}
if (argNode->OperIsConst())
{
inlineResult->Note(InlineObservation::CALLSITE_ARG_CONST);
}
argUse = argUse->GetNext();
}
sigArg = info.compCompHnd->getArgNext(sigArg);
}
// Note if the callee's return type is a value type
if (info.compMethodInfo->args.retType == CORINFO_TYPE_VALUECLASS)
{
inlineResult->Note(InlineObservation::CALLEE_RETURNS_STRUCT);
}
// Note if the callee's class is a promotable struct
if ((info.compClassAttr & CORINFO_FLG_VALUECLASS) != 0)
{
assert(structPromotionHelper != nullptr);
if (structPromotionHelper->CanPromoteStructType(info.compClassHnd))
{
inlineResult->Note(InlineObservation::CALLEE_CLASS_PROMOTABLE);
}
inlineResult->Note(InlineObservation::CALLEE_CLASS_VALUETYPE);
}
#ifdef FEATURE_SIMD
    // Note if this method has SIMD args or a SIMD return value
if (pInlineInfo != nullptr && pInlineInfo->hasSIMDTypeArgLocalOrReturn)
{
inlineResult->Note(InlineObservation::CALLEE_HAS_SIMD);
}
#endif // FEATURE_SIMD
// Roughly classify callsite frequency.
InlineCallsiteFrequency frequency = InlineCallsiteFrequency::UNUSED;
// If this is a prejit root, or a maximally hot block...
if ((pInlineInfo == nullptr) || (pInlineInfo->iciBlock->isMaxBBWeight()))
{
frequency = InlineCallsiteFrequency::HOT;
}
// No training data. Look for loop-like things.
// We consider a recursive call loop-like. Do not give the inlining boost to the method itself.
// However, give it to things nearby.
else if ((pInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) &&
(pInlineInfo->fncHandle != pInlineInfo->inlineCandidateInfo->ilCallerHandle))
{
frequency = InlineCallsiteFrequency::LOOP;
}
else if (pInlineInfo->iciBlock->hasProfileWeight() && (pInlineInfo->iciBlock->bbWeight > BB_ZERO_WEIGHT))
{
frequency = InlineCallsiteFrequency::WARM;
}
// Now modify the multiplier based on where we're called from.
else if (pInlineInfo->iciBlock->isRunRarely() || ((info.compFlags & FLG_CCTOR) == FLG_CCTOR))
{
frequency = InlineCallsiteFrequency::RARE;
}
else
{
frequency = InlineCallsiteFrequency::BORING;
}
// Also capture the block weight of the call site.
//
// In the prejit root case, assume at runtime there might be a hot call site
// for this method, so we won't prematurely conclude this method should never
// be inlined.
//
weight_t weight = 0;
if (pInlineInfo != nullptr)
{
weight = pInlineInfo->iciBlock->bbWeight;
}
else
{
const weight_t prejitHotCallerWeight = 1000000.0;
weight = prejitHotCallerWeight;
}
inlineResult->NoteInt(InlineObservation::CALLSITE_FREQUENCY, static_cast<int>(frequency));
inlineResult->NoteInt(InlineObservation::CALLSITE_WEIGHT, (int)(weight));
bool hasProfile = false;
double profileFreq = 0.0;
// If the call site has profile data, report the relative frequency of the site.
//
if ((pInlineInfo != nullptr) && rootCompiler->fgHaveSufficientProfileData())
{
const weight_t callSiteWeight = pInlineInfo->iciBlock->bbWeight;
const weight_t entryWeight = rootCompiler->fgFirstBB->bbWeight;
profileFreq = fgProfileWeightsEqual(entryWeight, 0.0) ? 0.0 : callSiteWeight / entryWeight;
hasProfile = true;
assert(callSiteWeight >= 0);
assert(entryWeight >= 0);
}
else if (pInlineInfo == nullptr)
{
// Simulate a hot callsite for PrejitRoot mode.
hasProfile = true;
profileFreq = 1.0;
}
inlineResult->NoteBool(InlineObservation::CALLSITE_HAS_PROFILE, hasProfile);
inlineResult->NoteDouble(InlineObservation::CALLSITE_PROFILE_FREQUENCY, profileFreq);
}
/*****************************************************************************
 This method makes a STATIC inlining decision based on the IL code.
It should not make any inlining decision based on the context.
If forceInline is true, then the inlining decision should not depend on
performance heuristics (code size, etc.).
*/
void Compiler::impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle,
CORINFO_METHOD_INFO* methInfo,
bool forceInline,
InlineResult* inlineResult)
{
unsigned codeSize = methInfo->ILCodeSize;
// We shouldn't have made up our minds yet...
assert(!inlineResult->IsDecided());
if (methInfo->EHcount)
{
inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_EH);
return;
}
if ((methInfo->ILCode == nullptr) || (codeSize == 0))
{
inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NO_BODY);
return;
}
// For now we don't inline varargs (import code can't handle it)
if (methInfo->args.isVarArg())
{
inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
return;
}
// Reject if it has too many locals.
// This is currently an implementation limit due to fixed-size arrays in the
// inline info, rather than a performance heuristic.
inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_LOCALS, methInfo->locals.numArgs);
if (methInfo->locals.numArgs > MAX_INL_LCLS)
{
inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_LOCALS);
return;
}
// Make sure there aren't too many arguments.
// This is currently an implementation limit due to fixed-size arrays in the
// inline info, rather than a performance heuristic.
inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_ARGUMENTS, methInfo->args.numArgs);
if (methInfo->args.numArgs > MAX_INL_ARGS)
{
inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_ARGUMENTS);
return;
}
// Note force inline state
inlineResult->NoteBool(InlineObservation::CALLEE_IS_FORCE_INLINE, forceInline);
// Note IL code size
inlineResult->NoteInt(InlineObservation::CALLEE_IL_CODE_SIZE, codeSize);
if (inlineResult->IsFailure())
{
return;
}
// Make sure maxstack is not too big
inlineResult->NoteInt(InlineObservation::CALLEE_MAXSTACK, methInfo->maxStack);
if (inlineResult->IsFailure())
{
return;
}
}
/*****************************************************************************
*/
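// impCheckCanInline: run the EE and JIT checks that decide whether 'call' to 'fncHandle'
// may be inlined; on success allocate (or reuse) and fill the InlineCandidateInfo
// returned via ppInlineCandidateInfo, otherwise record the failure on 'inlineResult'.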
void Compiler::impCheckCanInline(GenTreeCall* call,
CORINFO_METHOD_HANDLE fncHandle,
unsigned methAttr,
CORINFO_CONTEXT_HANDLE exactContextHnd,
InlineCandidateInfo** ppInlineCandidateInfo,
InlineResult* inlineResult)
{
// Either EE or JIT might throw exceptions below.
// If that happens, just don't inline the method.
struct Param
{
Compiler* pThis;
GenTreeCall* call;
CORINFO_METHOD_HANDLE fncHandle;
unsigned methAttr;
CORINFO_CONTEXT_HANDLE exactContextHnd;
InlineResult* result;
InlineCandidateInfo** ppInlineCandidateInfo;
} param;
memset(¶m, 0, sizeof(param));
param.pThis = this;
param.call = call;
param.fncHandle = fncHandle;
param.methAttr = methAttr;
param.exactContextHnd = (exactContextHnd != nullptr) ? exactContextHnd : MAKE_METHODCONTEXT(fncHandle);
param.result = inlineResult;
param.ppInlineCandidateInfo = ppInlineCandidateInfo;
bool success = eeRunWithErrorTrap<Param>(
[](Param* pParam) {
CorInfoInitClassResult initClassResult;
#ifdef DEBUG
const char* methodName;
const char* className;
methodName = pParam->pThis->eeGetMethodName(pParam->fncHandle, &className);
if (JitConfig.JitNoInline())
{
pParam->result->NoteFatal(InlineObservation::CALLEE_IS_JIT_NOINLINE);
goto _exit;
}
#endif
/* Try to get the code address/size for the method */
CORINFO_METHOD_INFO methInfo;
if (!pParam->pThis->info.compCompHnd->getMethodInfo(pParam->fncHandle, &methInfo))
{
pParam->result->NoteFatal(InlineObservation::CALLEE_NO_METHOD_INFO);
goto _exit;
}
// Profile data allows us to avoid early "too many IL bytes" outs.
pParam->result->NoteBool(InlineObservation::CALLSITE_HAS_PROFILE,
pParam->pThis->fgHaveSufficientProfileData());
bool forceInline;
forceInline = !!(pParam->methAttr & CORINFO_FLG_FORCEINLINE);
pParam->pThis->impCanInlineIL(pParam->fncHandle, &methInfo, forceInline, pParam->result);
if (pParam->result->IsFailure())
{
assert(pParam->result->IsNever());
goto _exit;
}
// Speculatively check if initClass() can be done.
// If it can be done, we will try to inline the method.
initClassResult =
pParam->pThis->info.compCompHnd->initClass(nullptr /* field */, pParam->fncHandle /* method */,
pParam->exactContextHnd /* context */);
if (initClassResult & CORINFO_INITCLASS_DONT_INLINE)
{
pParam->result->NoteFatal(InlineObservation::CALLSITE_CANT_CLASS_INIT);
goto _exit;
}
            // Give the EE the final say in whether to inline or not.
// This should be last since for verifiable code, this can be expensive
/* VM Inline check also ensures that the method is verifiable if needed */
CorInfoInline vmResult;
vmResult = pParam->pThis->info.compCompHnd->canInline(pParam->pThis->info.compMethodHnd, pParam->fncHandle);
if (vmResult == INLINE_FAIL)
{
pParam->result->NoteFatal(InlineObservation::CALLSITE_IS_VM_NOINLINE);
}
else if (vmResult == INLINE_NEVER)
{
pParam->result->NoteFatal(InlineObservation::CALLEE_IS_VM_NOINLINE);
}
if (pParam->result->IsFailure())
{
// Make sure not to report this one. It was already reported by the VM.
pParam->result->SetReported();
goto _exit;
}
/* Get the method properties */
CORINFO_CLASS_HANDLE clsHandle;
clsHandle = pParam->pThis->info.compCompHnd->getMethodClass(pParam->fncHandle);
unsigned clsAttr;
clsAttr = pParam->pThis->info.compCompHnd->getClassAttribs(clsHandle);
/* Get the return type */
var_types fncRetType;
fncRetType = pParam->call->TypeGet();
#ifdef DEBUG
var_types fncRealRetType;
fncRealRetType = JITtype2varType(methInfo.args.retType);
assert((genActualType(fncRealRetType) == genActualType(fncRetType)) ||
// <BUGNUM> VSW 288602 </BUGNUM>
                   // In case of IJW, we allow assigning a native pointer to a BYREF.
(fncRetType == TYP_BYREF && methInfo.args.retType == CORINFO_TYPE_PTR) ||
(varTypeIsStruct(fncRetType) && (fncRealRetType == TYP_STRUCT)));
#endif
// Allocate an InlineCandidateInfo structure,
//
// Or, reuse the existing GuardedDevirtualizationCandidateInfo,
// which was pre-allocated to have extra room.
//
InlineCandidateInfo* pInfo;
if (pParam->call->IsGuardedDevirtualizationCandidate())
{
pInfo = pParam->call->gtInlineCandidateInfo;
}
else
{
pInfo = new (pParam->pThis, CMK_Inlining) InlineCandidateInfo;
// Null out bits we don't use when we're just inlining
pInfo->guardedClassHandle = nullptr;
pInfo->guardedMethodHandle = nullptr;
pInfo->guardedMethodUnboxedEntryHandle = nullptr;
pInfo->likelihood = 0;
pInfo->requiresInstMethodTableArg = false;
}
pInfo->methInfo = methInfo;
pInfo->ilCallerHandle = pParam->pThis->info.compMethodHnd;
pInfo->clsHandle = clsHandle;
pInfo->exactContextHnd = pParam->exactContextHnd;
pInfo->retExpr = nullptr;
pInfo->preexistingSpillTemp = BAD_VAR_NUM;
pInfo->clsAttr = clsAttr;
pInfo->methAttr = pParam->methAttr;
pInfo->initClassResult = initClassResult;
pInfo->fncRetType = fncRetType;
pInfo->exactContextNeedsRuntimeLookup = false;
pInfo->inlinersContext = pParam->pThis->compInlineContext;
// Note exactContextNeedsRuntimeLookup is reset later on,
// over in impMarkInlineCandidate.
*(pParam->ppInlineCandidateInfo) = pInfo;
_exit:;
},
¶m);
if (!success)
{
param.result->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR);
}
}
//------------------------------------------------------------------------
// impInlineRecordArgInfo: record information about an inline candidate argument
//
// Arguments:
// pInlineInfo - inline info for the inline candidate
// curArgVal - tree for the caller actual argument value
// argNum - logical index of this argument
// inlineResult - result of ongoing inline evaluation
//
// Notes:
//
// Checks for various inline blocking conditions and makes notes in
// the inline info arg table about the properties of the actual. These
// properties are used later by impInlineFetchArg to determine how best to
// pass the argument into the inlinee.
void Compiler::impInlineRecordArgInfo(InlineInfo* pInlineInfo,
GenTree* curArgVal,
unsigned argNum,
InlineResult* inlineResult)
{
InlArgInfo* inlCurArgInfo = &pInlineInfo->inlArgInfo[argNum];
inlCurArgInfo->argNode = curArgVal; // Save the original tree, with PUT_ARG and RET_EXPR.
curArgVal = curArgVal->gtSkipPutArgType();
curArgVal = curArgVal->gtRetExprVal();
if (curArgVal->gtOper == GT_MKREFANY)
{
inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_IS_MKREFANY);
return;
}
GenTree* lclVarTree;
const bool isAddressInLocal = impIsAddressInLocal(curArgVal, &lclVarTree);
if (isAddressInLocal && varTypeIsStruct(lclVarTree))
{
inlCurArgInfo->argIsByRefToStructLocal = true;
#ifdef FEATURE_SIMD
if (lvaTable[lclVarTree->AsLclVarCommon()->GetLclNum()].lvSIMDType)
{
pInlineInfo->hasSIMDTypeArgLocalOrReturn = true;
}
#endif // FEATURE_SIMD
}
if (curArgVal->gtFlags & GTF_ALL_EFFECT)
{
inlCurArgInfo->argHasGlobRef = (curArgVal->gtFlags & GTF_GLOB_REF) != 0;
inlCurArgInfo->argHasSideEff = (curArgVal->gtFlags & (GTF_ALL_EFFECT & ~GTF_GLOB_REF)) != 0;
}
if (curArgVal->gtOper == GT_LCL_VAR)
{
inlCurArgInfo->argIsLclVar = true;
/* Remember the "original" argument number */
INDEBUG(curArgVal->AsLclVar()->gtLclILoffs = argNum;)
}
if (curArgVal->IsInvariant())
{
inlCurArgInfo->argIsInvariant = true;
if (inlCurArgInfo->argIsThis && (curArgVal->gtOper == GT_CNS_INT) && (curArgVal->AsIntCon()->gtIconVal == 0))
{
// Abort inlining at this call site
inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_HAS_NULL_THIS);
return;
}
}
bool isExact = false;
bool isNonNull = false;
inlCurArgInfo->argIsExact = (gtGetClassHandle(curArgVal, &isExact, &isNonNull) != NO_CLASS_HANDLE) && isExact;
// If the arg is a local that is address-taken, we can't safely
// directly substitute it into the inlinee.
//
// Previously we'd accomplish this by setting "argHasLdargaOp" but
// that has a stronger meaning: that the arg value can change in
// the method body. Using that flag prevents type propagation,
// which is safe in this case.
//
// Instead mark the arg as having a caller local ref.
if (!inlCurArgInfo->argIsInvariant && gtHasLocalsWithAddrOp(curArgVal))
{
inlCurArgInfo->argHasCallerLocalRef = true;
}
#ifdef DEBUG
if (verbose)
{
if (inlCurArgInfo->argIsThis)
{
printf("thisArg:");
}
else
{
printf("\nArgument #%u:", argNum);
}
if (inlCurArgInfo->argIsLclVar)
{
printf(" is a local var");
}
if (inlCurArgInfo->argIsInvariant)
{
printf(" is a constant");
}
if (inlCurArgInfo->argHasGlobRef)
{
printf(" has global refs");
}
if (inlCurArgInfo->argHasCallerLocalRef)
{
printf(" has caller local ref");
}
if (inlCurArgInfo->argHasSideEff)
{
printf(" has side effects");
}
if (inlCurArgInfo->argHasLdargaOp)
{
printf(" has ldarga effect");
}
if (inlCurArgInfo->argHasStargOp)
{
printf(" has starg effect");
}
if (inlCurArgInfo->argIsByRefToStructLocal)
{
printf(" is byref to a struct local");
}
printf("\n");
gtDispTree(curArgVal);
printf("\n");
}
#endif
}
//------------------------------------------------------------------------
// impInlineInitVars: setup inline information for inlinee args and locals
//
// Arguments:
// pInlineInfo - inline info for the inline candidate
//
// Notes:
// This method primarily adds caller-supplied info to the inlArgInfo
// and sets up the lclVarInfo table.
//
// For args, the inlArgInfo records properties of the actual argument
// including the tree node that produces the arg value. This node is
// usually the tree node present at the call, but may also differ in
// various ways:
// - when the call arg is a GT_RET_EXPR, we search back through the ret
// expr chain for the actual node. Note this will either be the original
// call (which will be a failed inline by this point), or the return
// expression from some set of inlines.
// - when argument type casting is needed the necessary casts are added
// around the argument node.
// - if an argument can be simplified by folding then the node here is the
// folded value.
//
// The method may make observations that lead to marking this candidate as
// a failed inline. If this happens the initialization is abandoned immediately
// to try and reduce the jit time cost for a failed inline.
void Compiler::impInlineInitVars(InlineInfo* pInlineInfo)
{
assert(!compIsForInlining());
GenTreeCall* call = pInlineInfo->iciCall;
CORINFO_METHOD_INFO* methInfo = &pInlineInfo->inlineCandidateInfo->methInfo;
unsigned clsAttr = pInlineInfo->inlineCandidateInfo->clsAttr;
InlArgInfo* inlArgInfo = pInlineInfo->inlArgInfo;
InlLclVarInfo* lclVarInfo = pInlineInfo->lclVarInfo;
InlineResult* inlineResult = pInlineInfo->inlineResult;
// Inlined methods always use the managed calling convention
const bool hasRetBuffArg = impMethodInfo_hasRetBuffArg(methInfo, CorInfoCallConvExtension::Managed);
    /* init the argument struct */
memset(inlArgInfo, 0, (MAX_INL_ARGS + 1) * sizeof(inlArgInfo[0]));
GenTreeCall::Use* thisArg = call->gtCallThisArg;
unsigned argCnt = 0; // Count of the arguments
assert((methInfo->args.hasThis()) == (thisArg != nullptr));
if (thisArg != nullptr)
{
inlArgInfo[0].argIsThis = true;
impInlineRecordArgInfo(pInlineInfo, thisArg->GetNode(), argCnt, inlineResult);
if (inlineResult->IsFailure())
{
return;
}
/* Increment the argument count */
argCnt++;
}
/* Record some information about each of the arguments */
bool hasTypeCtxtArg = (methInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) != 0;
#if USER_ARGS_COME_LAST
unsigned typeCtxtArg = (thisArg != nullptr) ? 1 : 0;
#else // USER_ARGS_COME_LAST
unsigned typeCtxtArg = methInfo->args.totalILArgs();
#endif // USER_ARGS_COME_LAST
for (GenTreeCall::Use& use : call->Args())
{
if (hasRetBuffArg && (&use == call->gtCallArgs))
{
continue;
}
// Ignore the type context argument
if (hasTypeCtxtArg && (argCnt == typeCtxtArg))
{
pInlineInfo->typeContextArg = typeCtxtArg;
typeCtxtArg = 0xFFFFFFFF;
continue;
}
GenTree* actualArg = gtFoldExpr(use.GetNode());
impInlineRecordArgInfo(pInlineInfo, actualArg, argCnt, inlineResult);
if (inlineResult->IsFailure())
{
return;
}
/* Increment the argument count */
argCnt++;
}
/* Make sure we got the arg number right */
assert(argCnt == methInfo->args.totalILArgs());
#ifdef FEATURE_SIMD
bool foundSIMDType = pInlineInfo->hasSIMDTypeArgLocalOrReturn;
#endif // FEATURE_SIMD
/* We have typeless opcodes, get type information from the signature */
if (thisArg != nullptr)
{
lclVarInfo[0].lclVerTypeInfo = verMakeTypeInfo(pInlineInfo->inlineCandidateInfo->clsHandle);
lclVarInfo[0].lclHasLdlocaOp = false;
#ifdef FEATURE_SIMD
// We always want to check isSIMDClass, since we want to set foundSIMDType (to increase
// the inlining multiplier) for anything in that assembly.
// But we only need to normalize it if it is a TYP_STRUCT
// (which we need to do even if we have already set foundSIMDType).
if (!foundSIMDType && isSIMDorHWSIMDClass(&(lclVarInfo[0].lclVerTypeInfo)))
{
foundSIMDType = true;
}
#endif // FEATURE_SIMD
var_types sigType = ((clsAttr & CORINFO_FLG_VALUECLASS) != 0) ? TYP_BYREF : TYP_REF;
lclVarInfo[0].lclTypeInfo = sigType;
GenTree* thisArgNode = thisArg->GetNode();
assert(varTypeIsGC(thisArgNode->TypeGet()) || // "this" is managed
               ((thisArgNode->TypeGet() == TYP_I_IMPL) && // "this" is unmgd but the method's class doesn't care
(clsAttr & CORINFO_FLG_VALUECLASS)));
if (genActualType(thisArgNode->TypeGet()) != genActualType(sigType))
{
if (sigType == TYP_REF)
{
/* The argument cannot be bashed into a ref (see bug 750871) */
inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_REF);
return;
}
/* This can only happen with byrefs <-> ints/shorts */
assert(sigType == TYP_BYREF);
assert((genActualType(thisArgNode->TypeGet()) == TYP_I_IMPL) || (thisArgNode->TypeGet() == TYP_BYREF));
lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
}
}
/* Init the types of the arguments and make sure the types
* from the trees match the types in the signature */
CORINFO_ARG_LIST_HANDLE argLst;
argLst = methInfo->args.args;
unsigned i;
for (i = (thisArg ? 1 : 0); i < argCnt; i++, argLst = info.compCompHnd->getArgNext(argLst))
{
var_types sigType = (var_types)eeGetArgType(argLst, &methInfo->args);
lclVarInfo[i].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->args, argLst);
#ifdef FEATURE_SIMD
if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[i].lclVerTypeInfo)))
{
// If this is a SIMD class (i.e. in the SIMD assembly), then we will consider that we've
// found a SIMD type, even if this may not be a type we recognize (the assumption is that
// it is likely to use a SIMD type, and therefore we want to increase the inlining multiplier).
foundSIMDType = true;
if (sigType == TYP_STRUCT)
{
var_types structType = impNormStructType(lclVarInfo[i].lclVerTypeInfo.GetClassHandle());
sigType = structType;
}
}
#endif // FEATURE_SIMD
lclVarInfo[i].lclTypeInfo = sigType;
lclVarInfo[i].lclHasLdlocaOp = false;
/* Does the tree type match the signature type? */
GenTree* inlArgNode = inlArgInfo[i].argNode;
if ((sigType != inlArgNode->gtType) || inlArgNode->OperIs(GT_PUTARG_TYPE))
{
assert(impCheckImplicitArgumentCoercion(sigType, inlArgNode->gtType));
assert(!varTypeIsStruct(inlArgNode->gtType) && !varTypeIsStruct(sigType));
/* In valid IL, this can only happen for short integer types or byrefs <-> [native] ints,
but in bad IL cases with caller-callee signature mismatches we can see other types.
Intentionally reject cases with mismatches so the jit is more flexible when
encountering bad IL. */
bool isPlausibleTypeMatch = (genActualType(sigType) == genActualType(inlArgNode->gtType)) ||
(genActualTypeIsIntOrI(sigType) && inlArgNode->gtType == TYP_BYREF) ||
(sigType == TYP_BYREF && genActualTypeIsIntOrI(inlArgNode->gtType));
if (!isPlausibleTypeMatch)
{
inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_TYPES_INCOMPATIBLE);
return;
}
GenTree** pInlArgNode;
if (inlArgNode->OperIs(GT_PUTARG_TYPE))
{
// There was a widening or narrowing cast.
GenTreeUnOp* putArgType = inlArgNode->AsUnOp();
pInlArgNode = &putArgType->gtOp1;
inlArgNode = putArgType->gtOp1;
}
else
{
// The same size but different type of the arguments.
pInlArgNode = &inlArgInfo[i].argNode;
}
/* Is it a narrowing or widening cast?
* Widening casts are ok since the value computed is already
* normalized to an int (on the IL stack) */
if (genTypeSize(inlArgNode->gtType) >= genTypeSize(sigType))
{
if (sigType == TYP_BYREF)
{
lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
}
else if (inlArgNode->gtType == TYP_BYREF)
{
assert(varTypeIsIntOrI(sigType));
/* If possible bash the BYREF to an int */
if (inlArgNode->IsLocalAddrExpr() != nullptr)
{
inlArgNode->gtType = TYP_I_IMPL;
lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
}
else
{
/* Arguments 'int <- byref' cannot be changed */
inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
return;
}
}
else if (genTypeSize(sigType) < TARGET_POINTER_SIZE)
{
// Narrowing cast.
if (inlArgNode->OperIs(GT_LCL_VAR))
{
const unsigned lclNum = inlArgNode->AsLclVarCommon()->GetLclNum();
if (!lvaTable[lclNum].lvNormalizeOnLoad() && sigType == lvaGetRealType(lclNum))
{
// We don't need to insert a cast here as the variable
// was assigned a normalized value of the right type.
continue;
}
}
inlArgNode = gtNewCastNode(TYP_INT, inlArgNode, false, sigType);
inlArgInfo[i].argIsLclVar = false;
// Try to fold the node in case we have constant arguments.
if (inlArgInfo[i].argIsInvariant)
{
inlArgNode = gtFoldExprConst(inlArgNode);
assert(inlArgNode->OperIsConst());
}
*pInlArgNode = inlArgNode;
}
#ifdef TARGET_64BIT
else if (genTypeSize(genActualType(inlArgNode->gtType)) < genTypeSize(sigType))
{
// This should only happen for int -> native int widening
inlArgNode = gtNewCastNode(genActualType(sigType), inlArgNode, false, sigType);
inlArgInfo[i].argIsLclVar = false;
/* Try to fold the node in case we have constant arguments */
if (inlArgInfo[i].argIsInvariant)
{
inlArgNode = gtFoldExprConst(inlArgNode);
assert(inlArgNode->OperIsConst());
}
*pInlArgNode = inlArgNode;
}
#endif // TARGET_64BIT
}
}
}
/* Init the types of the local variables */
CORINFO_ARG_LIST_HANDLE localsSig;
localsSig = methInfo->locals.args;
for (i = 0; i < methInfo->locals.numArgs; i++)
{
bool isPinned;
var_types type = (var_types)eeGetArgType(localsSig, &methInfo->locals, &isPinned);
lclVarInfo[i + argCnt].lclHasLdlocaOp = false;
lclVarInfo[i + argCnt].lclTypeInfo = type;
if (varTypeIsGC(type))
{
if (isPinned)
{
JITDUMP("Inlinee local #%02u is pinned\n", i);
lclVarInfo[i + argCnt].lclIsPinned = true;
// Pinned locals may cause inlines to fail.
inlineResult->Note(InlineObservation::CALLEE_HAS_PINNED_LOCALS);
if (inlineResult->IsFailure())
{
return;
}
}
pInlineInfo->numberOfGcRefLocals++;
}
else if (isPinned)
{
JITDUMP("Ignoring pin on inlinee local #%02u -- not a GC type\n", i);
}
lclVarInfo[i + argCnt].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->locals, localsSig);
// If this local is a struct type with GC fields, inform the inliner. It may choose to bail
// out on the inline.
if (type == TYP_STRUCT)
{
CORINFO_CLASS_HANDLE lclHandle = lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle();
DWORD typeFlags = info.compCompHnd->getClassAttribs(lclHandle);
if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
{
inlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
if (inlineResult->IsFailure())
{
return;
}
// Do further notification in the case where the call site is rare; some policies do
// not track the relative hotness of call sites for "always" inline cases.
if (pInlineInfo->iciBlock->isRunRarely())
{
inlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
if (inlineResult->IsFailure())
{
return;
}
}
}
}
localsSig = info.compCompHnd->getArgNext(localsSig);
#ifdef FEATURE_SIMD
if ((!foundSIMDType || (type == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[i + argCnt].lclVerTypeInfo)))
{
foundSIMDType = true;
if (type == TYP_STRUCT)
{
var_types structType = impNormStructType(lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle());
lclVarInfo[i + argCnt].lclTypeInfo = structType;
}
}
#endif // FEATURE_SIMD
}
#ifdef FEATURE_SIMD
if (!foundSIMDType && (call->AsCall()->gtRetClsHnd != nullptr) && isSIMDorHWSIMDClass(call->AsCall()->gtRetClsHnd))
{
foundSIMDType = true;
}
pInlineInfo->hasSIMDTypeArgLocalOrReturn = foundSIMDType;
#endif // FEATURE_SIMD
}
//------------------------------------------------------------------------
// impInlineFetchLocal: get a local var that represents an inlinee local
//
// Arguments:
// lclNum -- number of the inlinee local
// reason -- debug string describing purpose of the local var
//
// Returns:
// Number of the local to use
//
// Notes:
// This method is invoked only for locals actually used in the
// inlinee body.
//
// Allocates a new temp if necessary, and copies key properties
// over from the inlinee local var info.
unsigned Compiler::impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason))
{
assert(compIsForInlining());
unsigned tmpNum = impInlineInfo->lclTmpNum[lclNum];
if (tmpNum == BAD_VAR_NUM)
{
const InlLclVarInfo& inlineeLocal = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt];
const var_types lclTyp = inlineeLocal.lclTypeInfo;
// The lifetime of this local might span multiple BBs.
// So it is a long lifetime local.
impInlineInfo->lclTmpNum[lclNum] = tmpNum = lvaGrabTemp(false DEBUGARG(reason));
// Copy over key info
lvaTable[tmpNum].lvType = lclTyp;
lvaTable[tmpNum].lvHasLdAddrOp = inlineeLocal.lclHasLdlocaOp;
lvaTable[tmpNum].lvPinned = inlineeLocal.lclIsPinned;
lvaTable[tmpNum].lvHasILStoreOp = inlineeLocal.lclHasStlocOp;
lvaTable[tmpNum].lvHasMultipleILStoreOp = inlineeLocal.lclHasMultipleStlocOp;
// Copy over class handle for ref types. Note this may be a
// shared type -- someday perhaps we can get the exact
// signature and pass in a more precise type.
if (lclTyp == TYP_REF)
{
assert(lvaTable[tmpNum].lvSingleDef == 0);
lvaTable[tmpNum].lvSingleDef = !inlineeLocal.lclHasMultipleStlocOp && !inlineeLocal.lclHasLdlocaOp;
if (lvaTable[tmpNum].lvSingleDef)
{
JITDUMP("Marked V%02u as a single def temp\n", tmpNum);
}
lvaSetClass(tmpNum, inlineeLocal.lclVerTypeInfo.GetClassHandleForObjRef());
}
if (inlineeLocal.lclVerTypeInfo.IsStruct())
{
if (varTypeIsStruct(lclTyp))
{
lvaSetStruct(tmpNum, inlineeLocal.lclVerTypeInfo.GetClassHandle(), true /* unsafe value cls check */);
}
else
{
// This is a wrapped primitive. Make sure the verstate knows that
lvaTable[tmpNum].lvVerTypeInfo = inlineeLocal.lclVerTypeInfo;
}
}
#ifdef DEBUG
// Sanity check that we're properly prepared for gc ref locals.
if (varTypeIsGC(lclTyp))
{
// Since there are gc locals we should have seen them earlier
// and if there was a return value, set up the spill temp.
assert(impInlineInfo->HasGcRefLocals());
assert((info.compRetNativeType == TYP_VOID) || fgNeedReturnSpillTemp());
}
else
{
// Make sure all pinned locals count as gc refs.
assert(!inlineeLocal.lclIsPinned);
}
#endif // DEBUG
}
return tmpNum;
}
//------------------------------------------------------------------------
// impInlineFetchArg: return tree node for argument value in an inlinee
//
// Arguments:
// lclNum -- argument number in inlinee IL
// inlArgInfo -- argument info for inlinee
// lclVarInfo -- var info for inlinee
//
// Returns:
// Tree for the argument's value. Often an inlinee-scoped temp
// GT_LCL_VAR but can be other tree kinds, if the argument
// expression from the caller can be directly substituted into the
// inlinee body.
//
// Notes:
// Must be used only for arguments -- use impInlineFetchLocal for
// inlinee locals.
//
// Direct substitution is performed when the formal argument cannot
// change value in the inlinee body (no starg or ldarga), and the
// actual argument expression's value cannot be changed if it is
// substituted into the inlinee body.
//
// Even if an inlinee-scoped temp is returned here, it may later be
// "bashed" to a caller-supplied tree when arguments are actually
// passed (see fgInlinePrependStatements). Bashing can happen if
// the argument ends up being single use and other conditions are
// met. So the contents of the tree returned here may not end up
// being the ones ultimately used for the argument.
//
// This method will side effect inlArgInfo. It should only be called
// for actual uses of the argument in the inlinee.
GenTree* Compiler::impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclVarInfo)
{
// Cache the relevant arg and lcl info for this argument.
// We will modify argInfo but not lclVarInfo.
InlArgInfo& argInfo = inlArgInfo[lclNum];
const InlLclVarInfo& lclInfo = lclVarInfo[lclNum];
const bool argCanBeModified = argInfo.argHasLdargaOp || argInfo.argHasStargOp;
const var_types lclTyp = lclInfo.lclTypeInfo;
GenTree* op1 = nullptr;
GenTree* argNode = argInfo.argNode->gtSkipPutArgType()->gtRetExprVal();
if (argInfo.argIsInvariant && !argCanBeModified)
{
// Directly substitute constants or addresses of locals
//
// Clone the constant. Note that we cannot directly use
// argNode in the trees even if !argInfo.argIsUsed as this
// would introduce aliasing between inlArgInfo[].argNode and
// impInlineExpr. Then gtFoldExpr() could change it, causing
        // further references to the argument to work off of the
// bashed copy.
op1 = gtCloneExpr(argNode);
PREFIX_ASSUME(op1 != nullptr);
argInfo.argTmpNum = BAD_VAR_NUM;
// We may need to retype to ensure we match the callee's view of the type.
// Otherwise callee-pass throughs of arguments can create return type
// mismatches that block inlining.
//
// Note argument type mismatches that prevent inlining should
// have been caught in impInlineInitVars.
if (op1->TypeGet() != lclTyp)
{
op1->gtType = genActualType(lclTyp);
}
}
else if (argInfo.argIsLclVar && !argCanBeModified && !argInfo.argHasCallerLocalRef)
{
// Directly substitute unaliased caller locals for args that cannot be modified
//
// Use the caller-supplied node if this is the first use.
op1 = argNode;
unsigned argLclNum = op1->AsLclVarCommon()->GetLclNum();
argInfo.argTmpNum = argLclNum;
// Use an equivalent copy if this is the second or subsequent
// use.
//
// Note argument type mismatches that prevent inlining should
// have been caught in impInlineInitVars. If inlining is not prevented
// but a cast is necessary, we similarly expect it to have been inserted then.
// So here we may have argument type mismatches that are benign, for instance
// passing a TYP_SHORT local (eg. normalized-on-load) as a TYP_INT arg.
// The exception is when the inlining means we should start tracking the argument.
if (argInfo.argIsUsed || ((lclTyp == TYP_BYREF) && (op1->TypeGet() != TYP_BYREF)))
{
assert(op1->gtOper == GT_LCL_VAR);
assert(lclNum == op1->AsLclVar()->gtLclILoffs);
// Create a new lcl var node - remember the argument lclNum
op1 = impCreateLocalNode(argLclNum DEBUGARG(op1->AsLclVar()->gtLclILoffs));
// Start tracking things as a byref if the parameter is a byref.
if (lclTyp == TYP_BYREF)
{
op1->gtType = TYP_BYREF;
}
}
}
else if (argInfo.argIsByRefToStructLocal && !argInfo.argHasStargOp)
{
/* Argument is a by-ref address to a struct, a normed struct, or its field.
In these cases, don't spill the byref to a local, simply clone the tree and use it.
This way we will increase the chance for this byref to be optimized away by
a subsequent "dereference" operation.
From Dev11 bug #139955: Argument node can also be TYP_I_IMPL if we've bashed the tree
(in impInlineInitVars()), if the arg has argHasLdargaOp as well as argIsByRefToStructLocal.
For example, if the caller is:
ldloca.s V_1 // V_1 is a local struct
call void Test.ILPart::RunLdargaOnPointerArg(int32*)
and the callee being inlined has:
.method public static void RunLdargaOnPointerArg(int32* ptrToInts) cil managed
ldarga.s ptrToInts
call void Test.FourInts::NotInlined_SetExpectedValuesThroughPointerToPointer(int32**)
then we change the argument tree (of "ldloca.s V_1") to TYP_I_IMPL to match the callee signature. We'll
soon afterwards reject the inlining anyway, since the tree we return isn't a GT_LCL_VAR.
*/
assert(argNode->TypeGet() == TYP_BYREF || argNode->TypeGet() == TYP_I_IMPL);
op1 = gtCloneExpr(argNode);
}
else
{
/* Argument is a complex expression - it must be evaluated into a temp */
if (argInfo.argHasTmp)
{
assert(argInfo.argIsUsed);
assert(argInfo.argTmpNum < lvaCount);
/* Create a new lcl var node - remember the argument lclNum */
op1 = gtNewLclvNode(argInfo.argTmpNum, genActualType(lclTyp));
            /* This is the second or later use of this argument,
so we have to use the temp (instead of the actual arg) */
argInfo.argBashTmpNode = nullptr;
}
else
{
/* First time use */
assert(!argInfo.argIsUsed);
/* Reserve a temp for the expression.
* Use a large size node as we may change it later */
const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Inlining Arg"));
lvaTable[tmpNum].lvType = lclTyp;
// For ref types, determine the type of the temp.
if (lclTyp == TYP_REF)
{
if (!argCanBeModified)
{
// If the arg can't be modified in the method
// body, use the type of the value, if
// known. Otherwise, use the declared type.
assert(lvaTable[tmpNum].lvSingleDef == 0);
lvaTable[tmpNum].lvSingleDef = 1;
JITDUMP("Marked V%02u as a single def temp\n", tmpNum);
lvaSetClass(tmpNum, argNode, lclInfo.lclVerTypeInfo.GetClassHandleForObjRef());
}
else
{
// Arg might be modified, use the declared type of
// the argument.
lvaSetClass(tmpNum, lclInfo.lclVerTypeInfo.GetClassHandleForObjRef());
}
}
assert(!lvaTable[tmpNum].IsAddressExposed());
if (argInfo.argHasLdargaOp)
{
lvaTable[tmpNum].lvHasLdAddrOp = 1;
}
if (lclInfo.lclVerTypeInfo.IsStruct())
{
if (varTypeIsStruct(lclTyp))
{
lvaSetStruct(tmpNum, lclInfo.lclVerTypeInfo.GetClassHandle(), true /* unsafe value cls check */);
if (info.compIsVarArgs)
{
lvaSetStructUsedAsVarArg(tmpNum);
}
}
else
{
// This is a wrapped primitive. Make sure the verstate knows that
lvaTable[tmpNum].lvVerTypeInfo = lclInfo.lclVerTypeInfo;
}
}
argInfo.argHasTmp = true;
argInfo.argTmpNum = tmpNum;
// If we require strict exception order, then arguments must
// be evaluated in sequence before the body of the inlined method.
// So we need to evaluate them to a temp.
// Also, if arguments have global or local references, we need to
// evaluate them to a temp before the inlined body as the
// inlined body may be modifying the global ref.
// TODO-1stClassStructs: We currently do not reuse an existing lclVar
// if it is a struct, because it requires some additional handling.
if ((!varTypeIsStruct(lclTyp) && !argInfo.argHasSideEff && !argInfo.argHasGlobRef &&
!argInfo.argHasCallerLocalRef))
{
/* Get a *LARGE* LCL_VAR node */
op1 = gtNewLclLNode(tmpNum, genActualType(lclTyp) DEBUGARG(lclNum));
/* Record op1 as the very first use of this argument.
If there are no further uses of the arg, we may be
able to use the actual arg node instead of the temp.
If we do see any further uses, we will clear this. */
argInfo.argBashTmpNode = op1;
}
else
{
/* Get a small LCL_VAR node */
op1 = gtNewLclvNode(tmpNum, genActualType(lclTyp));
/* No bashing of this argument */
argInfo.argBashTmpNode = nullptr;
}
}
}
// Mark this argument as used.
argInfo.argIsUsed = true;
return op1;
}
/******************************************************************************
Is this the original "this" argument to the call being inlined?
Note that we do not inline methods with "starg 0", and so we do not need to
worry about it.
*/
bool Compiler::impInlineIsThis(GenTree* tree, InlArgInfo* inlArgInfo)
{
assert(compIsForInlining());
return (tree->gtOper == GT_LCL_VAR && tree->AsLclVarCommon()->GetLclNum() == inlArgInfo[0].argTmpNum);
}
//-----------------------------------------------------------------------------
// impInlineIsGuaranteedThisDerefBeforeAnySideEffects: Check if a dereference in
// the inlinee can guarantee that the "this" pointer is non-NULL.
//
// Arguments:
// additionalTree - a tree to check for side effects
// additionalCallArgs - a list of call args to check for side effects
// dereferencedAddress - address expression being dereferenced
// inlArgInfo - inlinee argument information
//
// Notes:
// If we haven't hit a branch or a side effect, and we are dereferencing
// from 'this' to access a field or make GTF_CALL_NULLCHECK call,
// then we can avoid a separate null pointer check.
//
// The importer stack and current statement list are searched for side effects.
// Trees that have been popped off the stack but haven't been appended to the
// statement list and have to be checked for side effects may be provided via
// additionalTree and additionalCallArgs.
//
bool Compiler::impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTree* additionalTree,
GenTreeCall::Use* additionalCallArgs,
GenTree* dereferencedAddress,
InlArgInfo* inlArgInfo)
{
assert(compIsForInlining());
assert(opts.OptEnabled(CLFLG_INLINING));
BasicBlock* block = compCurBB;
if (block != fgFirstBB)
{
return false;
}
if (!impInlineIsThis(dereferencedAddress, inlArgInfo))
{
return false;
}
if ((additionalTree != nullptr) && GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(additionalTree->gtFlags))
{
return false;
}
for (GenTreeCall::Use& use : GenTreeCall::UseList(additionalCallArgs))
{
if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(use.GetNode()->gtFlags))
{
return false;
}
}
for (Statement* stmt : StatementList(impStmtList))
{
GenTree* expr = stmt->GetRootNode();
if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(expr->gtFlags))
{
return false;
}
}
for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
{
GenTreeFlags stackTreeFlags = verCurrentState.esStack[level].val->gtFlags;
if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(stackTreeFlags))
{
return false;
}
}
return true;
}
//------------------------------------------------------------------------
// impMarkInlineCandidate: determine if this call can be subsequently inlined
//
// Arguments:
// callNode -- call under scrutiny
// exactContextHnd -- context handle for inlining
// exactContextNeedsRuntimeLookup -- true if context required runtime lookup
// callInfo -- call info from VM
//
// Notes:
// Mostly a wrapper for impMarkInlineCandidateHelper that also undoes
// guarded devirtualization for virtual calls where the method we'd
// devirtualize to cannot be inlined.
void Compiler::impMarkInlineCandidate(GenTree* callNode,
CORINFO_CONTEXT_HANDLE exactContextHnd,
bool exactContextNeedsRuntimeLookup,
CORINFO_CALL_INFO* callInfo)
{
GenTreeCall* call = callNode->AsCall();
// Do the actual evaluation
impMarkInlineCandidateHelper(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo);
// If this call is an inline candidate or is not a guarded devirtualization
// candidate, we're done.
if (call->IsInlineCandidate() || !call->IsGuardedDevirtualizationCandidate())
{
return;
}
// If we can't inline the call we'd guardedly devirtualize to,
// we undo the guarded devirtualization, as the benefit from
// just guarded devirtualization alone is likely not worth the
// extra jit time and code size.
//
// TODO: it is possibly interesting to allow this, but requires
// fixes elsewhere too...
JITDUMP("Revoking guarded devirtualization candidacy for call [%06u]: target method can't be inlined\n",
dspTreeID(call));
call->ClearGuardedDevirtualizationCandidate();
}
//------------------------------------------------------------------------
// impMarkInlineCandidateHelper: determine if this call can be subsequently
// inlined
//
// Arguments:
// callNode -- call under scrutiny
// exactContextHnd -- context handle for inlining
// exactContextNeedsRuntimeLookup -- true if context required runtime lookup
// callInfo -- call info from VM
//
// Notes:
// If callNode is an inline candidate, this method sets the flag
// GTF_CALL_INLINE_CANDIDATE, and ensures that helper methods have
// filled in the associated InlineCandidateInfo.
//
// If callNode is not an inline candidate, and the reason is
// something that is inherent to the method being called, the
// method may be marked as "noinline" to short-circuit any
// future assessments of calls to this method.
void Compiler::impMarkInlineCandidateHelper(GenTreeCall* call,
CORINFO_CONTEXT_HANDLE exactContextHnd,
bool exactContextNeedsRuntimeLookup,
CORINFO_CALL_INFO* callInfo)
{
// Let the strategy know there's another call
impInlineRoot()->m_inlineStrategy->NoteCall();
if (!opts.OptEnabled(CLFLG_INLINING))
{
/* XXX Mon 8/18/2008
* This assert is misleading. The caller does not ensure that we have CLFLG_INLINING set before
* calling impMarkInlineCandidate. However, if this assert trips it means that we're an inlinee and
* CLFLG_MINOPT is set. That doesn't make a lot of sense. If you hit this assert, work back and
* figure out why we did not set MAXOPT for this compile.
*/
assert(!compIsForInlining());
return;
}
if (compIsForImportOnly())
{
// Don't bother creating the inline candidate during verification.
// Otherwise the call to info.compCompHnd->canInline will trigger a recursive verification
// that leads to the creation of multiple instances of Compiler.
return;
}
InlineResult inlineResult(this, call, nullptr, "impMarkInlineCandidate");
// Don't inline if not optimizing root method
if (opts.compDbgCode)
{
inlineResult.NoteFatal(InlineObservation::CALLER_DEBUG_CODEGEN);
return;
}
// Don't inline if inlining into this method is disabled.
if (impInlineRoot()->m_inlineStrategy->IsInliningDisabled())
{
inlineResult.NoteFatal(InlineObservation::CALLER_IS_JIT_NOINLINE);
return;
}
// Don't inline into callers that use the NextCallReturnAddress intrinsic.
if (info.compHasNextCallRetAddr)
{
inlineResult.NoteFatal(InlineObservation::CALLER_USES_NEXT_CALL_RET_ADDR);
return;
}
// Inlining candidate determination needs to honor only IL tail prefix.
// Inlining takes precedence over implicit tail call optimization (if the call is not directly recursive).
if (call->IsTailPrefixedCall())
{
inlineResult.NoteFatal(InlineObservation::CALLSITE_EXPLICIT_TAIL_PREFIX);
return;
}
// Delegate Invoke method doesn't have a body and gets special cased instead.
// Don't even bother trying to inline it.
if (call->IsDelegateInvoke())
{
inlineResult.NoteFatal(InlineObservation::CALLEE_HAS_NO_BODY);
return;
}
// Tail recursion elimination takes precedence over inlining.
// TODO: We may want to do some of the additional checks from fgMorphCall
// here to reduce the chance we don't inline a call that won't be optimized
// as a fast tail call or turned into a loop.
if (gtIsRecursiveCall(call) && call->IsImplicitTailCall())
{
inlineResult.NoteFatal(InlineObservation::CALLSITE_IMPLICIT_REC_TAIL_CALL);
return;
}
if (call->IsVirtual())
{
// Allow guarded devirt calls to be treated as inline candidates,
// but reject all other virtual calls.
if (!call->IsGuardedDevirtualizationCandidate())
{
inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT);
return;
}
}
/* Ignore helper calls */
if (call->gtCallType == CT_HELPER)
{
assert(!call->IsGuardedDevirtualizationCandidate());
inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_CALL_TO_HELPER);
return;
}
/* Ignore indirect calls */
if (call->gtCallType == CT_INDIRECT)
{
inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT_MANAGED);
return;
}
/* I removed the check for BBJ_THROW. BBJ_THROW is usually marked as rarely run. This more or less
* restricts the inliner to non-expanding inlines. I removed the check to allow for non-expanding
* inlining in throw blocks. I should consider the same thing for catch and filter regions. */
CORINFO_METHOD_HANDLE fncHandle;
unsigned methAttr;
if (call->IsGuardedDevirtualizationCandidate())
{
if (call->gtGuardedDevirtualizationCandidateInfo->guardedMethodUnboxedEntryHandle != nullptr)
{
fncHandle = call->gtGuardedDevirtualizationCandidateInfo->guardedMethodUnboxedEntryHandle;
}
else
{
fncHandle = call->gtGuardedDevirtualizationCandidateInfo->guardedMethodHandle;
}
methAttr = info.compCompHnd->getMethodAttribs(fncHandle);
}
else
{
fncHandle = call->gtCallMethHnd;
// Reuse method flags from the original callInfo if possible
if (fncHandle == callInfo->hMethod)
{
methAttr = callInfo->methodFlags;
}
else
{
methAttr = info.compCompHnd->getMethodAttribs(fncHandle);
}
}
#ifdef DEBUG
if (compStressCompile(STRESS_FORCE_INLINE, 0))
{
methAttr |= CORINFO_FLG_FORCEINLINE;
}
#endif
// Check for COMPlus_AggressiveInlining
if (compDoAggressiveInlining)
{
methAttr |= CORINFO_FLG_FORCEINLINE;
}
if (!(methAttr & CORINFO_FLG_FORCEINLINE))
{
/* Don't bother inline blocks that are in the filter region */
if (bbInCatchHandlerILRange(compCurBB))
{
#ifdef DEBUG
if (verbose)
{
printf("\nWill not inline blocks that are in the catch handler region\n");
}
#endif
inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_CATCH);
return;
}
if (bbInFilterILRange(compCurBB))
{
#ifdef DEBUG
if (verbose)
{
printf("\nWill not inline blocks that are in the filter region\n");
}
#endif
inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_FILTER);
return;
}
}
/* Check if we tried to inline this method before */
if (methAttr & CORINFO_FLG_DONT_INLINE)
{
inlineResult.NoteFatal(InlineObservation::CALLEE_IS_NOINLINE);
return;
}
/* Cannot inline synchronized methods */
if (methAttr & CORINFO_FLG_SYNCH)
{
inlineResult.NoteFatal(InlineObservation::CALLEE_IS_SYNCHRONIZED);
return;
}
/* Check legality of PInvoke callsite (for inlining of marshalling code) */
if (methAttr & CORINFO_FLG_PINVOKE)
{
// See comment in impCheckForPInvokeCall
BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB;
if (!impCanPInvokeInlineCallSite(block))
{
inlineResult.NoteFatal(InlineObservation::CALLSITE_PINVOKE_EH);
return;
}
}
InlineCandidateInfo* inlineCandidateInfo = nullptr;
impCheckCanInline(call, fncHandle, methAttr, exactContextHnd, &inlineCandidateInfo, &inlineResult);
if (inlineResult.IsFailure())
{
return;
}
// The old value should be null OR this call should be a guarded devirtualization candidate.
assert((call->gtInlineCandidateInfo == nullptr) || call->IsGuardedDevirtualizationCandidate());
// The new value should not be null.
assert(inlineCandidateInfo != nullptr);
inlineCandidateInfo->exactContextNeedsRuntimeLookup = exactContextNeedsRuntimeLookup;
call->gtInlineCandidateInfo = inlineCandidateInfo;
// If we're in an inlinee compiler, and have a return spill temp, and this inline candidate
// is also a tail call candidate, it can use the same return spill temp.
//
if (compIsForInlining() && call->CanTailCall() &&
(impInlineInfo->inlineCandidateInfo->preexistingSpillTemp != BAD_VAR_NUM))
{
inlineCandidateInfo->preexistingSpillTemp = impInlineInfo->inlineCandidateInfo->preexistingSpillTemp;
JITDUMP("Inline candidate [%06u] can share spill temp V%02u\n", dspTreeID(call),
inlineCandidateInfo->preexistingSpillTemp);
}
// Mark the call node as inline candidate.
call->gtFlags |= GTF_CALL_INLINE_CANDIDATE;
// Let the strategy know there's another candidate.
impInlineRoot()->m_inlineStrategy->NoteCandidate();
// Since we're not actually inlining yet, and this call site is
// still just an inline candidate, there's nothing to report.
inlineResult.SetReported();
}
/******************************************************************************/
// Returns true if the given intrinsic will be implemented by target-specific
// instructions
bool Compiler::IsTargetIntrinsic(NamedIntrinsic intrinsicName)
{
#if defined(TARGET_XARCH)
switch (intrinsicName)
{
// AMD64/x86 has SSE2 instructions to directly compute sqrt/abs and SSE4.1
// instructions to directly compute round/ceiling/floor/truncate.
case NI_System_Math_Abs:
case NI_System_Math_Sqrt:
return true;
case NI_System_Math_Ceiling:
case NI_System_Math_Floor:
case NI_System_Math_Truncate:
case NI_System_Math_Round:
return compOpportunisticallyDependsOn(InstructionSet_SSE41);
case NI_System_Math_FusedMultiplyAdd:
return compOpportunisticallyDependsOn(InstructionSet_FMA);
default:
return false;
}
#elif defined(TARGET_ARM64)
switch (intrinsicName)
{
case NI_System_Math_Abs:
case NI_System_Math_Ceiling:
case NI_System_Math_Floor:
case NI_System_Math_Truncate:
case NI_System_Math_Round:
case NI_System_Math_Sqrt:
case NI_System_Math_Max:
case NI_System_Math_Min:
return true;
case NI_System_Math_FusedMultiplyAdd:
return compOpportunisticallyDependsOn(InstructionSet_AdvSimd);
default:
return false;
}
#elif defined(TARGET_ARM)
switch (intrinsicName)
{
case NI_System_Math_Abs:
case NI_System_Math_Round:
case NI_System_Math_Sqrt:
return true;
default:
return false;
}
#else
// TODO: This portion of logic is not implemented for other arch.
    // The reason for returning true is that on all other architectures the only
    // intrinsics enabled are target intrinsics.
return true;
#endif
}
/******************************************************************************/
// Returns true if the given intrinsic will be implemented by calling System.Math
// methods.
bool Compiler::IsIntrinsicImplementedByUserCall(NamedIntrinsic intrinsicName)
{
// Currently, if a math intrinsic is not implemented by target-specific
// instructions, it will be implemented by a System.Math call. In the
// future, if we turn to implementing some of them with helper calls,
// this predicate needs to be revisited.
return !IsTargetIntrinsic(intrinsicName);
}
bool Compiler::IsMathIntrinsic(NamedIntrinsic intrinsicName)
{
switch (intrinsicName)
{
case NI_System_Math_Abs:
case NI_System_Math_Acos:
case NI_System_Math_Acosh:
case NI_System_Math_Asin:
case NI_System_Math_Asinh:
case NI_System_Math_Atan:
case NI_System_Math_Atanh:
case NI_System_Math_Atan2:
case NI_System_Math_Cbrt:
case NI_System_Math_Ceiling:
case NI_System_Math_Cos:
case NI_System_Math_Cosh:
case NI_System_Math_Exp:
case NI_System_Math_Floor:
case NI_System_Math_FMod:
case NI_System_Math_FusedMultiplyAdd:
case NI_System_Math_ILogB:
case NI_System_Math_Log:
case NI_System_Math_Log2:
case NI_System_Math_Log10:
case NI_System_Math_Max:
case NI_System_Math_Min:
case NI_System_Math_Pow:
case NI_System_Math_Round:
case NI_System_Math_Sin:
case NI_System_Math_Sinh:
case NI_System_Math_Sqrt:
case NI_System_Math_Tan:
case NI_System_Math_Tanh:
case NI_System_Math_Truncate:
{
assert((intrinsicName > NI_SYSTEM_MATH_START) && (intrinsicName < NI_SYSTEM_MATH_END));
return true;
}
default:
{
assert((intrinsicName < NI_SYSTEM_MATH_START) || (intrinsicName > NI_SYSTEM_MATH_END));
return false;
}
}
}
bool Compiler::IsMathIntrinsic(GenTree* tree)
{
return (tree->OperGet() == GT_INTRINSIC) && IsMathIntrinsic(tree->AsIntrinsic()->gtIntrinsicName);
}
//------------------------------------------------------------------------
// impDevirtualizeCall: Attempt to change a virtual vtable call into a
// normal call
//
// Arguments:
// call -- the call node to examine/modify
// pResolvedToken -- [IN] the resolved token used to create the call. Used for R2R.
// method -- [IN/OUT] the method handle for call. Updated iff call devirtualized.
// methodFlags -- [IN/OUT] flags for the method to call. Updated iff call devirtualized.
// pContextHandle -- [IN/OUT] context handle for the call. Updated iff call devirtualized.
// pExactContextHandle -- [OUT] updated context handle iff call devirtualized
// isLateDevirtualization -- if devirtualization is happening after importation
//     isExplicitTailCall -- [IN] true if we plan on using an explicit tail call
// ilOffset -- IL offset of the call
//
// Notes:
// Virtual calls in IL will always "invoke" the base class method.
//
// This transformation looks for evidence that the type of 'this'
// in the call is exactly known, is a final class or would invoke
// a final method, and if that and other safety checks pan out,
// modifies the call and the call info to create a direct call.
//
// This transformation is initially done in the importer and not
// in some subsequent optimization pass because we want it to be
// upstream of inline candidate identification.
//
// However, later phases may supply improved type information that
// can enable further devirtualization. We currently reinvoke this
// code after inlining, if the return value of the inlined call is
// the 'this obj' of a subsequent virtual call.
//
// If devirtualization succeeds and the call's this object is a
// (boxed) value type, the jit will ask the EE for the unboxed entry
// point. If this exists, the jit will invoke the unboxed entry
//    on the box payload. In addition, if the boxing operation is
//    visible to the jit and the call is the only consumer of the box,
//    the jit will try to analyze the box to see if the call can instead
//    be made on a local copy. If that is doable, the call is
// updated to invoke the unboxed entry on the local copy and the
// boxing operation is removed.
//
// When guarded devirtualization is enabled, this method will mark
// calls as guarded devirtualization candidates, if the type of `this`
// is not exactly known, and there is a plausible guess for the type.
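//
//    Illustrative example (hypothetical types, not from this source):
//
//        sealed class Circle : Shape { public override double Area() { ... } }
//        Shape s = new Circle(r);
//        double a = s.Area();      // IL: callvirt Shape::Area
//
//    If the jit can see that 's' is exactly (or finally) of type Circle, the
//    callvirt is rewritten into a direct call to Circle::Area, which in turn
//    allows it to be considered as an ordinary inline candidate.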
void Compiler::impDevirtualizeCall(GenTreeCall* call,
CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_METHOD_HANDLE* method,
unsigned* methodFlags,
CORINFO_CONTEXT_HANDLE* pContextHandle,
CORINFO_CONTEXT_HANDLE* pExactContextHandle,
bool isLateDevirtualization,
bool isExplicitTailCall,
IL_OFFSET ilOffset)
{
assert(call != nullptr);
assert(method != nullptr);
assert(methodFlags != nullptr);
assert(pContextHandle != nullptr);
// This should be a virtual vtable or virtual stub call.
//
assert(call->IsVirtual());
// Possibly instrument. Note for OSR+PGO we will instrument when
// optimizing and (currently) won't devirtualize. We may want
// to revisit -- if we can devirtualize we should be able to
// suppress the probe.
//
// We strip BBINSTR from inlinees currently, so we'll only
// do this for the root method calls.
//
if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR))
{
assert(opts.OptimizationDisabled() || opts.IsOSR());
assert(!compIsForInlining());
// During importation, optionally flag this block as one that
// contains calls requiring class profiling. Ideally perhaps
// we'd just keep track of the calls themselves, so we don't
// have to search for them later.
//
if ((call->gtCallType != CT_INDIRECT) && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR) &&
!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT) && (JitConfig.JitClassProfiling() > 0) &&
!isLateDevirtualization)
{
JITDUMP("\n ... marking [%06u] in " FMT_BB " for class profile instrumentation\n", dspTreeID(call),
compCurBB->bbNum);
ClassProfileCandidateInfo* pInfo = new (this, CMK_Inlining) ClassProfileCandidateInfo;
// Record some info needed for the class profiling probe.
//
pInfo->ilOffset = ilOffset;
pInfo->probeIndex = info.compClassProbeCount++;
call->gtClassProfileCandidateInfo = pInfo;
// Flag block as needing scrutiny
//
compCurBB->bbFlags |= BBF_HAS_CLASS_PROFILE;
}
return;
}
// Bail if optimizations are disabled.
if (opts.OptimizationDisabled())
{
return;
}
#if defined(DEBUG)
// Bail if devirt is disabled.
if (JitConfig.JitEnableDevirtualization() == 0)
{
return;
}
// Optionally, print info on devirtualization
Compiler* const rootCompiler = impInlineRoot();
const bool doPrint = JitConfig.JitPrintDevirtualizedMethods().contains(rootCompiler->info.compMethodName,
rootCompiler->info.compClassName,
&rootCompiler->info.compMethodInfo->args);
#endif // DEBUG
// Fetch information about the virtual method we're calling.
CORINFO_METHOD_HANDLE baseMethod = *method;
unsigned baseMethodAttribs = *methodFlags;
if (baseMethodAttribs == 0)
{
// For late devirt we may not have method attributes, so fetch them.
baseMethodAttribs = info.compCompHnd->getMethodAttribs(baseMethod);
}
else
{
#if defined(DEBUG)
// Validate that callInfo has up to date method flags
const DWORD freshBaseMethodAttribs = info.compCompHnd->getMethodAttribs(baseMethod);
// All the base method attributes should agree, save that
// CORINFO_FLG_DONT_INLINE may have changed from 0 to 1
// because of concurrent jitting activity.
//
// Note we don't look at this particular flag bit below, and
// later on (if we do try and inline) we will rediscover why
// the method can't be inlined, so there's no danger here in
// seeing this particular flag bit in different states between
// the cached and fresh values.
if ((freshBaseMethodAttribs & ~CORINFO_FLG_DONT_INLINE) != (baseMethodAttribs & ~CORINFO_FLG_DONT_INLINE))
{
assert(!"mismatched method attributes");
}
#endif // DEBUG
}
// In R2R mode, we might see virtual stub calls to
// non-virtuals. For instance cases where the non-virtual method
// is in a different assembly but is called via CALLVIRT. For
    // version resilience we must allow for the fact that the method
// might become virtual in some update.
//
// In non-R2R modes CALLVIRT <nonvirtual> will be turned into a
// regular call+nullcheck upstream, so we won't reach this
// point.
if ((baseMethodAttribs & CORINFO_FLG_VIRTUAL) == 0)
{
assert(call->IsVirtualStub());
assert(opts.IsReadyToRun());
JITDUMP("\nimpDevirtualizeCall: [R2R] base method not virtual, sorry\n");
return;
}
// Fetch information about the class that introduced the virtual method.
CORINFO_CLASS_HANDLE baseClass = info.compCompHnd->getMethodClass(baseMethod);
const DWORD baseClassAttribs = info.compCompHnd->getClassAttribs(baseClass);
// Is the call an interface call?
const bool isInterface = (baseClassAttribs & CORINFO_FLG_INTERFACE) != 0;
// See what we know about the type of 'this' in the call.
GenTree* thisObj = call->gtCallThisArg->GetNode()->gtEffectiveVal(false);
bool isExact = false;
bool objIsNonNull = false;
CORINFO_CLASS_HANDLE objClass = gtGetClassHandle(thisObj, &isExact, &objIsNonNull);
// Bail if we know nothing.
if (objClass == NO_CLASS_HANDLE)
{
JITDUMP("\nimpDevirtualizeCall: no type available (op=%s)\n", GenTree::OpName(thisObj->OperGet()));
        // Don't try guarded devirtualization when we're doing late devirtualization.
//
if (isLateDevirtualization)
{
JITDUMP("No guarded devirt during late devirtualization\n");
return;
}
considerGuardedDevirtualization(call, ilOffset, isInterface, baseMethod, baseClass,
pContextHandle DEBUGARG(objClass) DEBUGARG("unknown"));
return;
}
// If the objClass is sealed (final), then we may be able to devirtualize.
const DWORD objClassAttribs = info.compCompHnd->getClassAttribs(objClass);
const bool objClassIsFinal = (objClassAttribs & CORINFO_FLG_FINAL) != 0;
#if defined(DEBUG)
const char* callKind = isInterface ? "interface" : "virtual";
const char* objClassNote = "[?]";
const char* objClassName = "?objClass";
const char* baseClassName = "?baseClass";
const char* baseMethodName = "?baseMethod";
if (verbose || doPrint)
{
objClassNote = isExact ? " [exact]" : objClassIsFinal ? " [final]" : "";
objClassName = eeGetClassName(objClass);
baseClassName = eeGetClassName(baseClass);
baseMethodName = eeGetMethodName(baseMethod, nullptr);
if (verbose)
{
printf("\nimpDevirtualizeCall: Trying to devirtualize %s call:\n"
" class for 'this' is %s%s (attrib %08x)\n"
" base method is %s::%s\n",
callKind, objClassName, objClassNote, objClassAttribs, baseClassName, baseMethodName);
}
}
#endif // defined(DEBUG)
// See if the jit's best type for `obj` is an interface.
// See for instance System.ValueTuple`8::GetHashCode, where lcl 0 is System.IValueTupleInternal
// IL_021d: ldloc.0
// IL_021e: callvirt instance int32 System.Object::GetHashCode()
//
// If so, we can't devirtualize, but we may be able to do guarded devirtualization.
//
if ((objClassAttribs & CORINFO_FLG_INTERFACE) != 0)
{
        // Don't try guarded devirtualization when we're doing late devirtualization.
//
if (isLateDevirtualization)
{
JITDUMP("No guarded devirt during late devirtualization\n");
return;
}
considerGuardedDevirtualization(call, ilOffset, isInterface, baseMethod, baseClass,
pContextHandle DEBUGARG(objClass) DEBUGARG(objClassName));
return;
}
// If we get this far, the jit has a lower bound class type for the `this` object being used for dispatch.
// It may or may not know enough to devirtualize...
if (isInterface)
{
assert(call->IsVirtualStub());
JITDUMP("--- base class is interface\n");
}
// Fetch the method that would be called based on the declared type of 'this',
// and prepare to fetch the method attributes.
//
CORINFO_DEVIRTUALIZATION_INFO dvInfo;
dvInfo.virtualMethod = baseMethod;
dvInfo.objClass = objClass;
dvInfo.context = *pContextHandle;
dvInfo.detail = CORINFO_DEVIRTUALIZATION_UNKNOWN;
dvInfo.pResolvedTokenVirtualMethod = pResolvedToken;
info.compCompHnd->resolveVirtualMethod(&dvInfo);
CORINFO_METHOD_HANDLE derivedMethod = dvInfo.devirtualizedMethod;
CORINFO_CONTEXT_HANDLE exactContext = dvInfo.exactContext;
CORINFO_CLASS_HANDLE derivedClass = NO_CLASS_HANDLE;
CORINFO_RESOLVED_TOKEN* pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedMethod;
if (derivedMethod != nullptr)
{
assert(exactContext != nullptr);
assert(((size_t)exactContext & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS);
derivedClass = (CORINFO_CLASS_HANDLE)((size_t)exactContext & ~CORINFO_CONTEXTFLAGS_MASK);
}
DWORD derivedMethodAttribs = 0;
bool derivedMethodIsFinal = false;
bool canDevirtualize = false;
#if defined(DEBUG)
const char* derivedClassName = "?derivedClass";
const char* derivedMethodName = "?derivedMethod";
const char* note = "inexact or not final";
#endif
// If we failed to get a method handle, we can't directly devirtualize.
//
// This can happen when prejitting, if the devirtualization crosses
// servicing bubble boundaries, or if objClass is a shared class.
//
if (derivedMethod == nullptr)
{
JITDUMP("--- no derived method: %s\n", devirtualizationDetailToString(dvInfo.detail));
}
else
{
// Fetch method attributes to see if method is marked final.
derivedMethodAttribs = info.compCompHnd->getMethodAttribs(derivedMethod);
derivedMethodIsFinal = ((derivedMethodAttribs & CORINFO_FLG_FINAL) != 0);
#if defined(DEBUG)
if (isExact)
{
note = "exact";
}
else if (objClassIsFinal)
{
note = "final class";
}
else if (derivedMethodIsFinal)
{
note = "final method";
}
if (verbose || doPrint)
{
derivedMethodName = eeGetMethodName(derivedMethod, nullptr);
derivedClassName = eeGetClassName(derivedClass);
if (verbose)
{
printf(" devirt to %s::%s -- %s\n", derivedClassName, derivedMethodName, note);
gtDispTree(call);
}
}
#endif // defined(DEBUG)
canDevirtualize = isExact || objClassIsFinal || (!isInterface && derivedMethodIsFinal);
}
// We still might be able to do a guarded devirtualization.
// Note the call might be an interface call or a virtual call.
//
if (!canDevirtualize)
{
JITDUMP(" Class not final or exact%s\n", isInterface ? "" : ", and method not final");
#if defined(DEBUG)
// If we know the object type exactly, we generally expect we can devirtualize.
// (don't when doing late devirt as we won't have an owner type (yet))
//
if (!isLateDevirtualization && (isExact || objClassIsFinal) && JitConfig.JitNoteFailedExactDevirtualization())
{
printf("@@@ Exact/Final devirt failure in %s at [%06u] $ %s\n", info.compFullName, dspTreeID(call),
devirtualizationDetailToString(dvInfo.detail));
}
#endif
        // Don't try guarded devirtualization if we're doing late devirtualization.
//
if (isLateDevirtualization)
{
JITDUMP("No guarded devirt during late devirtualization\n");
return;
}
considerGuardedDevirtualization(call, ilOffset, isInterface, baseMethod, baseClass,
pContextHandle DEBUGARG(objClass) DEBUGARG(objClassName));
return;
}
// All checks done. Time to transform the call.
//
// We should always have an exact class context.
//
    // Note that wouldn't be true if the runtime side supported array interface devirt,
// the resulting method would be a generic method of the non-generic SZArrayHelper class.
//
assert(canDevirtualize);
JITDUMP(" %s; can devirtualize\n", note);
// Make the updates.
call->gtFlags &= ~GTF_CALL_VIRT_VTABLE;
call->gtFlags &= ~GTF_CALL_VIRT_STUB;
call->gtCallMethHnd = derivedMethod;
call->gtCallType = CT_USER_FUNC;
call->gtCallMoreFlags |= GTF_CALL_M_DEVIRTUALIZED;
// Virtual calls include an implicit null check, which we may
// now need to make explicit.
if (!objIsNonNull)
{
call->gtFlags |= GTF_CALL_NULLCHECK;
}
// Clear the inline candidate info (may be non-null since
// it's a union field used for other things by virtual
// stubs)
call->gtInlineCandidateInfo = nullptr;
#if defined(DEBUG)
if (verbose)
{
printf("... after devirt...\n");
gtDispTree(call);
}
if (doPrint)
{
printf("Devirtualized %s call to %s:%s; now direct call to %s:%s [%s]\n", callKind, baseClassName,
baseMethodName, derivedClassName, derivedMethodName, note);
}
// If we successfully devirtualized based on an exact or final class,
// and we have dynamic PGO data describing the likely class, make sure they agree.
//
// If pgo source is not dynamic we may see likely classes from other versions of this code
// where types had different properties.
//
// If method is an inlinee we may be specializing to a class that wasn't seen at runtime.
//
const bool canSensiblyCheck =
(isExact || objClassIsFinal) && (fgPgoSource == ICorJitInfo::PgoSource::Dynamic) && !compIsForInlining();
if (JitConfig.JitCrossCheckDevirtualizationAndPGO() && canSensiblyCheck)
{
// We only can handle a single likely class for now
const int maxLikelyClasses = 1;
LikelyClassRecord likelyClasses[maxLikelyClasses];
UINT32 numberOfClasses =
getLikelyClasses(likelyClasses, maxLikelyClasses, fgPgoSchema, fgPgoSchemaCount, fgPgoData, ilOffset);
UINT32 likelihood = likelyClasses[0].likelihood;
CORINFO_CLASS_HANDLE likelyClass = likelyClasses[0].clsHandle;
if (numberOfClasses > 0)
{
// PGO had better agree the class we devirtualized to is plausible.
//
if (likelyClass != derivedClass)
{
// Managed type system may report different addresses for a class handle
// at different times....?
//
// Also, AOT may have a more nuanced notion of class equality.
//
if (!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT))
{
bool mismatch = true;
// derivedClass will be the introducer of derived method, so it's possible
// likelyClass is a non-overriding subclass. Check up the hierarchy.
//
CORINFO_CLASS_HANDLE parentClass = likelyClass;
while (parentClass != NO_CLASS_HANDLE)
{
if (parentClass == derivedClass)
{
mismatch = false;
break;
}
parentClass = info.compCompHnd->getParentType(parentClass);
}
if (mismatch || (numberOfClasses != 1) || (likelihood != 100))
{
printf("@@@ Likely %p (%s) != Derived %p (%s) [n=%u, l=%u, il=%u] in %s \n", likelyClass,
eeGetClassName(likelyClass), derivedClass, eeGetClassName(derivedClass), numberOfClasses,
likelihood, ilOffset, info.compFullName);
}
assert(!(mismatch || (numberOfClasses != 1) || (likelihood != 100)));
}
}
}
}
#endif // defined(DEBUG)
// If the 'this' object is a value class, see if we can rework the call to invoke the
// unboxed entry. This effectively inlines the normally un-inlineable wrapper stub
// and exposes the potentially inlinable unboxed entry method.
//
// We won't optimize explicit tail calls, as ensuring we get the right tail call info
// is tricky (we'd need to pass an updated sig and resolved token back to some callers).
//
// Note we may not have a derived class in some cases (eg interface call on an array)
//
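    // A sketch of the kind of call this targets (hypothetical value type):
    //
    //     struct Meters { public override string ToString() { ... } }
    //     object o = new Meters(5);    // box
    //     string s = o.ToString();     // callvirt on the boxed value
    //
    // After devirtualizing to Meters::ToString, the code below tries to invoke
    // the unboxed entry directly on the box payload, or on a local copy if the
    // box itself can be removed.
    //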
if (info.compCompHnd->isValueClass(derivedClass))
{
if (isExplicitTailCall)
{
JITDUMP("Have a direct explicit tail call to boxed entry point; can't optimize further\n");
}
else
{
JITDUMP("Have a direct call to boxed entry point. Trying to optimize to call an unboxed entry point\n");
// Note for some shared methods the unboxed entry point requires an extra parameter.
bool requiresInstMethodTableArg = false;
CORINFO_METHOD_HANDLE unboxedEntryMethod =
info.compCompHnd->getUnboxedEntry(derivedMethod, &requiresInstMethodTableArg);
if (unboxedEntryMethod != nullptr)
{
bool optimizedTheBox = false;
// If the 'this' object is a local box, see if we can revise things
// to not require boxing.
//
if (thisObj->IsBoxedValue() && !isExplicitTailCall)
{
// Since the call is the only consumer of the box, we know the box can't escape
// since it is being passed an interior pointer.
//
// So, revise the box to simply create a local copy, use the address of that copy
// as the this pointer, and update the entry point to the unboxed entry.
//
                    // Ideally, we then inline the boxed method and, if it turns out not to modify
// the copy, we can undo the copy too.
if (requiresInstMethodTableArg)
{
// Perform a trial box removal and ask for the type handle tree that fed the box.
//
JITDUMP("Unboxed entry needs method table arg...\n");
GenTree* methodTableArg =
gtTryRemoveBoxUpstreamEffects(thisObj, BR_DONT_REMOVE_WANT_TYPE_HANDLE);
if (methodTableArg != nullptr)
{
// If that worked, turn the box into a copy to a local var
//
JITDUMP("Found suitable method table arg tree [%06u]\n", dspTreeID(methodTableArg));
GenTree* localCopyThis = gtTryRemoveBoxUpstreamEffects(thisObj, BR_MAKE_LOCAL_COPY);
if (localCopyThis != nullptr)
{
// Pass the local var as this and the type handle as a new arg
//
JITDUMP("Success! invoking unboxed entry point on local copy, and passing method table "
"arg\n");
call->gtCallThisArg = gtNewCallArgs(localCopyThis);
call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED;
// Prepend for R2L arg passing or empty L2R passing
// Append for non-empty L2R
//
if ((Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) || (call->gtCallArgs == nullptr))
{
// If there's a ret buf, the method table is the second arg.
//
if (call->HasRetBufArg())
{
gtInsertNewCallArgAfter(methodTableArg, call->gtCallArgs);
}
else
{
call->gtCallArgs = gtPrependNewCallArg(methodTableArg, call->gtCallArgs);
}
}
else
{
GenTreeCall::Use* beforeArg = call->gtCallArgs;
while (beforeArg->GetNext() != nullptr)
{
beforeArg = beforeArg->GetNext();
}
beforeArg->SetNext(gtNewCallArgs(methodTableArg));
}
call->gtCallMethHnd = unboxedEntryMethod;
derivedMethod = unboxedEntryMethod;
pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedUnboxedMethod;
// Method attributes will differ because unboxed entry point is shared
//
const DWORD unboxedMethodAttribs =
info.compCompHnd->getMethodAttribs(unboxedEntryMethod);
JITDUMP("Updating method attribs from 0x%08x to 0x%08x\n", derivedMethodAttribs,
unboxedMethodAttribs);
derivedMethodAttribs = unboxedMethodAttribs;
optimizedTheBox = true;
}
else
{
JITDUMP("Sorry, failed to undo the box -- can't convert to local copy\n");
}
}
else
{
JITDUMP("Sorry, failed to undo the box -- can't find method table arg\n");
}
}
else
{
JITDUMP("Found unboxed entry point, trying to simplify box to a local copy\n");
GenTree* localCopyThis = gtTryRemoveBoxUpstreamEffects(thisObj, BR_MAKE_LOCAL_COPY);
if (localCopyThis != nullptr)
{
JITDUMP("Success! invoking unboxed entry point on local copy\n");
call->gtCallThisArg = gtNewCallArgs(localCopyThis);
call->gtCallMethHnd = unboxedEntryMethod;
call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED;
derivedMethod = unboxedEntryMethod;
pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedUnboxedMethod;
optimizedTheBox = true;
}
else
{
JITDUMP("Sorry, failed to undo the box\n");
}
}
if (optimizedTheBox)
{
#if FEATURE_TAILCALL_OPT
if (call->IsImplicitTailCall())
{
JITDUMP("Clearing the implicit tail call flag\n");
// If set, we clear the implicit tail call flag
// as we just introduced a new address taken local variable
//
call->gtCallMoreFlags &= ~GTF_CALL_M_IMPLICIT_TAILCALL;
}
#endif // FEATURE_TAILCALL_OPT
}
}
if (!optimizedTheBox)
{
// If we get here, we have a boxed value class that either wasn't boxed
// locally, or was boxed locally but we were unable to remove the box for
// various reasons.
//
// We can still update the call to invoke the unboxed entry, if the
// boxed value is simple.
//
if (requiresInstMethodTableArg)
{
// Get the method table from the boxed object.
//
GenTree* const thisArg = call->gtCallThisArg->GetNode();
GenTree* const clonedThisArg = gtClone(thisArg);
if (clonedThisArg == nullptr)
{
JITDUMP(
"unboxed entry needs MT arg, but `this` was too complex to clone. Deferring update.\n");
}
else
{
JITDUMP("revising call to invoke unboxed entry with additional method table arg\n");
GenTree* const methodTableArg = gtNewMethodTableLookup(clonedThisArg);
// Update the 'this' pointer to refer to the box payload
//
GenTree* const payloadOffset = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL);
GenTree* const boxPayload = gtNewOperNode(GT_ADD, TYP_BYREF, thisArg, payloadOffset);
call->gtCallThisArg = gtNewCallArgs(boxPayload);
call->gtCallMethHnd = unboxedEntryMethod;
call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED;
// Method attributes will differ because unboxed entry point is shared
//
const DWORD unboxedMethodAttribs = info.compCompHnd->getMethodAttribs(unboxedEntryMethod);
JITDUMP("Updating method attribs from 0x%08x to 0x%08x\n", derivedMethodAttribs,
unboxedMethodAttribs);
derivedMethod = unboxedEntryMethod;
pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedUnboxedMethod;
derivedMethodAttribs = unboxedMethodAttribs;
// Add the method table argument.
//
// Prepend for R2L arg passing or empty L2R passing
// Append for non-empty L2R
//
if ((Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) || (call->gtCallArgs == nullptr))
{
// If there's a ret buf, the method table is the second arg.
//
if (call->HasRetBufArg())
{
gtInsertNewCallArgAfter(methodTableArg, call->gtCallArgs);
}
else
{
call->gtCallArgs = gtPrependNewCallArg(methodTableArg, call->gtCallArgs);
}
}
else
{
GenTreeCall::Use* beforeArg = call->gtCallArgs;
while (beforeArg->GetNext() != nullptr)
{
beforeArg = beforeArg->GetNext();
}
beforeArg->SetNext(gtNewCallArgs(methodTableArg));
}
}
}
else
{
JITDUMP("revising call to invoke unboxed entry\n");
GenTree* const thisArg = call->gtCallThisArg->GetNode();
GenTree* const payloadOffset = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL);
GenTree* const boxPayload = gtNewOperNode(GT_ADD, TYP_BYREF, thisArg, payloadOffset);
call->gtCallThisArg = gtNewCallArgs(boxPayload);
call->gtCallMethHnd = unboxedEntryMethod;
call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED;
derivedMethod = unboxedEntryMethod;
pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedUnboxedMethod;
}
}
}
else
{
// Many of the low-level methods on value classes won't have unboxed entries,
// as they need access to the type of the object.
//
// Note this may be a cue for us to stack allocate the boxed object, since
// we probably know that these objects don't escape.
JITDUMP("Sorry, failed to find unboxed entry point\n");
}
}
}
// Need to update call info too.
//
*method = derivedMethod;
*methodFlags = derivedMethodAttribs;
// Update context handle
//
*pContextHandle = MAKE_METHODCONTEXT(derivedMethod);
// Update exact context handle.
//
if (pExactContextHandle != nullptr)
{
*pExactContextHandle = MAKE_CLASSCONTEXT(derivedClass);
}
#ifdef FEATURE_READYTORUN
if (opts.IsReadyToRun())
{
// For R2R, getCallInfo triggers bookkeeping on the zap
// side and acquires the actual symbol to call so we need to call it here.
// Look up the new call info.
CORINFO_CALL_INFO derivedCallInfo;
eeGetCallInfo(pDerivedResolvedToken, nullptr, CORINFO_CALLINFO_ALLOWINSTPARAM, &derivedCallInfo);
// Update the call.
call->gtCallMoreFlags &= ~GTF_CALL_M_VIRTSTUB_REL_INDIRECT;
call->gtCallMoreFlags &= ~GTF_CALL_M_R2R_REL_INDIRECT;
call->setEntryPoint(derivedCallInfo.codePointerLookup.constLookup);
}
#endif // FEATURE_READYTORUN
}
//------------------------------------------------------------------------
// impGetSpecialIntrinsicExactReturnType: Look for special cases where a call
// to an intrinsic returns an exact type
//
// Arguments:
// methodHnd -- handle for the special intrinsic method
//
// Returns:
// Exact class handle returned by the intrinsic call, if known.
// Nullptr if not known, or not likely to lead to beneficial optimization.
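//
// Sketch of the intended benefit (class names indicative, not guaranteed):
//
//     var cmp = EqualityComparer<string>.Default;
//     bool eq = cmp.Equals(a, b);
//
// Knowing the exact comparer class the VM will return for 'string' lets a
// later devirtualization turn cmp.Equals into a direct, inlineable call rather
// than an interface/virtual dispatch.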
CORINFO_CLASS_HANDLE Compiler::impGetSpecialIntrinsicExactReturnType(CORINFO_METHOD_HANDLE methodHnd)
{
JITDUMP("Special intrinsic: looking for exact type returned by %s\n", eeGetMethodFullName(methodHnd));
CORINFO_CLASS_HANDLE result = nullptr;
    // See what intrinsic we have...
const NamedIntrinsic ni = lookupNamedIntrinsic(methodHnd);
switch (ni)
{
case NI_System_Collections_Generic_Comparer_get_Default:
case NI_System_Collections_Generic_EqualityComparer_get_Default:
{
// Expect one class generic parameter; figure out which it is.
CORINFO_SIG_INFO sig;
info.compCompHnd->getMethodSig(methodHnd, &sig);
assert(sig.sigInst.classInstCount == 1);
CORINFO_CLASS_HANDLE typeHnd = sig.sigInst.classInst[0];
assert(typeHnd != nullptr);
            // Lookup can be incorrect when we have __Canon as it won't appear
// to implement any interface types.
//
// And if we do not have a final type, devirt & inlining is
// unlikely to result in much simplification.
//
// We can use CORINFO_FLG_FINAL to screen out both of these cases.
const DWORD typeAttribs = info.compCompHnd->getClassAttribs(typeHnd);
const bool isFinalType = ((typeAttribs & CORINFO_FLG_FINAL) != 0);
if (isFinalType)
{
if (ni == NI_System_Collections_Generic_EqualityComparer_get_Default)
{
result = info.compCompHnd->getDefaultEqualityComparerClass(typeHnd);
}
else
{
assert(ni == NI_System_Collections_Generic_Comparer_get_Default);
result = info.compCompHnd->getDefaultComparerClass(typeHnd);
}
JITDUMP("Special intrinsic for type %s: return type is %s\n", eeGetClassName(typeHnd),
result != nullptr ? eeGetClassName(result) : "unknown");
}
else
{
JITDUMP("Special intrinsic for type %s: type not final, so deferring opt\n", eeGetClassName(typeHnd));
}
break;
}
default:
{
JITDUMP("This special intrinsic not handled, sorry...\n");
break;
}
}
return result;
}
//------------------------------------------------------------------------
// impAllocateMethodPointerInfo: create a methodPointerInfo in jit-allocated memory and initialize it.
//
// Arguments:
// token - init value for the allocated token.
// tokenConstrained - init value for the constraint associated with the token
//
// Return Value:
//    pointer to the token in jit-allocated memory.
methodPointerInfo* Compiler::impAllocateMethodPointerInfo(const CORINFO_RESOLVED_TOKEN& token, mdToken tokenConstrained)
{
methodPointerInfo* memory = getAllocator(CMK_Unknown).allocate<methodPointerInfo>(1);
memory->m_token = token;
memory->m_tokenConstraint = tokenConstrained;
return memory;
}
//------------------------------------------------------------------------
// SpillRetExprHelper: iterate through the argument trees and spill ret_expr nodes to local variables.
//
class SpillRetExprHelper
{
public:
SpillRetExprHelper(Compiler* comp) : comp(comp)
{
}
void StoreRetExprResultsInArgs(GenTreeCall* call)
{
for (GenTreeCall::Use& use : call->Args())
{
comp->fgWalkTreePre(&use.NodeRef(), SpillRetExprVisitor, this);
}
if (call->gtCallThisArg != nullptr)
{
comp->fgWalkTreePre(&call->gtCallThisArg->NodeRef(), SpillRetExprVisitor, this);
}
}
private:
static Compiler::fgWalkResult SpillRetExprVisitor(GenTree** pTree, Compiler::fgWalkData* fgWalkPre)
{
assert((pTree != nullptr) && (*pTree != nullptr));
GenTree* tree = *pTree;
if ((tree->gtFlags & GTF_CALL) == 0)
{
// Trees with ret_expr are marked as GTF_CALL.
return Compiler::WALK_SKIP_SUBTREES;
}
if (tree->OperGet() == GT_RET_EXPR)
{
SpillRetExprHelper* walker = static_cast<SpillRetExprHelper*>(fgWalkPre->pCallbackData);
walker->StoreRetExprAsLocalVar(pTree);
}
return Compiler::WALK_CONTINUE;
}
void StoreRetExprAsLocalVar(GenTree** pRetExpr)
{
GenTree* retExpr = *pRetExpr;
assert(retExpr->OperGet() == GT_RET_EXPR);
const unsigned tmp = comp->lvaGrabTemp(true DEBUGARG("spilling ret_expr"));
JITDUMP("Storing return expression [%06u] to a local var V%02u.\n", comp->dspTreeID(retExpr), tmp);
comp->impAssignTempGen(tmp, retExpr, (unsigned)Compiler::CHECK_SPILL_NONE);
*pRetExpr = comp->gtNewLclvNode(tmp, retExpr->TypeGet());
if (retExpr->TypeGet() == TYP_REF)
{
assert(comp->lvaTable[tmp].lvSingleDef == 0);
comp->lvaTable[tmp].lvSingleDef = 1;
JITDUMP("Marked V%02u as a single def temp\n", tmp);
bool isExact = false;
bool isNonNull = false;
CORINFO_CLASS_HANDLE retClsHnd = comp->gtGetClassHandle(retExpr, &isExact, &isNonNull);
if (retClsHnd != nullptr)
{
comp->lvaSetClass(tmp, retClsHnd, isExact);
}
}
}
private:
Compiler* comp;
};
//------------------------------------------------------------------------
// addFatPointerCandidate: mark the call (and the containing method) as having a fat pointer candidate.
// Spill ret_expr in the call node, because they can't be cloned.
//
// Arguments:
// call - fat calli candidate
//
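// Rough sketch of the pattern this targets (hedged; the expansion itself lives
// in the indirect call transformer): an IL 'calli' whose target may be a "fat"
// function pointer -- e.g. one produced for a shared generic method -- where a
// set low bit on the pointer signals that an extra instantiation argument must
// be passed. Keeping the call free of GT_RET_EXPR nodes keeps it cloneable, so
// it can later be expanded into a runtime check with separate thin-pointer and
// fat-pointer call paths.
//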
void Compiler::addFatPointerCandidate(GenTreeCall* call)
{
JITDUMP("Marking call [%06u] as fat pointer candidate\n", dspTreeID(call));
setMethodHasFatPointer();
call->SetFatPointerCandidate();
SpillRetExprHelper helper(this);
helper.StoreRetExprResultsInArgs(call);
}
//------------------------------------------------------------------------
// considerGuardedDevirtualization: see if we can profitably guess at the
// class involved in an interface or virtual call.
//
// Arguments:
//
// call - potential guarded devirtualization candidate
//    ilOffset - IL offset of the call instruction
// isInterface - true if this is an interface call
// baseMethod - target method of the call
// baseClass - class that introduced the target method
// pContextHandle - context handle for the call
// objClass - class of 'this' in the call
//    objClassName - name of objClass
//
// Notes:
// Consults with VM to see if there's a likely class at runtime,
// if so, adds a candidate for guarded devirtualization.
//
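// Shape of the guarded expansion this sets up (simplified sketch):
//
//     if (obj->methodTable == likelyClass)
//         likelyMethod(obj, ...);      // direct call; becomes an inline candidate
//     else
//         obj->VirtualMethod(...);     // original virtual/interface dispatch
//
// The guess is only made when the PGO class profile reports 'likelyClass' with
// a high enough likelihood (see the thresholds below).
//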
void Compiler::considerGuardedDevirtualization(
GenTreeCall* call,
IL_OFFSET ilOffset,
bool isInterface,
CORINFO_METHOD_HANDLE baseMethod,
CORINFO_CLASS_HANDLE baseClass,
CORINFO_CONTEXT_HANDLE* pContextHandle DEBUGARG(CORINFO_CLASS_HANDLE objClass) DEBUGARG(const char* objClassName))
{
#if defined(DEBUG)
const char* callKind = isInterface ? "interface" : "virtual";
#endif
JITDUMP("Considering guarded devirtualization at IL offset %u (0x%x)\n", ilOffset, ilOffset);
// We currently only get likely class guesses when there is PGO data
// with class profiles.
//
if (fgPgoClassProfiles == 0)
{
JITDUMP("Not guessing for class: no class profile pgo data, or pgo disabled\n");
return;
}
// See if there's a likely guess for the class.
//
const unsigned likelihoodThreshold = isInterface ? 25 : 30;
unsigned likelihood = 0;
unsigned numberOfClasses = 0;
CORINFO_CLASS_HANDLE likelyClass = NO_CLASS_HANDLE;
bool doRandomDevirt = false;
const int maxLikelyClasses = 32;
LikelyClassRecord likelyClasses[maxLikelyClasses];
#ifdef DEBUG
// Optional stress mode to pick a random known class, rather than
// the most likely known class.
//
doRandomDevirt = JitConfig.JitRandomGuardedDevirtualization() != 0;
if (doRandomDevirt)
{
// Reuse the random inliner's random state.
//
CLRRandom* const random =
impInlineRoot()->m_inlineStrategy->GetRandom(JitConfig.JitRandomGuardedDevirtualization());
likelyClasses[0].clsHandle = getRandomClass(fgPgoSchema, fgPgoSchemaCount, fgPgoData, ilOffset, random);
likelyClasses[0].likelihood = 100;
if (likelyClasses[0].clsHandle != NO_CLASS_HANDLE)
{
numberOfClasses = 1;
}
}
else
#endif
{
numberOfClasses =
getLikelyClasses(likelyClasses, maxLikelyClasses, fgPgoSchema, fgPgoSchemaCount, fgPgoData, ilOffset);
}
// For now we only use the most popular type
likelihood = likelyClasses[0].likelihood;
likelyClass = likelyClasses[0].clsHandle;
if (numberOfClasses < 1)
{
JITDUMP("No likely class, sorry\n");
return;
}
assert(likelyClass != NO_CLASS_HANDLE);
// Print all likely classes
JITDUMP("%s classes for %p (%s):\n", doRandomDevirt ? "Random" : "Likely", dspPtr(objClass), objClassName)
for (UINT32 i = 0; i < numberOfClasses; i++)
{
JITDUMP(" %u) %p (%s) [likelihood:%u%%]\n", i + 1, likelyClasses[i].clsHandle,
eeGetClassName(likelyClasses[i].clsHandle), likelyClasses[i].likelihood);
}
// Todo: a more advanced heuristic using likelihood, number of
// classes, and the profile count for this block.
//
// For now we will guess if the likelihood is at least 25%/30% (intfc/virt), as studies
// have shown this transformation should pay off even if we guess wrong sometimes.
//
if (likelihood < likelihoodThreshold)
{
JITDUMP("Not guessing for class; likelihood is below %s call threshold %u\n", callKind, likelihoodThreshold);
return;
}
uint32_t const likelyClassAttribs = info.compCompHnd->getClassAttribs(likelyClass);
if ((likelyClassAttribs & CORINFO_FLG_ABSTRACT) != 0)
{
// We may see an abstract likely class, if we have a stale profile.
// No point guessing for this.
//
JITDUMP("Not guessing for class; abstract (stale profile)\n");
return;
}
// Figure out which method will be called.
//
CORINFO_DEVIRTUALIZATION_INFO dvInfo;
dvInfo.virtualMethod = baseMethod;
dvInfo.objClass = likelyClass;
dvInfo.context = *pContextHandle;
dvInfo.exactContext = *pContextHandle;
dvInfo.pResolvedTokenVirtualMethod = nullptr;
const bool canResolve = info.compCompHnd->resolveVirtualMethod(&dvInfo);
if (!canResolve)
{
JITDUMP("Can't figure out which method would be invoked, sorry\n");
return;
}
CORINFO_METHOD_HANDLE likelyMethod = dvInfo.devirtualizedMethod;
JITDUMP("%s call would invoke method %s\n", callKind, eeGetMethodName(likelyMethod, nullptr));
// Add this as a potential candidate.
//
uint32_t const likelyMethodAttribs = info.compCompHnd->getMethodAttribs(likelyMethod);
addGuardedDevirtualizationCandidate(call, likelyMethod, likelyClass, likelyMethodAttribs, likelyClassAttribs,
likelihood);
}
//------------------------------------------------------------------------
// addGuardedDevirtualizationCandidate: potentially mark the call as a guarded
// devirtualization candidate
//
// Notes:
//
// Call sites in rare or unoptimized code, and calls that require cookies are
// not marked as candidates.
//
// As part of marking the candidate, the code spills GT_RET_EXPRs anywhere in any
//    child tree, because we need to clone all these trees when we clone the call
// as part of guarded devirtualization, and these IR nodes can't be cloned.
//
// Arguments:
// call - potential guarded devirtualization candidate
// methodHandle - method that will be invoked if the class test succeeds
// classHandle - class that will be tested for at runtime
// methodAttr - attributes of the method
// classAttr - attributes of the class
// likelihood - odds that this class is the class seen at runtime
//
void Compiler::addGuardedDevirtualizationCandidate(GenTreeCall* call,
CORINFO_METHOD_HANDLE methodHandle,
CORINFO_CLASS_HANDLE classHandle,
unsigned methodAttr,
unsigned classAttr,
unsigned likelihood)
{
// This transformation only makes sense for virtual calls
assert(call->IsVirtual());
// Only mark calls if the feature is enabled.
const bool isEnabled = JitConfig.JitEnableGuardedDevirtualization() > 0;
if (!isEnabled)
{
JITDUMP("NOT Marking call [%06u] as guarded devirtualization candidate -- disabled by jit config\n",
dspTreeID(call));
return;
}
// Bail if not optimizing or the call site is very likely cold
if (compCurBB->isRunRarely() || opts.OptimizationDisabled())
{
JITDUMP("NOT Marking call [%06u] as guarded devirtualization candidate -- rare / dbg / minopts\n",
dspTreeID(call));
return;
}
// CT_INDIRECT calls may use the cookie, bail if so...
//
// If transforming these provides a benefit, we could save this off in the same way
// we save the stub address below.
if ((call->gtCallType == CT_INDIRECT) && (call->AsCall()->gtCallCookie != nullptr))
{
JITDUMP("NOT Marking call [%06u] as guarded devirtualization candidate -- CT_INDIRECT with cookie\n",
dspTreeID(call));
return;
}
#ifdef DEBUG
// See if disabled by range
//
static ConfigMethodRange JitGuardedDevirtualizationRange;
JitGuardedDevirtualizationRange.EnsureInit(JitConfig.JitGuardedDevirtualizationRange());
assert(!JitGuardedDevirtualizationRange.Error());
if (!JitGuardedDevirtualizationRange.Contains(impInlineRoot()->info.compMethodHash()))
{
JITDUMP("NOT Marking call [%06u] as guarded devirtualization candidate -- excluded by "
"JitGuardedDevirtualizationRange",
dspTreeID(call));
return;
}
#endif
// We're all set, proceed with candidate creation.
//
JITDUMP("Marking call [%06u] as guarded devirtualization candidate; will guess for class %s\n", dspTreeID(call),
eeGetClassName(classHandle));
setMethodHasGuardedDevirtualization();
call->SetGuardedDevirtualizationCandidate();
// Spill off any GT_RET_EXPR subtrees so we can clone the call.
//
SpillRetExprHelper helper(this);
helper.StoreRetExprResultsInArgs(call);
// Gather some information for later. Note we actually allocate InlineCandidateInfo
// here, as the devirtualized half of this call will likely become an inline candidate.
//
GuardedDevirtualizationCandidateInfo* pInfo = new (this, CMK_Inlining) InlineCandidateInfo;
pInfo->guardedMethodHandle = methodHandle;
pInfo->guardedMethodUnboxedEntryHandle = nullptr;
pInfo->guardedClassHandle = classHandle;
pInfo->likelihood = likelihood;
pInfo->requiresInstMethodTableArg = false;
// If the guarded class is a value class, look for an unboxed entry point.
//
if ((classAttr & CORINFO_FLG_VALUECLASS) != 0)
{
JITDUMP(" ... class is a value class, looking for unboxed entry\n");
bool requiresInstMethodTableArg = false;
CORINFO_METHOD_HANDLE unboxedEntryMethodHandle =
info.compCompHnd->getUnboxedEntry(methodHandle, &requiresInstMethodTableArg);
if (unboxedEntryMethodHandle != nullptr)
{
JITDUMP(" ... updating GDV candidate with unboxed entry info\n");
pInfo->guardedMethodUnboxedEntryHandle = unboxedEntryMethodHandle;
pInfo->requiresInstMethodTableArg = requiresInstMethodTableArg;
}
}
call->gtGuardedDevirtualizationCandidateInfo = pInfo;
}
void Compiler::addExpRuntimeLookupCandidate(GenTreeCall* call)
{
setMethodHasExpRuntimeLookup();
call->SetExpRuntimeLookup();
}
//------------------------------------------------------------------------
// impIsClassExact: check if a class handle can only describe values
// of exactly one class.
//
// Arguments:
// classHnd - handle for class in question
//
// Returns:
// true if class is final and not subject to special casting from
// variance or similar.
//
// Note:
// We are conservative on arrays of primitive types here.
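//
//    Indicative examples (assuming the usual corelib attributes): a sealed
//    class such as System.String is exact, while System.Object is not. A
//    string[] can also be treated as exact because its element type is itself
//    exact, whereas an array whose element type is non-sealed or subject to
//    variance cannot.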
bool Compiler::impIsClassExact(CORINFO_CLASS_HANDLE classHnd)
{
DWORD flags = info.compCompHnd->getClassAttribs(classHnd);
DWORD flagsMask = CORINFO_FLG_FINAL | CORINFO_FLG_VARIANCE | CORINFO_FLG_ARRAY;
if ((flags & flagsMask) == CORINFO_FLG_FINAL)
{
return true;
}
if ((flags & flagsMask) == (CORINFO_FLG_FINAL | CORINFO_FLG_ARRAY))
{
CORINFO_CLASS_HANDLE arrayElementHandle = nullptr;
CorInfoType type = info.compCompHnd->getChildType(classHnd, &arrayElementHandle);
if ((type == CORINFO_TYPE_CLASS) || (type == CORINFO_TYPE_VALUECLASS))
{
return impIsClassExact(arrayElementHandle);
}
}
return false;
}
//------------------------------------------------------------------------
// impCanSkipCovariantStoreCheck: see if storing a ref type value to an array
// can skip the array store covariance check.
//
// Arguments:
// value -- tree producing the value to store
// array -- tree representing the array to store to
//
// Returns:
// true if the store does not require a covariance check.
//
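// Cases handled below, with indicative C# shapes (hypothetical locals):
//
//     arr[i] = arr[j];            // store from the same, non-address-exposed array
//     arr[i] = null;              // a null store can never fail the check
//     objArr[i] = x;              // destination is known to be exactly object[]
//     sealedArr[i] = sealedVal;   // element type is exact/sealed and types match
//
// Anything else keeps the runtime covariant (array store) check.
//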
bool Compiler::impCanSkipCovariantStoreCheck(GenTree* value, GenTree* array)
{
// We should only call this when optimizing.
assert(opts.OptimizationEnabled());
    // Check for assignment to the same array, i.e. arrLcl[i] = arrLcl[j]
if (value->OperIs(GT_INDEX) && array->OperIs(GT_LCL_VAR))
{
GenTree* valueIndex = value->AsIndex()->Arr();
if (valueIndex->OperIs(GT_LCL_VAR))
{
unsigned valueLcl = valueIndex->AsLclVar()->GetLclNum();
unsigned arrayLcl = array->AsLclVar()->GetLclNum();
if ((valueLcl == arrayLcl) && !lvaGetDesc(arrayLcl)->IsAddressExposed())
{
JITDUMP("\nstelem of ref from same array: skipping covariant store check\n");
return true;
}
}
}
// Check for assignment of NULL.
if (value->OperIs(GT_CNS_INT))
{
assert(value->gtType == TYP_REF);
if (value->AsIntCon()->gtIconVal == 0)
{
JITDUMP("\nstelem of null: skipping covariant store check\n");
return true;
}
// Non-0 const refs can only occur with frozen objects
assert(value->IsIconHandle(GTF_ICON_STR_HDL));
assert(doesMethodHaveFrozenString() ||
(compIsForInlining() && impInlineInfo->InlinerCompiler->doesMethodHaveFrozenString()));
}
// Try and get a class handle for the array
if (value->gtType != TYP_REF)
{
return false;
}
bool arrayIsExact = false;
bool arrayIsNonNull = false;
CORINFO_CLASS_HANDLE arrayHandle = gtGetClassHandle(array, &arrayIsExact, &arrayIsNonNull);
if (arrayHandle == NO_CLASS_HANDLE)
{
return false;
}
// There are some methods in corelib where we're storing to an array but the IL
// doesn't reflect this (see SZArrayHelper). Avoid.
DWORD attribs = info.compCompHnd->getClassAttribs(arrayHandle);
if ((attribs & CORINFO_FLG_ARRAY) == 0)
{
return false;
}
CORINFO_CLASS_HANDLE arrayElementHandle = nullptr;
CorInfoType arrayElemType = info.compCompHnd->getChildType(arrayHandle, &arrayElementHandle);
// Verify array type handle is really an array of ref type
assert(arrayElemType == CORINFO_TYPE_CLASS);
// Check for exactly object[]
if (arrayIsExact && (arrayElementHandle == impGetObjectClass()))
{
JITDUMP("\nstelem to (exact) object[]: skipping covariant store check\n");
return true;
}
const bool arrayTypeIsSealed = impIsClassExact(arrayElementHandle);
if ((!arrayIsExact && !arrayTypeIsSealed) || (arrayElementHandle == NO_CLASS_HANDLE))
{
// Bail out if we don't know array's exact type
return false;
}
bool valueIsExact = false;
bool valueIsNonNull = false;
CORINFO_CLASS_HANDLE valueHandle = gtGetClassHandle(value, &valueIsExact, &valueIsNonNull);
// Array's type is sealed and equals to value's type
if (arrayTypeIsSealed && (valueHandle == arrayElementHandle))
{
JITDUMP("\nstelem to T[] with T exact: skipping covariant store check\n");
return true;
}
// Array's type is not sealed but we know its exact type
if (arrayIsExact && (valueHandle != NO_CLASS_HANDLE) &&
(info.compCompHnd->compareTypesForCast(valueHandle, arrayElementHandle) == TypeCompareState::Must))
{
JITDUMP("\nstelem to T[] with T exact: skipping covariant store check\n");
return true;
}
return false;
}
| 1 |
dotnet/runtime | 65,922 | Rely on PGO for isinst/castclass | Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | EgorBo | "2022-02-26T22:13:46Z" | "2022-04-01T21:25:56Z" | 063d08018602291f8dbd9d9b6055b8e1af874cb8 | f249a3d58b0b2bb55adca1a56c5a39cf446437b7 | Rely on PGO for isinst/castclass. Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | ./src/coreclr/jit/jitconfigvalues.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#if !defined(CONFIG_INTEGER) || !defined(CONFIG_STRING) || !defined(CONFIG_METHODSET)
#error CONFIG_INTEGER, CONFIG_STRING, and CONFIG_METHODSET must be defined before including this file.
#endif // !defined(CONFIG_INTEGER) || !defined(CONFIG_STRING) || !defined(CONFIG_METHODSET)
#ifdef DEBUG
#define OPT_CONFIG // Enable optimization level configuration.
#endif
#if defined(DEBUG)
///
/// JIT
///
CONFIG_INTEGER(AltJitLimit, W("AltJitLimit"), 0) // Max number of functions to use altjit for (decimal)
CONFIG_INTEGER(AltJitSkipOnAssert, W("AltJitSkipOnAssert"), 0) // If AltJit hits an assert, fall back to the fallback
// JIT. Useful in conjunction with
// COMPlus_ContinueOnAssert=1
CONFIG_INTEGER(BreakOnDumpToken, W("BreakOnDumpToken"), 0xffffffff) // Breaks when using internal logging on a
// particular token value.
CONFIG_INTEGER(DebugBreakOnVerificationFailure, W("DebugBreakOnVerificationFailure"), 0) // Halts the jit on
// verification failure
CONFIG_INTEGER(DiffableDasm, W("JitDiffableDasm"), 0) // Make the disassembly diff-able
CONFIG_INTEGER(JitDasmWithAddress, W("JitDasmWithAddress"), 0) // Print the process address next to each instruction of
// the disassembly
CONFIG_INTEGER(DisplayLoopHoistStats, W("JitLoopHoistStats"), 0) // Display JIT loop hoisting statistics
CONFIG_INTEGER(DisplayLsraStats, W("JitLsraStats"), 0) // Display JIT Linear Scan Register Allocator statistics
// If set to "1", display the stats in textual format.
// If set to "2", display the stats in csv format.
// If set to "3", display the stats in summarize format.
// Recommended to use with JitStdOutFile flag.
CONFIG_STRING(JitLsraOrdering, W("JitLsraOrdering")) // LSRA heuristics ordering
CONFIG_INTEGER(DumpJittedMethods, W("DumpJittedMethods"), 0) // Prints all jitted methods to the console
CONFIG_INTEGER(EnablePCRelAddr, W("JitEnablePCRelAddr"), 1) // Whether an absolute address can be encoded as a PC-rel offset by
// RyuJIT where possible
CONFIG_INTEGER(JitAssertOnMaxRAPasses, W("JitAssertOnMaxRAPasses"), 0)
CONFIG_INTEGER(JitBreakEmitOutputInstr, W("JitBreakEmitOutputInstr"), -1)
CONFIG_INTEGER(JitBreakMorphTree, W("JitBreakMorphTree"), 0xffffffff)
CONFIG_INTEGER(JitBreakOnBadCode, W("JitBreakOnBadCode"), 0)
CONFIG_INTEGER(JitBreakOnMinOpts, W("JITBreakOnMinOpts"), 0) // Halt if jit switches to MinOpts
CONFIG_INTEGER(JitBreakOnUnsafeCode, W("JitBreakOnUnsafeCode"), 0)
CONFIG_INTEGER(JitCloneLoops, W("JitCloneLoops"), 1) // If 0, don't clone. Otherwise clone loops for optimizations.
CONFIG_INTEGER(JitDebugLogLoopCloning, W("JitDebugLogLoopCloning"), 0) // In debug builds log places where loop cloning
// optimizations are performed on the fast path.
CONFIG_INTEGER(JitDefaultFill, W("JitDefaultFill"), 0xdd) // In debug builds, initialize the memory allocated by the nra
// with this byte.
CONFIG_INTEGER(JitAlignLoopMinBlockWeight,
W("JitAlignLoopMinBlockWeight"),
DEFAULT_ALIGN_LOOP_MIN_BLOCK_WEIGHT) // Minimum weight needed for the first block of a loop to make it a
// candidate for alignment.
CONFIG_INTEGER(JitAlignLoopMaxCodeSize,
W("JitAlignLoopMaxCodeSize"),
DEFAULT_MAX_LOOPSIZE_FOR_ALIGN) // For non-adaptive alignment, minimum loop size (in bytes) for which
// alignment will be done.
// Defaults to 3 blocks of 32 bytes chunks = 96 bytes.
CONFIG_INTEGER(JitAlignLoopBoundary,
W("JitAlignLoopBoundary"),
DEFAULT_ALIGN_LOOP_BOUNDARY) // For non-adaptive alignment, address boundary (power of 2) at which loop
// alignment should be done. By default, 32B.
CONFIG_INTEGER(JitAlignLoopForJcc,
W("JitAlignLoopForJcc"),
0) // If set, for non-adaptive alignment, ensure loop jmps are not on or cross alignment boundary.
CONFIG_INTEGER(JitAlignLoopAdaptive,
W("JitAlignLoopAdaptive"),
1) // If set, perform adaptive loop alignment that limits number of padding based on loop size.
CONFIG_INTEGER(JitHideAlignBehindJmp,
W("JitHideAlignBehindJmp"),
1) // If set, try to hide align instruction (if any) behind an unconditional jump instruction (if any)
// that is present before the loop start.
CONFIG_INTEGER(JitOptimizeStructHiddenBuffer, W("JitOptimizeStructHiddenBuffer"), 1) // Track assignments to locals done
// through return buffers.
// Print the alignment boundaries in disassembly.
CONFIG_INTEGER(JitDasmWithAlignmentBoundaries, W("JitDasmWithAlignmentBoundaries"), 0)
CONFIG_INTEGER(JitDirectAlloc, W("JitDirectAlloc"), 0)
CONFIG_INTEGER(JitDoubleAlign, W("JitDoubleAlign"), 1)
CONFIG_INTEGER(JitDumpASCII, W("JitDumpASCII"), 1) // Uses only ASCII characters in tree dumps
CONFIG_INTEGER(JitDumpTerseLsra, W("JitDumpTerseLsra"), 1) // Produce terse dump output for LSRA
CONFIG_INTEGER(JitDumpToDebugger, W("JitDumpToDebugger"), 0) // Output JitDump output to the debugger
CONFIG_INTEGER(JitDumpVerboseSsa, W("JitDumpVerboseSsa"), 0) // Produce especially verbose dump output for SSA
CONFIG_INTEGER(JitDumpVerboseTrees, W("JitDumpVerboseTrees"), 0) // Enable more verbose tree dumps
CONFIG_INTEGER(JitEmitPrintRefRegs, W("JitEmitPrintRefRegs"), 0)
CONFIG_INTEGER(JitEnableDevirtualization, W("JitEnableDevirtualization"), 1) // Enable devirtualization in importer
CONFIG_INTEGER(JitEnableLateDevirtualization, W("JitEnableLateDevirtualization"), 1) // Enable devirtualization after
// inlining
CONFIG_INTEGER(JitExpensiveDebugCheckLevel, W("JitExpensiveDebugCheckLevel"), 0) // Level indicates how much checking
// beyond the default to do in debug
// builds (currently 1-2)
CONFIG_INTEGER(JitForceFallback, W("JitForceFallback"), 0) // Set to non-zero to test NOWAY assert by forcing a retry
CONFIG_INTEGER(JitFullyInt, W("JitFullyInt"), 0) // Forces Fully interruptible code
CONFIG_INTEGER(JitFunctionTrace, W("JitFunctionTrace"), 0) // If non-zero, print JIT start/end logging
CONFIG_INTEGER(JitGCChecks, W("JitGCChecks"), 0)
CONFIG_INTEGER(JitGCInfoLogging, W("JitGCInfoLogging"), 0) // If true, prints GCInfo-related output to standard output.
CONFIG_INTEGER(JitHashBreak, W("JitHashBreak"), -1) // Same as JitBreak, but for a method hash
CONFIG_INTEGER(JitHashDump, W("JitHashDump"), -1) // Same as JitDump, but for a method hash
CONFIG_INTEGER(JitHashHalt, W("JitHashHalt"), -1) // Same as JitHalt, but for a method hash
CONFIG_INTEGER(JitInlineAdditionalMultiplier, W("JitInlineAdditionalMultiplier"), 0)
CONFIG_INTEGER(JitInlinePrintStats, W("JitInlinePrintStats"), 0)
CONFIG_INTEGER(JitInlineSize, W("JITInlineSize"), DEFAULT_MAX_INLINE_SIZE)
CONFIG_INTEGER(JitInlineDepth, W("JITInlineDepth"), DEFAULT_MAX_INLINE_DEPTH)
CONFIG_INTEGER(JitLongAddress, W("JitLongAddress"), 0) // Force using the large pseudo instruction form for long address
CONFIG_INTEGER(JitMaxUncheckedOffset, W("JitMaxUncheckedOffset"), 8)
CONFIG_INTEGER(JitMinOpts, W("JITMinOpts"), 0) // Forces MinOpts
CONFIG_INTEGER(JitMinOptsBbCount, W("JITMinOptsBbCount"), DEFAULT_MIN_OPTS_BB_COUNT) // Internal jit control of MinOpts
CONFIG_INTEGER(JitMinOptsCodeSize, W("JITMinOptsCodeSize"), DEFAULT_MIN_OPTS_CODE_SIZE) // Internal jit control of
// MinOpts
CONFIG_INTEGER(JitMinOptsInstrCount, W("JITMinOptsInstrCount"), DEFAULT_MIN_OPTS_INSTR_COUNT) // Internal jit control of
// MinOpts
CONFIG_INTEGER(JitMinOptsLvNumCount, W("JITMinOptsLvNumcount"), DEFAULT_MIN_OPTS_LV_NUM_COUNT) // Internal jit control
// of MinOpts
CONFIG_INTEGER(JitMinOptsLvRefCount, W("JITMinOptsLvRefcount"), DEFAULT_MIN_OPTS_LV_REF_COUNT) // Internal jit control
// of MinOpts
CONFIG_INTEGER(JitNoCSE, W("JitNoCSE"), 0)
CONFIG_INTEGER(JitNoCSE2, W("JitNoCSE2"), 0)
CONFIG_INTEGER(JitNoForceFallback, W("JitNoForceFallback"), 0) // Set to non-zero to prevent NOWAY assert testing.
// Overrides COMPlus_JitForceFallback and JIT stress
// flags.
CONFIG_INTEGER(JitNoForwardSub, W("JitNoForwardSub"), 0) // Disables forward sub
CONFIG_INTEGER(JitNoHoist, W("JitNoHoist"), 0)
CONFIG_INTEGER(JitNoInline, W("JitNoInline"), 0) // Disables inlining of all methods
CONFIG_INTEGER(JitNoMemoryBarriers, W("JitNoMemoryBarriers"), 0) // If 1, don't generate memory barriers
CONFIG_INTEGER(JitNoRegLoc, W("JitNoRegLoc"), 0)
CONFIG_INTEGER(JitNoStructPromotion, W("JitNoStructPromotion"), 0) // Disables struct promotion 1 - for all, 2 - for
// params.
CONFIG_INTEGER(JitNoUnroll, W("JitNoUnroll"), 0)
CONFIG_INTEGER(JitOrder, W("JitOrder"), 0)
CONFIG_INTEGER(JitQueryCurrentStaticFieldClass, W("JitQueryCurrentStaticFieldClass"), 1)
CONFIG_INTEGER(JitReportFastTailCallDecisions, W("JitReportFastTailCallDecisions"), 0)
CONFIG_INTEGER(JitPInvokeCheckEnabled, W("JITPInvokeCheckEnabled"), 0)
CONFIG_INTEGER(JitPInvokeEnabled, W("JITPInvokeEnabled"), 1)
// Controls verbosity for JitPrintInlinedMethods. Ignored for JitDump/NgenDump where
// it's always set.
CONFIG_INTEGER(JitPrintInlinedMethodsVerbose, W("JitPrintInlinedMethodsVerboseLevel"), 0)
// Prints a tree of inlinees for a specific method (use '*' for all methods)
CONFIG_METHODSET(JitPrintInlinedMethods, W("JitPrintInlinedMethods"))
CONFIG_METHODSET(JitPrintDevirtualizedMethods, W("JitPrintDevirtualizedMethods"))
CONFIG_INTEGER(JitProfileChecks, W("JitProfileChecks"), 0) // 1 enable in dumps, 2 assert if issues found
CONFIG_INTEGER(JitRequired, W("JITRequired"), -1)
CONFIG_INTEGER(JitRoundFloat, W("JITRoundFloat"), DEFAULT_ROUND_LEVEL)
CONFIG_INTEGER(JitStackAllocToLocalSize, W("JitStackAllocToLocalSize"), DEFAULT_MAX_LOCALLOC_TO_LOCAL_SIZE)
CONFIG_INTEGER(JitSkipArrayBoundCheck, W("JitSkipArrayBoundCheck"), 0)
CONFIG_INTEGER(JitSlowDebugChecksEnabled, W("JitSlowDebugChecksEnabled"), 1) // Turn on slow debug checks
CONFIG_INTEGER(JitSplitFunctionSize, W("JitSplitFunctionSize"), 0) // On ARM, use this as the maximum function/funclet
// size for creating function fragments (and creating
// multiple RUNTIME_FUNCTION entries)
CONFIG_INTEGER(JitSsaStress, W("JitSsaStress"), 0) // Perturb order of processing of blocks in SSA; 0 = no stress; 1 =
// use method hash; * = supplied value as random hash
CONFIG_INTEGER(JitStackChecks, W("JitStackChecks"), 0)
CONFIG_STRING(JitStdOutFile, W("JitStdOutFile")) // If set, sends JIT's stdout output to this file.
CONFIG_INTEGER(JitStress, W("JitStress"), 0) // Internal Jit stress mode: 0 = no stress, 2 = all stress, other = vary
// stress based on a hash of the method and this value
CONFIG_INTEGER(JitStressBBProf, W("JitStressBBProf"), 0) // Internal Jit stress mode
CONFIG_INTEGER(JitStressBiasedCSE, W("JitStressBiasedCSE"), 0x101) // Internal Jit stress mode: decimal bias value
// between (0,100) to perform CSE on a candidate.
// 100% = All CSEs. 0% = 0 CSE. (> 100) means no
// stress.
CONFIG_INTEGER(JitStressModeNamesOnly, W("JitStressModeNamesOnly"), 0) // Internal Jit stress: if nonzero, only enable
// stress modes listed in JitStressModeNames
CONFIG_INTEGER(JitStressRegs, W("JitStressRegs"), 0)
CONFIG_INTEGER(JitVNMapSelLimit, W("JitVNMapSelLimit"), 0) // If non-zero, assert if # of VNF_MapSelect applications
// considered reaches this
CONFIG_INTEGER(NgenHashDump, W("NgenHashDump"), -1) // same as JitHashDump, but for ngen
CONFIG_INTEGER(NgenOrder, W("NgenOrder"), 0)
CONFIG_INTEGER(RunAltJitCode, W("RunAltJitCode"), 1) // If non-zero, and the compilation succeeds for an AltJit, then
// use the code. If zero, then we always throw away the generated
// code and fall back to the default compiler.
CONFIG_INTEGER(RunComponentUnitTests, W("JitComponentUnitTests"), 0) // Run JIT component unit tests
CONFIG_INTEGER(ShouldInjectFault, W("InjectFault"), 0)
CONFIG_INTEGER(StressCOMCall, W("StressCOMCall"), 0)
CONFIG_INTEGER(TailcallStress, W("TailcallStress"), 0)
CONFIG_INTEGER(TreesBeforeAfterMorph, W("JitDumpBeforeAfterMorph"), 0) // If 1, display each tree before/after morphing
CONFIG_METHODSET(JitBreak, W("JitBreak")) // Stops in the importer when compiling a specified method
CONFIG_METHODSET(JitDebugBreak, W("JitDebugBreak"))
CONFIG_METHODSET(JitDisasm, W("JitDisasm")) // Dumps disassembly for specified method
CONFIG_STRING(JitDisasmAssemblies, W("JitDisasmAssemblies")) // Only show JitDisasm and related info for methods
// from this semicolon-delimited list of assemblies.
CONFIG_INTEGER(JitDisasmWithGC, W("JitDisasmWithGC"), 0) // Dump interleaved GC Info for any method disassembled.
CONFIG_METHODSET(JitDump, W("JitDump")) // Dumps trees for specified method
CONFIG_INTEGER(JitDumpTier0, W("JitDumpTier0"), 1) // Dump tier0 requests
CONFIG_INTEGER(JitDumpAtOSROffset, W("JitDumpAtOSROffset"), -1) // Only dump OSR requests for this offset
CONFIG_INTEGER(JitDumpInlinePhases, W("JitDumpInlinePhases"), 1) // Dump inline compiler phases
CONFIG_METHODSET(JitEHDump, W("JitEHDump")) // Dump the EH table for the method, as reported to the VM
CONFIG_METHODSET(JitExclude, W("JitExclude"))
CONFIG_METHODSET(JitForceProcedureSplitting, W("JitForceProcedureSplitting"))
CONFIG_METHODSET(JitGCDump, W("JitGCDump"))
CONFIG_METHODSET(JitDebugDump, W("JitDebugDump"))
CONFIG_METHODSET(JitHalt, W("JitHalt")) // Emits break instruction into jitted code
CONFIG_METHODSET(JitImportBreak, W("JitImportBreak"))
CONFIG_METHODSET(JitInclude, W("JitInclude"))
CONFIG_METHODSET(JitLateDisasm, W("JitLateDisasm"))
CONFIG_METHODSET(JitMinOptsName, W("JITMinOptsName")) // Forces MinOpts for a named function
CONFIG_METHODSET(JitNoProcedureSplitting, W("JitNoProcedureSplitting")) // Disallow procedure splitting for specified
// methods
CONFIG_METHODSET(JitNoProcedureSplittingEH, W("JitNoProcedureSplittingEH")) // Disallow procedure splitting for
// specified methods if they contain
// exception handling
CONFIG_METHODSET(JitStressOnly, W("JitStressOnly")) // Internal Jit stress mode: stress only the specified method(s)
CONFIG_METHODSET(JitUnwindDump, W("JitUnwindDump")) // Dump the unwind codes for the method
///
/// NGEN
///
CONFIG_METHODSET(NgenDisasm, W("NgenDisasm")) // Same as JitDisasm, but for ngen
CONFIG_METHODSET(NgenDump, W("NgenDump")) // Same as JitDump, but for ngen
CONFIG_METHODSET(NgenEHDump, W("NgenEHDump")) // Dump the EH table for the method, as reported to the VM
CONFIG_METHODSET(NgenGCDump, W("NgenGCDump"))
CONFIG_METHODSET(NgenDebugDump, W("NgenDebugDump"))
CONFIG_METHODSET(NgenUnwindDump, W("NgenUnwindDump")) // Dump the unwind codes for the method
///
/// JIT
///
CONFIG_METHODSET(JitDumpFg, W("JitDumpFg")) // Dumps Xml/Dot Flowgraph for specified method
CONFIG_STRING(JitDumpFgDir, W("JitDumpFgDir")) // Directory for Xml/Dot flowgraph dump(s)
CONFIG_STRING(JitDumpFgFile, W("JitDumpFgFile")) // Filename for Xml/Dot flowgraph dump(s) (default: "default")
CONFIG_STRING(JitDumpFgPhase, W("JitDumpFgPhase")) // Phase-based Xml/Dot flowgraph support. Set to the short name of a
// phase to see the flowgraph after that phase. Leave unset to dump
// after COLD-BLK (determine first cold block) or set to * for all
// phases
CONFIG_STRING(JitDumpFgPrePhase,
W("JitDumpFgPrePhase")) // Same as JitDumpFgPhase, but specifies to dump pre-phase, not post-phase.
CONFIG_INTEGER(JitDumpFgDot, W("JitDumpFgDot"), 1) // 0 == dump XML format; non-zero == dump DOT format
CONFIG_INTEGER(JitDumpFgEH, W("JitDumpFgEH"), 0) // 0 == no EH regions; non-zero == include EH regions
CONFIG_INTEGER(JitDumpFgLoops, W("JitDumpFgLoops"), 0) // 0 == no loop regions; non-zero == include loop regions
CONFIG_INTEGER(JitDumpFgConstrained, W("JitDumpFgConstrained"), 1) // 0 == don't constrain to mostly linear layout;
// non-zero == force mostly lexical block
// linear layout
CONFIG_INTEGER(JitDumpFgBlockID, W("JitDumpFgBlockID"), 0) // 0 == display block with bbNum; 1 == display with both
// bbNum and bbID
CONFIG_INTEGER(JitDumpFgBlockFlags, W("JitDumpFgBlockFlags"), 0) // 0 == don't display block flags; 1 == display flags
CONFIG_INTEGER(JitDumpFgLoopFlags, W("JitDumpFgLoopFlags"), 0) // 0 == don't display loop flags; 1 == display flags
CONFIG_STRING(JitDumpPreciseDebugInfoFile, W("JitDumpPreciseDebugInfoFile"))
CONFIG_INTEGER(JitDisasmWithDebugInfo, W("JitDisasmWithDebugInfo"), 0)
CONFIG_STRING(JitLateDisasmTo, W("JITLateDisasmTo"))
CONFIG_STRING(JitRange, W("JitRange"))
CONFIG_STRING(JitStressModeNames, W("JitStressModeNames")) // Internal Jit stress mode: stress using the given set of
// stress mode names, e.g. STRESS_REGS, STRESS_TAILCALL
CONFIG_STRING(JitStressModeNamesNot, W("JitStressModeNamesNot")) // Internal Jit stress mode: do NOT stress using the
// given set of stress mode names, e.g. STRESS_REGS,
// STRESS_TAILCALL
CONFIG_STRING(JitStressRange, W("JitStressRange")) // Internal Jit stress mode
///
/// NGEN
///
CONFIG_METHODSET(NgenDumpFg, W("NgenDumpFg")) // Ngen Xml/Dot flowgraph dump support
CONFIG_STRING(NgenDumpFgDir, W("NgenDumpFgDir")) // Ngen Xml/Dot flowgraph dump support
CONFIG_STRING(NgenDumpFgFile, W("NgenDumpFgFile")) // Ngen Xml/Dot flowgraph dump support
///
/// JIT Hardware Intrinsics
///
CONFIG_INTEGER(EnableIncompleteISAClass, W("EnableIncompleteISAClass"), 0) // Enable testing not-yet-implemented
// intrinsic classes
#endif // defined(DEBUG)
#if FEATURE_LOOP_ALIGN
CONFIG_INTEGER(JitAlignLoops, W("JitAlignLoops"), 1) // If set, align inner loops
#else
CONFIG_INTEGER(JitAlignLoops, W("JitAlignLoops"), 0)
#endif
///
/// JIT
///
#ifdef FEATURE_ENABLE_NO_RANGE_CHECKS
CONFIG_INTEGER(JitNoRangeChks, W("JitNoRngChks"), 0) // If 1, don't generate range checks
#endif
// AltJitAssertOnNYI should be 0 on targets where the JIT is under development or in a bring-up stage, so as to
// facilitate falling back to the main JIT on hitting an NYI.
#if defined(TARGET_ARM64) || defined(TARGET_X86)
CONFIG_INTEGER(AltJitAssertOnNYI, W("AltJitAssertOnNYI"), 0) // Controls the AltJit behavior of NYI stuff
#else // !defined(TARGET_ARM64) && !defined(TARGET_X86)
CONFIG_INTEGER(AltJitAssertOnNYI, W("AltJitAssertOnNYI"), 1) // Controls the AltJit behavior of NYI stuff
#endif // defined(TARGET_ARM64) || defined(TARGET_X86)
CONFIG_INTEGER(EnableEHWriteThru, W("EnableEHWriteThru"), 1) // Enable the register allocator to support EH-write thru:
// partial enregistration of vars exposed on EH boundaries
CONFIG_INTEGER(EnableMultiRegLocals, W("EnableMultiRegLocals"), 1) // Enable the enregistration of locals that are
// defined or used in a multireg context.
// clang-format on
#ifdef FEATURE_SIMD
CONFIG_INTEGER(JitDisableSimdVN, W("JitDisableSimdVN"), 0) // Default 0, ValueNumbering of SIMD nodes and HW Intrinsic
// nodes enabled
// If 1, then disable ValueNumbering of SIMD nodes
// If 2, then disable ValueNumbering of HW Intrinsic nodes
// If 3, disable both SIMD and HW Intrinsic nodes
#endif // FEATURE_SIMD
// Default 0, enable the CSE of Constants, including nearby offsets. (only for ARM64)
// If 1, disable all the CSE of Constants
// If 2, enable the CSE of Constants but don't combine with nearby offsets. (only for ARM64)
// If 3, enable the CSE of Constants including nearby offsets. (all platforms)
// If 4, enable the CSE of Constants but don't combine with nearby offsets. (all platforms)
//
CONFIG_INTEGER(JitConstCSE, W("JitConstCSE"), 0)
#define CONST_CSE_ENABLE_ARM64 0
#define CONST_CSE_DISABLE_ALL 1
#define CONST_CSE_ENABLE_ARM64_NO_SHARING 2
#define CONST_CSE_ENABLE_ALL 3
#define CONST_CSE_ENABLE_ALL_NO_SHARING 4
///
/// JIT
///
#if !defined(DEBUG) && !defined(_DEBUG)
CONFIG_INTEGER(JitEnableNoWayAssert, W("JitEnableNoWayAssert"), 0)
#else // defined(DEBUG) || defined(_DEBUG)
CONFIG_INTEGER(JitEnableNoWayAssert, W("JitEnableNoWayAssert"), 1)
#endif // !defined(DEBUG) && !defined(_DEBUG)
#if defined(TARGET_AMD64) || defined(TARGET_X86)
#define JitMinOptsTrackGCrefs_Default 0 // Not tracking GC refs in MinOpts is new behavior
#else
#define JitMinOptsTrackGCrefs_Default 1
#endif
CONFIG_INTEGER(JitMinOptsTrackGCrefs, W("JitMinOptsTrackGCrefs"), JitMinOptsTrackGCrefs_Default) // Track GC roots
// The following should be wrapped inside "#if MEASURE_MEM_ALLOC / #endif", but
// some files include this one without bringing in the definitions from "jit.h"
// so we don't always know what the "true" value of that flag should be. For now
// we take the easy way out and always include the flag, even in release builds
// (normally MEASURE_MEM_ALLOC is off for release builds but if it's toggled on
// for release in "jit.h" the flag would be missing for some includers).
// TODO-Cleanup: need to make 'MEASURE_MEM_ALLOC' well-defined here at all times.
CONFIG_INTEGER(DisplayMemStats, W("JitMemStats"), 0) // Display JIT memory usage statistics
#if defined(DEBUG)
CONFIG_INTEGER(JitEnregStats, W("JitEnregStats"), 0) // Display JIT enregistration statistics
#endif // DEBUG
CONFIG_INTEGER(JitAggressiveInlining, W("JitAggressiveInlining"), 0) // Aggressive inlining of all methods
CONFIG_INTEGER(JitELTHookEnabled, W("JitELTHookEnabled"), 0) // If 1, emit Enter/Leave/TailCall callbacks
CONFIG_INTEGER(JitInlineSIMDMultiplier, W("JitInlineSIMDMultiplier"), 3)
// Ex lclMAX_TRACKED constant.
CONFIG_INTEGER(JitMaxLocalsToTrack, W("JitMaxLocalsToTrack"), 0x400)
#if defined(FEATURE_ENABLE_NO_RANGE_CHECKS)
CONFIG_INTEGER(JitNoRngChks, W("JitNoRngChks"), 0) // If 1, don't generate range checks
#endif // defined(FEATURE_ENABLE_NO_RANGE_CHECKS)
#if defined(OPT_CONFIG)
CONFIG_INTEGER(JitDoAssertionProp, W("JitDoAssertionProp"), 1) // Perform assertion propagation optimization
CONFIG_INTEGER(JitDoCopyProp, W("JitDoCopyProp"), 1) // Perform copy propagation on variables that appear redundant
CONFIG_INTEGER(JitDoEarlyProp, W("JitDoEarlyProp"), 1) // Perform Early Value Propagation
CONFIG_INTEGER(JitDoLoopHoisting, W("JitDoLoopHoisting"), 1) // Perform loop hoisting on loop invariant values
CONFIG_INTEGER(JitDoLoopInversion, W("JitDoLoopInversion"), 1) // Perform loop inversion on "for/while" loops
CONFIG_INTEGER(JitDoRangeAnalysis, W("JitDoRangeAnalysis"), 1) // Perform range check analysis
CONFIG_INTEGER(JitDoRedundantBranchOpts, W("JitDoRedundantBranchOpts"), 1) // Perform redundant branch optimizations
CONFIG_INTEGER(JitDoSsa, W("JitDoSsa"), 1) // Perform Static Single Assignment (SSA) numbering on the variables
CONFIG_INTEGER(JitDoValueNumber, W("JitDoValueNumber"), 1) // Perform value numbering on method expressions
CONFIG_METHODSET(JitOptRepeat, W("JitOptRepeat")) // Runs optimizer multiple times on the method
CONFIG_INTEGER(JitOptRepeatCount, W("JitOptRepeatCount"), 2) // Number of times to repeat opts when repeating
#endif // defined(OPT_CONFIG)
CONFIG_INTEGER(JitTelemetry, W("JitTelemetry"), 1) // If non-zero, gather JIT telemetry data
// Max # of MapSelect's considered for a particular top-level invocation.
CONFIG_INTEGER(JitVNMapSelBudget, W("JitVNMapSelBudget"), DEFAULT_MAP_SELECT_BUDGET)
CONFIG_INTEGER(TailCallLoopOpt, W("TailCallLoopOpt"), 1) // Convert recursive tail calls to loops
CONFIG_METHODSET(AltJit, W("AltJit")) // Enables AltJit and selectively limits it to the specified methods.
CONFIG_METHODSET(AltJitNgen, W("AltJitNgen")) // Enables AltJit for NGEN and selectively limits it
// to the specified methods.
CONFIG_STRING(AltJitExcludeAssemblies, W("AltJitExcludeAssemblies")) // Do not use AltJit on this
// semicolon-delimited list of assemblies.
CONFIG_INTEGER(JitMeasureIR, W("JitMeasureIR"), 0) // If set, measure the IR size after some phases and report it in
// the time log.
CONFIG_STRING(JitFuncInfoFile, W("JitFuncInfoLogFile")) // If set, gather JIT function info and write to this file.
CONFIG_STRING(JitTimeLogCsv, W("JitTimeLogCsv")) // If set, gather JIT throughput data and write to a CSV file. This
// mode must be used in internal retail builds.
CONFIG_STRING(TailCallOpt, W("TailCallOpt"))
CONFIG_INTEGER(FastTailCalls, W("FastTailCalls"), 1) // If set, allow fast tail calls; otherwise allow only helper-based
// calls
// for explicit tail calls.
CONFIG_INTEGER(JitMeasureNowayAssert, W("JitMeasureNowayAssert"), 0) // Set to 1 to measure noway_assert usage. Only
// valid if MEASURE_NOWAY is defined.
CONFIG_STRING(JitMeasureNowayAssertFile,
W("JitMeasureNowayAssertFile")) // Set to file to write noway_assert usage to a file (if not
// set: stdout). Only valid if MEASURE_NOWAY is defined.
#if defined(DEBUG)
CONFIG_INTEGER(EnableExtraSuperPmiQueries, W("EnableExtraSuperPmiQueries"), 0) // Make extra queries to somewhat
// future-proof SuperPmi method contexts.
#endif // DEBUG
#if defined(DEBUG) || defined(INLINE_DATA)
CONFIG_INTEGER(JitInlineDumpData, W("JitInlineDumpData"), 0)
CONFIG_INTEGER(JitInlineDumpXml, W("JitInlineDumpXml"), 0) // 1 = full xml (+ failures in DEBUG)
// 2 = only methods with inlines (+ failures in DEBUG)
// 3 = only methods with inlines, no failures
CONFIG_STRING(JitInlineDumpXmlFile, W("JitInlineDumpXmlFile"))
CONFIG_INTEGER(JitInlinePolicyDumpXml, W("JitInlinePolicyDumpXml"), 0)
CONFIG_INTEGER(JitInlineLimit, W("JitInlineLimit"), -1)
CONFIG_INTEGER(JitInlinePolicyDiscretionary, W("JitInlinePolicyDiscretionary"), 0)
CONFIG_INTEGER(JitInlinePolicyFull, W("JitInlinePolicyFull"), 0)
CONFIG_INTEGER(JitInlinePolicySize, W("JitInlinePolicySize"), 0)
CONFIG_INTEGER(JitInlinePolicyRandom, W("JitInlinePolicyRandom"), 0) // nonzero enables; value is the external random
// seed
CONFIG_INTEGER(JitInlinePolicyReplay, W("JitInlinePolicyReplay"), 0)
CONFIG_STRING(JitNoInlineRange, W("JitNoInlineRange"))
CONFIG_STRING(JitInlineReplayFile, W("JitInlineReplayFile"))
#endif // defined(DEBUG) || defined(INLINE_DATA)
// Extended version of DefaultPolicy that includes a more precise IL scan,
// relies on PGO if it exists and generally is more aggressive.
CONFIG_INTEGER(JitExtDefaultPolicy, W("JitExtDefaultPolicy"), 1)
CONFIG_INTEGER(JitExtDefaultPolicyMaxIL, W("JitExtDefaultPolicyMaxIL"), 0x80)
CONFIG_INTEGER(JitExtDefaultPolicyMaxILProf, W("JitExtDefaultPolicyMaxILProf"), 0x400)
CONFIG_INTEGER(JitExtDefaultPolicyMaxBB, W("JitExtDefaultPolicyMaxBB"), 7)
// Inliner uses the following formula for PGO-driven decisions:
//
// BM = BM * ((1.0 - ProfTrust) + ProfWeight * ProfScale)
//
// Where BM is a benefit multiplier composed from various observations (e.g. "const arg makes a branch foldable").
// If the profile data can be trusted 100%, we can safely give up on inlining anything inside cold blocks
// (except the cases where inlining in cold blocks improves type info/escape analysis for the whole caller).
// For now, it's only applied for dynamic PGO.
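// As a purely illustrative reading of the formula (treating ProfTrust and ProfWeight * ProfScale as fractions that
// are already normalized): with ProfTrust = 0.5 and ProfWeight * ProfScale = 0 (a cold call site) BM is halved,
// with ProfWeight * ProfScale = 0.5 it is unchanged, and hotter call sites scale BM up accordingly.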
CONFIG_INTEGER(JitExtDefaultPolicyProfTrust, W("JitExtDefaultPolicyProfTrust"), 0x7)
CONFIG_INTEGER(JitExtDefaultPolicyProfScale, W("JitExtDefaultPolicyProfScale"), 0x2A)
CONFIG_INTEGER(JitInlinePolicyModel, W("JitInlinePolicyModel"), 0)
CONFIG_INTEGER(JitInlinePolicyProfile, W("JitInlinePolicyProfile"), 0)
CONFIG_INTEGER(JitInlinePolicyProfileThreshold, W("JitInlinePolicyProfileThreshold"), 40)
CONFIG_INTEGER(JitObjectStackAllocation, W("JitObjectStackAllocation"), 0)
CONFIG_INTEGER(JitEECallTimingInfo, W("JitEECallTimingInfo"), 0)
#if defined(DEBUG)
CONFIG_INTEGER(JitEnableFinallyCloning, W("JitEnableFinallyCloning"), 1)
CONFIG_INTEGER(JitEnableRemoveEmptyTry, W("JitEnableRemoveEmptyTry"), 1)
#endif // DEBUG
// Overall master enable for Guarded Devirtualization.
CONFIG_INTEGER(JitEnableGuardedDevirtualization, W("JitEnableGuardedDevirtualization"), 1)
// Various policies for GuardedDevirtualization
CONFIG_INTEGER(JitGuardedDevirtualizationChainLikelihood, W("JitGuardedDevirtualizationChainLikelihood"), 0x4B) // 75
CONFIG_INTEGER(JitGuardedDevirtualizationChainStatements, W("JitGuardedDevirtualizationChainStatements"), 4)
#if defined(DEBUG)
CONFIG_STRING(JitGuardedDevirtualizationRange, W("JitGuardedDevirtualizationRange"))
CONFIG_INTEGER(JitRandomGuardedDevirtualization, W("JitRandomGuardedDevirtualization"), 0)
#endif // DEBUG
// Enable insertion of patchpoints into Tier0 methods, switching to optimized where needed.
#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
CONFIG_INTEGER(TC_OnStackReplacement, W("TC_OnStackReplacement"), 1)
#else
CONFIG_INTEGER(TC_OnStackReplacement, W("TC_OnStackReplacement"), 0)
#endif // defined(TARGET_AMD64) || defined(TARGET_ARM64)
// Initial patchpoint counter value used by jitted code
CONFIG_INTEGER(TC_OnStackReplacement_InitialCounter, W("TC_OnStackReplacement_InitialCounter"), 1000)
// Enable partial compilation for Tier0 methods
CONFIG_INTEGER(TC_PartialCompilation, W("TC_PartialCompilation"), 0)
// Patchpoint strategy:
// 0 - backedge sources
// 1 - backedge targets
// 2 - adaptive (default)
CONFIG_INTEGER(TC_PatchpointStrategy, W("TC_PatchpointStrategy"), 2)
#if defined(DEBUG)
// Randomly sprinkle patchpoints. Value is the likelihood that any given stack-empty point becomes a patchpoint.
CONFIG_INTEGER(JitRandomOnStackReplacement, W("JitRandomOnStackReplacement"), 0)
// Place patchpoint at the specified IL offset, if possible. Overrides random placement.
CONFIG_INTEGER(JitOffsetOnStackReplacement, W("JitOffsetOnStackReplacement"), -1)
#endif // debug
#if defined(DEBUG)
// EnableOsrRange allows you to limit the set of methods that will rely on OSR to escape
// from Tier0 code. Methods outside the range that would normally be jitted at Tier0
// and have patchpoints will instead be switched to optimized.
CONFIG_STRING(JitEnableOsrRange, W("JitEnableOsrRange"))
// EnablePatchpointRange allows you to limit the set of Tier0 methods that
// will have patchpoints, and hence control which methods will create OSR methods.
// Unlike EnableOsrRange, it will not alter the optimization setting for methods
// outside the enabled range.
CONFIG_STRING(JitEnablePatchpointRange, W("JitEnablePatchpointRange"))
#endif
// Profile instrumentation options
CONFIG_INTEGER(JitMinimalJitProfiling, W("JitMinimalJitProfiling"), 1)
CONFIG_INTEGER(JitMinimalPrejitProfiling, W("JitMinimalPrejitProfiling"), 0)
CONFIG_INTEGER(JitCastProfiling, W("JitCastProfiling"), 0) // Profile castclass and isinst
CONFIG_INTEGER(JitClassProfiling, W("JitClassProfiling"), 1) // Profile virtual and interface calls
CONFIG_INTEGER(JitEdgeProfiling, W("JitEdgeProfiling"), 1) // Profile edges instead of blocks
CONFIG_INTEGER(JitCollect64BitCounts, W("JitCollect64BitCounts"), 0) // Collect counts as 64-bit values.
// Profile consumption options
CONFIG_INTEGER(JitDisablePgo, W("JitDisablePgo"), 0) // Ignore pgo data for all methods
#if defined(DEBUG)
CONFIG_STRING(JitEnablePgoRange, W("JitEnablePgoRange")) // Enable pgo data for only some methods
CONFIG_INTEGER(JitRandomEdgeCounts, W("JitRandomEdgeCounts"), 0) // Substitute random values for edge counts
CONFIG_INTEGER(JitCrossCheckDevirtualizationAndPGO, W("JitCrossCheckDevirtualizationAndPGO"), 0)
CONFIG_INTEGER(JitNoteFailedExactDevirtualization, W("JitNoteFailedExactDevirtualization"), 0)
#endif // debug
// Control when Virtual Calls are expanded
CONFIG_INTEGER(JitExpandCallsEarly, W("JitExpandCallsEarly"), 1) // Expand Call targets early (in the global morph
// phase)
// Force the generation of CFG checks
CONFIG_INTEGER(JitForceControlFlowGuard, W("JitForceControlFlowGuard"), 0);
// JitCFGUseDispatcher values:
// 0: Never use dispatcher
// 1: Use dispatcher on all platforms that support it
// 2: Default behavior, depends on platform (yes on x64, no on arm64)
CONFIG_INTEGER(JitCFGUseDispatcher, W("JitCFGUseDispatcher"), 2)
#if defined(DEBUG)
// JitFunctionFile: Name of a file that contains a list of functions. If the currently compiled function is in the
// file, certain other JIT config variables will be active. If the currently compiled function is not in the file,
// the specific JIT config variables will not be active.
//
// Functions are approximately in the format output by JitFunctionTrace, e.g.:
//
// System.CLRConfig:GetBoolValue(ref,byref):bool (MethodHash=3c54d35e)
// -- use the MethodHash, not the function name
//
// System.CLRConfig:GetBoolValue(ref,byref):bool
// -- use just the name
//
// Lines with leading ";" "#" or "//" are ignored.
//
// If this is unset, then the JIT config values have their normal behavior.
//
CONFIG_STRING(JitFunctionFile, W("JitFunctionFile"))
#endif // DEBUG
#if defined(DEBUG)
#if defined(TARGET_ARM64)
// JitSaveFpLrWithCalleeSavedRegisters:
// 0: use default frame type decision
// 1: disable frames that save FP/LR registers with the callee-saved registers (at the top of the frame)
// 2: force all frames to use the frame types that save FP/LR registers with the callee-saved registers (at the top
// of the frame)
CONFIG_INTEGER(JitSaveFpLrWithCalleeSavedRegisters, W("JitSaveFpLrWithCalleeSavedRegisters"), 0)
#endif // defined(TARGET_ARM64)
#endif // DEBUG
CONFIG_INTEGER(JitEnregStructLocals, W("JitEnregStructLocals"), 1) // Allow to enregister locals with struct type.
#undef CONFIG_INTEGER
#undef CONFIG_STRING
#undef CONFIG_METHODSET
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#if !defined(CONFIG_INTEGER) || !defined(CONFIG_STRING) || !defined(CONFIG_METHODSET)
#error CONFIG_INTEGER, CONFIG_STRING, and CONFIG_METHODSET must be defined before including this file.
#endif // !defined(CONFIG_INTEGER) || !defined(CONFIG_STRING) || !defined(CONFIG_METHODSET)
#ifdef DEBUG
#define OPT_CONFIG // Enable optimization level configuration.
#endif
#if defined(DEBUG)
///
/// JIT
///
CONFIG_INTEGER(AltJitLimit, W("AltJitLimit"), 0) // Max number of functions to use altjit for (decimal)
CONFIG_INTEGER(AltJitSkipOnAssert, W("AltJitSkipOnAssert"), 0) // If AltJit hits an assert, fall back to the fallback
// JIT. Useful in conjunction with
// COMPlus_ContinueOnAssert=1
CONFIG_INTEGER(BreakOnDumpToken, W("BreakOnDumpToken"), 0xffffffff) // Breaks when using internal logging on a
// particular token value.
CONFIG_INTEGER(DebugBreakOnVerificationFailure, W("DebugBreakOnVerificationFailure"), 0) // Halts the jit on
// verification failure
CONFIG_INTEGER(DiffableDasm, W("JitDiffableDasm"), 0) // Make the disassembly diff-able
CONFIG_INTEGER(JitDasmWithAddress, W("JitDasmWithAddress"), 0) // Print the process address next to each instruction of
// the disassembly
CONFIG_INTEGER(DisplayLoopHoistStats, W("JitLoopHoistStats"), 0) // Display JIT loop hoisting statistics
CONFIG_INTEGER(DisplayLsraStats, W("JitLsraStats"), 0) // Display JIT Linear Scan Register Allocator statistics
// If set to "1", display the stats in textual format.
// If set to "2", display the stats in csv format.
// If set to "3", display the stats in summarize format.
// Recommended to use with JitStdOutFile flag.
CONFIG_STRING(JitLsraOrdering, W("JitLsraOrdering")) // LSRA heuristics ordering
CONFIG_INTEGER(DumpJittedMethods, W("DumpJittedMethods"), 0) // Prints all jitted methods to the console
CONFIG_INTEGER(EnablePCRelAddr, W("JitEnablePCRelAddr"), 1) // Whether an absolute address can be encoded as a PC-rel offset by
// RyuJIT where possible
CONFIG_INTEGER(JitAssertOnMaxRAPasses, W("JitAssertOnMaxRAPasses"), 0)
CONFIG_INTEGER(JitBreakEmitOutputInstr, W("JitBreakEmitOutputInstr"), -1)
CONFIG_INTEGER(JitBreakMorphTree, W("JitBreakMorphTree"), 0xffffffff)
CONFIG_INTEGER(JitBreakOnBadCode, W("JitBreakOnBadCode"), 0)
CONFIG_INTEGER(JitBreakOnMinOpts, W("JITBreakOnMinOpts"), 0) // Halt if jit switches to MinOpts
CONFIG_INTEGER(JitBreakOnUnsafeCode, W("JitBreakOnUnsafeCode"), 0)
CONFIG_INTEGER(JitCloneLoops, W("JitCloneLoops"), 1) // If 0, don't clone. Otherwise clone loops for optimizations.
CONFIG_INTEGER(JitDebugLogLoopCloning, W("JitDebugLogLoopCloning"), 0) // In debug builds log places where loop cloning
// optimizations are performed on the fast path.
CONFIG_INTEGER(JitDefaultFill, W("JitDefaultFill"), 0xdd) // In debug builds, initialize the memory allocated by the nra
// with this byte.
CONFIG_INTEGER(JitAlignLoopMinBlockWeight,
W("JitAlignLoopMinBlockWeight"),
DEFAULT_ALIGN_LOOP_MIN_BLOCK_WEIGHT) // Minimum weight needed for the first block of a loop to make it a
// candidate for alignment.
CONFIG_INTEGER(JitAlignLoopMaxCodeSize,
W("JitAlignLoopMaxCodeSize"),
DEFAULT_MAX_LOOPSIZE_FOR_ALIGN) // For non-adaptive alignment, minimum loop size (in bytes) for which
// alignment will be done.
// Defaults to 3 blocks of 32 bytes chunks = 96 bytes.
CONFIG_INTEGER(JitAlignLoopBoundary,
W("JitAlignLoopBoundary"),
DEFAULT_ALIGN_LOOP_BOUNDARY) // For non-adaptive alignment, address boundary (power of 2) at which loop
// alignment should be done. By default, 32B.
CONFIG_INTEGER(JitAlignLoopForJcc,
W("JitAlignLoopForJcc"),
0) // If set, for non-adaptive alignment, ensure loop jmps are not on or cross alignment boundary.
CONFIG_INTEGER(JitAlignLoopAdaptive,
W("JitAlignLoopAdaptive"),
1) // If set, perform adaptive loop alignment that limits number of padding based on loop size.
CONFIG_INTEGER(JitHideAlignBehindJmp,
W("JitHideAlignBehindJmp"),
1) // If set, try to hide align instruction (if any) behind an unconditional jump instruction (if any)
// that is present before the loop start.
CONFIG_INTEGER(JitOptimizeStructHiddenBuffer, W("JitOptimizeStructHiddenBuffer"), 1) // Track assignments to locals done
// through return buffers.
// Print the alignment boundaries in disassembly.
CONFIG_INTEGER(JitDasmWithAlignmentBoundaries, W("JitDasmWithAlignmentBoundaries"), 0)
CONFIG_INTEGER(JitDirectAlloc, W("JitDirectAlloc"), 0)
CONFIG_INTEGER(JitDoubleAlign, W("JitDoubleAlign"), 1)
CONFIG_INTEGER(JitDumpASCII, W("JitDumpASCII"), 1) // Uses only ASCII characters in tree dumps
CONFIG_INTEGER(JitDumpTerseLsra, W("JitDumpTerseLsra"), 1) // Produce terse dump output for LSRA
CONFIG_INTEGER(JitDumpToDebugger, W("JitDumpToDebugger"), 0) // Output JitDump output to the debugger
CONFIG_INTEGER(JitDumpVerboseSsa, W("JitDumpVerboseSsa"), 0) // Produce especially verbose dump output for SSA
CONFIG_INTEGER(JitDumpVerboseTrees, W("JitDumpVerboseTrees"), 0) // Enable more verbose tree dumps
CONFIG_INTEGER(JitEmitPrintRefRegs, W("JitEmitPrintRefRegs"), 0)
CONFIG_INTEGER(JitEnableDevirtualization, W("JitEnableDevirtualization"), 1) // Enable devirtualization in importer
CONFIG_INTEGER(JitEnableLateDevirtualization, W("JitEnableLateDevirtualization"), 1) // Enable devirtualization after
// inlining
CONFIG_INTEGER(JitExpensiveDebugCheckLevel, W("JitExpensiveDebugCheckLevel"), 0) // Level indicates how much checking
// beyond the default to do in debug
// builds (currently 1-2)
CONFIG_INTEGER(JitForceFallback, W("JitForceFallback"), 0) // Set to non-zero to test NOWAY assert by forcing a retry
CONFIG_INTEGER(JitFullyInt, W("JitFullyInt"), 0) // Forces Fully interruptible code
CONFIG_INTEGER(JitFunctionTrace, W("JitFunctionTrace"), 0) // If non-zero, print JIT start/end logging
CONFIG_INTEGER(JitGCChecks, W("JitGCChecks"), 0)
CONFIG_INTEGER(JitGCInfoLogging, W("JitGCInfoLogging"), 0) // If true, prints GCInfo-related output to standard output.
CONFIG_INTEGER(JitHashBreak, W("JitHashBreak"), -1) // Same as JitBreak, but for a method hash
CONFIG_INTEGER(JitHashDump, W("JitHashDump"), -1) // Same as JitDump, but for a method hash
CONFIG_INTEGER(JitHashHalt, W("JitHashHalt"), -1) // Same as JitHalt, but for a method hash
CONFIG_INTEGER(JitInlineAdditionalMultiplier, W("JitInlineAdditionalMultiplier"), 0)
CONFIG_INTEGER(JitInlinePrintStats, W("JitInlinePrintStats"), 0)
CONFIG_INTEGER(JitInlineSize, W("JITInlineSize"), DEFAULT_MAX_INLINE_SIZE)
CONFIG_INTEGER(JitInlineDepth, W("JITInlineDepth"), DEFAULT_MAX_INLINE_DEPTH)
CONFIG_INTEGER(JitLongAddress, W("JitLongAddress"), 0) // Force using the large pseudo instruction form for long address
CONFIG_INTEGER(JitMaxUncheckedOffset, W("JitMaxUncheckedOffset"), 8)
CONFIG_INTEGER(JitMinOpts, W("JITMinOpts"), 0) // Forces MinOpts
CONFIG_INTEGER(JitMinOptsBbCount, W("JITMinOptsBbCount"), DEFAULT_MIN_OPTS_BB_COUNT) // Internal jit control of MinOpts
CONFIG_INTEGER(JitMinOptsCodeSize, W("JITMinOptsCodeSize"), DEFAULT_MIN_OPTS_CODE_SIZE) // Internal jit control of
// MinOpts
CONFIG_INTEGER(JitMinOptsInstrCount, W("JITMinOptsInstrCount"), DEFAULT_MIN_OPTS_INSTR_COUNT) // Internal jit control of
// MinOpts
CONFIG_INTEGER(JitMinOptsLvNumCount, W("JITMinOptsLvNumcount"), DEFAULT_MIN_OPTS_LV_NUM_COUNT) // Internal jit control
// of MinOpts
CONFIG_INTEGER(JitMinOptsLvRefCount, W("JITMinOptsLvRefcount"), DEFAULT_MIN_OPTS_LV_REF_COUNT) // Internal jit control
// of MinOpts
CONFIG_INTEGER(JitNoCSE, W("JitNoCSE"), 0)
CONFIG_INTEGER(JitNoCSE2, W("JitNoCSE2"), 0)
CONFIG_INTEGER(JitNoForceFallback, W("JitNoForceFallback"), 0) // Set to non-zero to prevent NOWAY assert testing.
// Overrides COMPlus_JitForceFallback and JIT stress
// flags.
CONFIG_INTEGER(JitNoForwardSub, W("JitNoForwardSub"), 0) // Disables forward sub
CONFIG_INTEGER(JitNoHoist, W("JitNoHoist"), 0)
CONFIG_INTEGER(JitNoInline, W("JitNoInline"), 0) // Disables inlining of all methods
CONFIG_INTEGER(JitNoMemoryBarriers, W("JitNoMemoryBarriers"), 0) // If 1, don't generate memory barriers
CONFIG_INTEGER(JitNoRegLoc, W("JitNoRegLoc"), 0)
CONFIG_INTEGER(JitNoStructPromotion, W("JitNoStructPromotion"), 0) // Disables struct promotion 1 - for all, 2 - for
// params.
CONFIG_INTEGER(JitNoUnroll, W("JitNoUnroll"), 0)
CONFIG_INTEGER(JitOrder, W("JitOrder"), 0)
CONFIG_INTEGER(JitQueryCurrentStaticFieldClass, W("JitQueryCurrentStaticFieldClass"), 1)
CONFIG_INTEGER(JitReportFastTailCallDecisions, W("JitReportFastTailCallDecisions"), 0)
CONFIG_INTEGER(JitPInvokeCheckEnabled, W("JITPInvokeCheckEnabled"), 0)
CONFIG_INTEGER(JitPInvokeEnabled, W("JITPInvokeEnabled"), 1)
// Controls verbosity for JitPrintInlinedMethods. Ignored for JitDump/NgenDump where
// it's always set.
CONFIG_INTEGER(JitPrintInlinedMethodsVerbose, W("JitPrintInlinedMethodsVerboseLevel"), 0)
// Prints a tree of inlinees for a specific method (use '*' for all methods)
CONFIG_METHODSET(JitPrintInlinedMethods, W("JitPrintInlinedMethods"))
CONFIG_METHODSET(JitPrintDevirtualizedMethods, W("JitPrintDevirtualizedMethods"))
CONFIG_INTEGER(JitProfileChecks, W("JitProfileChecks"), 0) // 1 enable in dumps, 2 assert if issues found
CONFIG_INTEGER(JitRequired, W("JITRequired"), -1)
CONFIG_INTEGER(JitRoundFloat, W("JITRoundFloat"), DEFAULT_ROUND_LEVEL)
CONFIG_INTEGER(JitStackAllocToLocalSize, W("JitStackAllocToLocalSize"), DEFAULT_MAX_LOCALLOC_TO_LOCAL_SIZE)
CONFIG_INTEGER(JitSkipArrayBoundCheck, W("JitSkipArrayBoundCheck"), 0)
CONFIG_INTEGER(JitSlowDebugChecksEnabled, W("JitSlowDebugChecksEnabled"), 1) // Turn on slow debug checks
CONFIG_INTEGER(JitSplitFunctionSize, W("JitSplitFunctionSize"), 0) // On ARM, use this as the maximum function/funclet
// size for creating function fragments (and creating
// multiple RUNTIME_FUNCTION entries)
CONFIG_INTEGER(JitSsaStress, W("JitSsaStress"), 0) // Perturb order of processing of blocks in SSA; 0 = no stress; 1 =
// use method hash; * = supplied value as random hash
CONFIG_INTEGER(JitStackChecks, W("JitStackChecks"), 0)
CONFIG_STRING(JitStdOutFile, W("JitStdOutFile")) // If set, sends JIT's stdout output to this file.
CONFIG_INTEGER(JitStress, W("JitStress"), 0) // Internal Jit stress mode: 0 = no stress, 2 = all stress, other = vary
// stress based on a hash of the method and this value
CONFIG_INTEGER(JitStressBBProf, W("JitStressBBProf"), 0) // Internal Jit stress mode
CONFIG_INTEGER(JitStressBiasedCSE, W("JitStressBiasedCSE"), 0x101) // Internal Jit stress mode: decimal bias value
// between (0,100) to perform CSE on a candidate.
// 100% = All CSEs. 0% = 0 CSE. (> 100) means no
// stress.
CONFIG_INTEGER(JitStressModeNamesOnly, W("JitStressModeNamesOnly"), 0) // Internal Jit stress: if nonzero, only enable
// stress modes listed in JitStressModeNames
CONFIG_INTEGER(JitStressRegs, W("JitStressRegs"), 0)
CONFIG_INTEGER(JitVNMapSelLimit, W("JitVNMapSelLimit"), 0) // If non-zero, assert if # of VNF_MapSelect applications
// considered reaches this
CONFIG_INTEGER(NgenHashDump, W("NgenHashDump"), -1) // same as JitHashDump, but for ngen
CONFIG_INTEGER(NgenOrder, W("NgenOrder"), 0)
CONFIG_INTEGER(RunAltJitCode, W("RunAltJitCode"), 1) // If non-zero, and the compilation succeeds for an AltJit, then
// use the code. If zero, then we always throw away the generated
// code and fall back to the default compiler.
CONFIG_INTEGER(RunComponentUnitTests, W("JitComponentUnitTests"), 0) // Run JIT component unit tests
CONFIG_INTEGER(ShouldInjectFault, W("InjectFault"), 0)
CONFIG_INTEGER(StressCOMCall, W("StressCOMCall"), 0)
CONFIG_INTEGER(TailcallStress, W("TailcallStress"), 0)
CONFIG_INTEGER(TreesBeforeAfterMorph, W("JitDumpBeforeAfterMorph"), 0) // If 1, display each tree before/after morphing
CONFIG_METHODSET(JitBreak, W("JitBreak")) // Stops in the importer when compiling a specified method
CONFIG_METHODSET(JitDebugBreak, W("JitDebugBreak"))
CONFIG_METHODSET(JitDisasm, W("JitDisasm")) // Dumps disassembly for specified method
CONFIG_STRING(JitDisasmAssemblies, W("JitDisasmAssemblies")) // Only show JitDisasm and related info for methods
// from this semicolon-delimited list of assemblies.
CONFIG_INTEGER(JitDisasmWithGC, W("JitDisasmWithGC"), 0) // Dump interleaved GC Info for any method disassembled.
CONFIG_METHODSET(JitDump, W("JitDump")) // Dumps trees for specified method
CONFIG_INTEGER(JitDumpTier0, W("JitDumpTier0"), 1) // Dump tier0 requests
CONFIG_INTEGER(JitDumpAtOSROffset, W("JitDumpAtOSROffset"), -1) // Only dump OSR requests for this offset
CONFIG_INTEGER(JitDumpInlinePhases, W("JitDumpInlinePhases"), 1) // Dump inline compiler phases
CONFIG_METHODSET(JitEHDump, W("JitEHDump")) // Dump the EH table for the method, as reported to the VM
CONFIG_METHODSET(JitExclude, W("JitExclude"))
CONFIG_METHODSET(JitForceProcedureSplitting, W("JitForceProcedureSplitting"))
CONFIG_METHODSET(JitGCDump, W("JitGCDump"))
CONFIG_METHODSET(JitDebugDump, W("JitDebugDump"))
CONFIG_METHODSET(JitHalt, W("JitHalt")) // Emits break instruction into jitted code
CONFIG_METHODSET(JitImportBreak, W("JitImportBreak"))
CONFIG_METHODSET(JitInclude, W("JitInclude"))
CONFIG_METHODSET(JitLateDisasm, W("JitLateDisasm"))
CONFIG_METHODSET(JitMinOptsName, W("JITMinOptsName")) // Forces MinOpts for a named function
CONFIG_METHODSET(JitNoProcedureSplitting, W("JitNoProcedureSplitting")) // Disallow procedure splitting for specified
// methods
CONFIG_METHODSET(JitNoProcedureSplittingEH, W("JitNoProcedureSplittingEH")) // Disallow procedure splitting for
// specified methods if they contain
// exception handling
CONFIG_METHODSET(JitStressOnly, W("JitStressOnly")) // Internal Jit stress mode: stress only the specified method(s)
CONFIG_METHODSET(JitUnwindDump, W("JitUnwindDump")) // Dump the unwind codes for the method
///
/// NGEN
///
CONFIG_METHODSET(NgenDisasm, W("NgenDisasm")) // Same as JitDisasm, but for ngen
CONFIG_METHODSET(NgenDump, W("NgenDump")) // Same as JitDump, but for ngen
CONFIG_METHODSET(NgenEHDump, W("NgenEHDump")) // Dump the EH table for the method, as reported to the VM
CONFIG_METHODSET(NgenGCDump, W("NgenGCDump"))
CONFIG_METHODSET(NgenDebugDump, W("NgenDebugDump"))
CONFIG_METHODSET(NgenUnwindDump, W("NgenUnwindDump")) // Dump the unwind codes for the method
///
/// JIT
///
CONFIG_METHODSET(JitDumpFg, W("JitDumpFg")) // Dumps Xml/Dot Flowgraph for specified method
CONFIG_STRING(JitDumpFgDir, W("JitDumpFgDir")) // Directory for Xml/Dot flowgraph dump(s)
CONFIG_STRING(JitDumpFgFile, W("JitDumpFgFile")) // Filename for Xml/Dot flowgraph dump(s) (default: "default")
CONFIG_STRING(JitDumpFgPhase, W("JitDumpFgPhase")) // Phase-based Xml/Dot flowgraph support. Set to the short name of a
// phase to see the flowgraph after that phase. Leave unset to dump
// after COLD-BLK (determine first cold block) or set to * for all
// phases
CONFIG_STRING(JitDumpFgPrePhase,
W("JitDumpFgPrePhase")) // Same as JitDumpFgPhase, but specifies to dump pre-phase, not post-phase.
CONFIG_INTEGER(JitDumpFgDot, W("JitDumpFgDot"), 1) // 0 == dump XML format; non-zero == dump DOT format
CONFIG_INTEGER(JitDumpFgEH, W("JitDumpFgEH"), 0) // 0 == no EH regions; non-zero == include EH regions
CONFIG_INTEGER(JitDumpFgLoops, W("JitDumpFgLoops"), 0) // 0 == no loop regions; non-zero == include loop regions
CONFIG_INTEGER(JitDumpFgConstrained, W("JitDumpFgConstrained"), 1) // 0 == don't constrain to mostly linear layout;
// non-zero == force mostly lexical block
// linear layout
CONFIG_INTEGER(JitDumpFgBlockID, W("JitDumpFgBlockID"), 0) // 0 == display block with bbNum; 1 == display with both
// bbNum and bbID
CONFIG_INTEGER(JitDumpFgBlockFlags, W("JitDumpFgBlockFlags"), 0) // 0 == don't display block flags; 1 == display flags
CONFIG_INTEGER(JitDumpFgLoopFlags, W("JitDumpFgLoopFlags"), 0) // 0 == don't display loop flags; 1 == display flags
CONFIG_STRING(JitDumpPreciseDebugInfoFile, W("JitDumpPreciseDebugInfoFile"))
CONFIG_INTEGER(JitDisasmWithDebugInfo, W("JitDisasmWithDebugInfo"), 0)
CONFIG_STRING(JitLateDisasmTo, W("JITLateDisasmTo"))
CONFIG_STRING(JitRange, W("JitRange"))
CONFIG_STRING(JitStressModeNames, W("JitStressModeNames")) // Internal Jit stress mode: stress using the given set of
// stress mode names, e.g. STRESS_REGS, STRESS_TAILCALL
CONFIG_STRING(JitStressModeNamesNot, W("JitStressModeNamesNot")) // Internal Jit stress mode: do NOT stress using the
// given set of stress mode names, e.g. STRESS_REGS,
// STRESS_TAILCALL
CONFIG_STRING(JitStressRange, W("JitStressRange")) // Internal Jit stress mode
///
/// NGEN
///
CONFIG_METHODSET(NgenDumpFg, W("NgenDumpFg")) // Ngen Xml/Dot flowgraph dump support
CONFIG_STRING(NgenDumpFgDir, W("NgenDumpFgDir")) // Ngen Xml/Dot flowgraph dump support
CONFIG_STRING(NgenDumpFgFile, W("NgenDumpFgFile")) // Ngen Xml/Dot flowgraph dump support
///
/// JIT Hardware Intrinsics
///
CONFIG_INTEGER(EnableIncompleteISAClass, W("EnableIncompleteISAClass"), 0) // Enable testing not-yet-implemented
// intrinsic classes
#endif // defined(DEBUG)
#if FEATURE_LOOP_ALIGN
CONFIG_INTEGER(JitAlignLoops, W("JitAlignLoops"), 1) // If set, align inner loops
#else
CONFIG_INTEGER(JitAlignLoops, W("JitAlignLoops"), 0)
#endif
///
/// JIT
///
#ifdef FEATURE_ENABLE_NO_RANGE_CHECKS
CONFIG_INTEGER(JitNoRangeChks, W("JitNoRngChks"), 0) // If 1, don't generate range checks
#endif
// AltJitAssertOnNYI should be 0 on targets where the JIT is under development or in a bring-up stage, so as to
// facilitate falling back to the main JIT on hitting an NYI.
#if defined(TARGET_ARM64) || defined(TARGET_X86)
CONFIG_INTEGER(AltJitAssertOnNYI, W("AltJitAssertOnNYI"), 0) // Controls the AltJit behavior of NYI stuff
#else // !defined(TARGET_ARM64) && !defined(TARGET_X86)
CONFIG_INTEGER(AltJitAssertOnNYI, W("AltJitAssertOnNYI"), 1) // Controls the AltJit behavior of NYI stuff
#endif // defined(TARGET_ARM64) || defined(TARGET_X86)
CONFIG_INTEGER(EnableEHWriteThru, W("EnableEHWriteThru"), 1) // Enable the register allocator to support EH-write thru:
// partial enregistration of vars exposed on EH boundaries
CONFIG_INTEGER(EnableMultiRegLocals, W("EnableMultiRegLocals"), 1) // Enable the enregistration of locals that are
// defined or used in a multireg context.
// clang-format on
#ifdef FEATURE_SIMD
CONFIG_INTEGER(JitDisableSimdVN, W("JitDisableSimdVN"), 0) // Default 0, ValueNumbering of SIMD nodes and HW Intrinsic
// nodes enabled
// If 1, then disable ValueNumbering of SIMD nodes
// If 2, then disable ValueNumbering of HW Intrinsic nodes
// If 3, disable both SIMD and HW Intrinsic nodes
#endif // FEATURE_SIMD
// Default 0, enable the CSE of Constants, including nearby offsets. (only for ARM64)
// If 1, disable all the CSE of Constants
// If 2, enable the CSE of Constants but don't combine with nearby offsets. (only for ARM64)
// If 3, enable the CSE of Constants including nearby offsets. (all platforms)
// If 4, enable the CSE of Constants but don't combine with nearby offsets. (all platforms)
//
CONFIG_INTEGER(JitConstCSE, W("JitConstCSE"), 0)
#define CONST_CSE_ENABLE_ARM64 0
#define CONST_CSE_DISABLE_ALL 1
#define CONST_CSE_ENABLE_ARM64_NO_SHARING 2
#define CONST_CSE_ENABLE_ALL 3
#define CONST_CSE_ENABLE_ALL_NO_SHARING 4
///
/// JIT
///
#if !defined(DEBUG) && !defined(_DEBUG)
CONFIG_INTEGER(JitEnableNoWayAssert, W("JitEnableNoWayAssert"), 0)
#else // defined(DEBUG) || defined(_DEBUG)
CONFIG_INTEGER(JitEnableNoWayAssert, W("JitEnableNoWayAssert"), 1)
#endif // !defined(DEBUG) && !defined(_DEBUG)
#if defined(TARGET_AMD64) || defined(TARGET_X86)
#define JitMinOptsTrackGCrefs_Default 0 // Not tracking GC refs in MinOpts is new behavior
#else
#define JitMinOptsTrackGCrefs_Default 1
#endif
CONFIG_INTEGER(JitMinOptsTrackGCrefs, W("JitMinOptsTrackGCrefs"), JitMinOptsTrackGCrefs_Default) // Track GC roots
// The following should be wrapped inside "#if MEASURE_MEM_ALLOC / #endif", but
// some files include this one without bringing in the definitions from "jit.h"
// so we don't always know what the "true" value of that flag should be. For now
// we take the easy way out and always include the flag, even in release builds
// (normally MEASURE_MEM_ALLOC is off for release builds but if it's toggled on
// for release in "jit.h" the flag would be missing for some includers).
// TODO-Cleanup: need to make 'MEASURE_MEM_ALLOC' well-defined here at all times.
CONFIG_INTEGER(DisplayMemStats, W("JitMemStats"), 0) // Display JIT memory usage statistics
#if defined(DEBUG)
CONFIG_INTEGER(JitEnregStats, W("JitEnregStats"), 0) // Display JIT enregistration statistics
#endif // DEBUG
CONFIG_INTEGER(JitAggressiveInlining, W("JitAggressiveInlining"), 0) // Aggressive inlining of all methods
CONFIG_INTEGER(JitELTHookEnabled, W("JitELTHookEnabled"), 0) // If 1, emit Enter/Leave/TailCall callbacks
CONFIG_INTEGER(JitInlineSIMDMultiplier, W("JitInlineSIMDMultiplier"), 3)
// Ex lclMAX_TRACKED constant.
CONFIG_INTEGER(JitMaxLocalsToTrack, W("JitMaxLocalsToTrack"), 0x400)
#if defined(FEATURE_ENABLE_NO_RANGE_CHECKS)
CONFIG_INTEGER(JitNoRngChks, W("JitNoRngChks"), 0) // If 1, don't generate range checks
#endif // defined(FEATURE_ENABLE_NO_RANGE_CHECKS)
#if defined(OPT_CONFIG)
CONFIG_INTEGER(JitDoAssertionProp, W("JitDoAssertionProp"), 1) // Perform assertion propagation optimization
CONFIG_INTEGER(JitDoCopyProp, W("JitDoCopyProp"), 1) // Perform copy propagation on variables that appear redundant
CONFIG_INTEGER(JitDoEarlyProp, W("JitDoEarlyProp"), 1) // Perform Early Value Propagation
CONFIG_INTEGER(JitDoLoopHoisting, W("JitDoLoopHoisting"), 1) // Perform loop hoisting on loop invariant values
CONFIG_INTEGER(JitDoLoopInversion, W("JitDoLoopInversion"), 1) // Perform loop inversion on "for/while" loops
CONFIG_INTEGER(JitDoRangeAnalysis, W("JitDoRangeAnalysis"), 1) // Perform range check analysis
CONFIG_INTEGER(JitDoRedundantBranchOpts, W("JitDoRedundantBranchOpts"), 1) // Perform redundant branch optimizations
CONFIG_INTEGER(JitDoSsa, W("JitDoSsa"), 1) // Perform Static Single Assignment (SSA) numbering on the variables
CONFIG_INTEGER(JitDoValueNumber, W("JitDoValueNumber"), 1) // Perform value numbering on method expressions
CONFIG_METHODSET(JitOptRepeat, W("JitOptRepeat")) // Runs optimizer multiple times on the method
CONFIG_INTEGER(JitOptRepeatCount, W("JitOptRepeatCount"), 2) // Number of times to repeat opts when repeating
#endif // defined(OPT_CONFIG)
CONFIG_INTEGER(JitTelemetry, W("JitTelemetry"), 1) // If non-zero, gather JIT telemetry data
// Max # of MapSelect's considered for a particular top-level invocation.
CONFIG_INTEGER(JitVNMapSelBudget, W("JitVNMapSelBudget"), DEFAULT_MAP_SELECT_BUDGET)
CONFIG_INTEGER(TailCallLoopOpt, W("TailCallLoopOpt"), 1) // Convert recursive tail calls to loops
CONFIG_METHODSET(AltJit, W("AltJit")) // Enables AltJit and selectively limits it to the specified methods.
CONFIG_METHODSET(AltJitNgen, W("AltJitNgen")) // Enables AltJit for NGEN and selectively limits it
// to the specified methods.
CONFIG_STRING(AltJitExcludeAssemblies, W("AltJitExcludeAssemblies")) // Do not use AltJit on this
// semicolon-delimited list of assemblies.
CONFIG_INTEGER(JitMeasureIR, W("JitMeasureIR"), 0) // If set, measure the IR size after some phases and report it in
// the time log.
CONFIG_STRING(JitFuncInfoFile, W("JitFuncInfoLogFile")) // If set, gather JIT function info and write to this file.
CONFIG_STRING(JitTimeLogCsv, W("JitTimeLogCsv")) // If set, gather JIT throughput data and write to a CSV file. This
// mode must be used in internal retail builds.
CONFIG_STRING(TailCallOpt, W("TailCallOpt"))
CONFIG_INTEGER(FastTailCalls, W("FastTailCalls"), 1) // If set, allow fast tail calls; otherwise allow only
                                                     // helper-based calls for explicit tail calls.
CONFIG_INTEGER(JitMeasureNowayAssert, W("JitMeasureNowayAssert"), 0) // Set to 1 to measure noway_assert usage. Only
// valid if MEASURE_NOWAY is defined.
CONFIG_STRING(JitMeasureNowayAssertFile,
W("JitMeasureNowayAssertFile")) // Set to file to write noway_assert usage to a file (if not
// set: stdout). Only valid if MEASURE_NOWAY is defined.
#if defined(DEBUG)
CONFIG_INTEGER(EnableExtraSuperPmiQueries, W("EnableExtraSuperPmiQueries"), 0) // Make extra queries to somewhat
// future-proof SuperPmi method contexts.
#endif // DEBUG
#if defined(DEBUG) || defined(INLINE_DATA)
CONFIG_INTEGER(JitInlineDumpData, W("JitInlineDumpData"), 0)
CONFIG_INTEGER(JitInlineDumpXml, W("JitInlineDumpXml"), 0) // 1 = full xml (+ failures in DEBUG)
// 2 = only methods with inlines (+ failures in DEBUG)
// 3 = only methods with inlines, no failures
CONFIG_STRING(JitInlineDumpXmlFile, W("JitInlineDumpXmlFile"))
CONFIG_INTEGER(JitInlinePolicyDumpXml, W("JitInlinePolicyDumpXml"), 0)
CONFIG_INTEGER(JitInlineLimit, W("JitInlineLimit"), -1)
CONFIG_INTEGER(JitInlinePolicyDiscretionary, W("JitInlinePolicyDiscretionary"), 0)
CONFIG_INTEGER(JitInlinePolicyFull, W("JitInlinePolicyFull"), 0)
CONFIG_INTEGER(JitInlinePolicySize, W("JitInlinePolicySize"), 0)
CONFIG_INTEGER(JitInlinePolicyRandom, W("JitInlinePolicyRandom"), 0) // nonzero enables; value is the external random
// seed
CONFIG_INTEGER(JitInlinePolicyReplay, W("JitInlinePolicyReplay"), 0)
CONFIG_STRING(JitNoInlineRange, W("JitNoInlineRange"))
CONFIG_STRING(JitInlineReplayFile, W("JitInlineReplayFile"))
#endif // defined(DEBUG) || defined(INLINE_DATA)
// Extended version of DefaultPolicy that performs a more precise IL scan,
// relies on PGO data when it exists, and is generally more aggressive.
CONFIG_INTEGER(JitExtDefaultPolicy, W("JitExtDefaultPolicy"), 1)
CONFIG_INTEGER(JitExtDefaultPolicyMaxIL, W("JitExtDefaultPolicyMaxIL"), 0x80)
CONFIG_INTEGER(JitExtDefaultPolicyMaxILProf, W("JitExtDefaultPolicyMaxILProf"), 0x400)
CONFIG_INTEGER(JitExtDefaultPolicyMaxBB, W("JitExtDefaultPolicyMaxBB"), 7)
// Inliner uses the following formula for PGO-driven decisions:
//
// BM = BM * ((1.0 - ProfTrust) + ProfWeight * ProfScale)
//
// Where BM is a benefit multiplier composed from various observations (e.g. "const arg makes a branch foldable").
// If the profile data can be trusted 100%, we can safely give up on inlining anything inside cold blocks
// (except the cases where inlining in cold blocks improves type info/escape analysis for the whole caller).
// For now, it's only applied for dynamic PGO.
CONFIG_INTEGER(JitExtDefaultPolicyProfTrust, W("JitExtDefaultPolicyProfTrust"), 0x7)
CONFIG_INTEGER(JitExtDefaultPolicyProfScale, W("JitExtDefaultPolicyProfScale"), 0x2A)
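// Illustrative worked example (hypothetical numbers for this sketch, not the raw hex defaults above):
// with ProfTrust = 0.5, ProfScale = 2.0 and a cold callsite whose ProfWeight (assumed normalized to
// [0..1]) is 0.1, the formula gives BM * ((1.0 - 0.5) + 0.1 * 2.0) = BM * 0.7, shrinking the benefit;
// for a hot callsite with ProfWeight = 1.0 it gives BM * 2.5, boosting it.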
CONFIG_INTEGER(JitInlinePolicyModel, W("JitInlinePolicyModel"), 0)
CONFIG_INTEGER(JitInlinePolicyProfile, W("JitInlinePolicyProfile"), 0)
CONFIG_INTEGER(JitInlinePolicyProfileThreshold, W("JitInlinePolicyProfileThreshold"), 40)
CONFIG_INTEGER(JitObjectStackAllocation, W("JitObjectStackAllocation"), 0)
CONFIG_INTEGER(JitEECallTimingInfo, W("JitEECallTimingInfo"), 0)
#if defined(DEBUG)
CONFIG_INTEGER(JitEnableFinallyCloning, W("JitEnableFinallyCloning"), 1)
CONFIG_INTEGER(JitEnableRemoveEmptyTry, W("JitEnableRemoveEmptyTry"), 1)
#endif // DEBUG
// Overall master enable for Guarded Devirtualization.
CONFIG_INTEGER(JitEnableGuardedDevirtualization, W("JitEnableGuardedDevirtualization"), 1)
// Various policies for GuardedDevirtualization
CONFIG_INTEGER(JitGuardedDevirtualizationChainLikelihood, W("JitGuardedDevirtualizationChainLikelihood"), 0x4B) // 75
CONFIG_INTEGER(JitGuardedDevirtualizationChainStatements, W("JitGuardedDevirtualizationChainStatements"), 4)
#if defined(DEBUG)
CONFIG_STRING(JitGuardedDevirtualizationRange, W("JitGuardedDevirtualizationRange"))
CONFIG_INTEGER(JitRandomGuardedDevirtualization, W("JitRandomGuardedDevirtualization"), 0)
#endif // DEBUG
// Enable insertion of patchpoints into Tier0 methods, switching to optimized where needed.
#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
CONFIG_INTEGER(TC_OnStackReplacement, W("TC_OnStackReplacement"), 1)
#else
CONFIG_INTEGER(TC_OnStackReplacement, W("TC_OnStackReplacement"), 0)
#endif // defined(TARGET_AMD64) || defined(TARGET_ARM64)
// Initial patchpoint counter value used by jitted code
CONFIG_INTEGER(TC_OnStackReplacement_InitialCounter, W("TC_OnStackReplacement_InitialCounter"), 1000)
// Enable partial compilation for Tier0 methods
CONFIG_INTEGER(TC_PartialCompilation, W("TC_PartialCompilation"), 0)
// Patchpoint strategy:
// 0 - backedge sources
// 1 - backedge targets
// 2 - adaptive (default)
CONFIG_INTEGER(TC_PatchpointStrategy, W("TC_PatchpointStrategy"), 2)
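// Example usage (illustrative): like the other knobs in this file, these are typically set through the
// DOTNET_/COMPlus_ environment-variable prefix, e.g.
//   export DOTNET_TC_OnStackReplacement=1
//   export DOTNET_TC_OnStackReplacement_InitialCounter=100
//   export DOTNET_TC_PatchpointStrategy=2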
#if defined(DEBUG)
// Randomly sprinkle patchpoints. The value is the likelihood that any given stack-empty point becomes a patchpoint.
CONFIG_INTEGER(JitRandomOnStackReplacement, W("JitRandomOnStackReplacement"), 0)
// Place patchpoint at the specified IL offset, if possible. Overrides random placement.
CONFIG_INTEGER(JitOffsetOnStackReplacement, W("JitOffsetOnStackReplacement"), -1)
#endif // DEBUG
#if defined(DEBUG)
// EnableOsrRange allows you to limit the set of methods that will rely on OSR to escape
// from Tier0 code. Methods outside the range that would normally be jitted at Tier0
// and have patchpoints will instead be switched to optimized.
CONFIG_STRING(JitEnableOsrRange, W("JitEnableOsrRange"))
// EnablePatchpointRange allows you to limit the set of Tier0 methods that
// will have patchpoints, and hence control which methods will create OSR methods.
// Unlike EnableOsrRange, it will not alter the optimization setting for methods
// outside the enabled range.
CONFIG_STRING(JitEnablePatchpointRange, W("JitEnablePatchpointRange"))
#endif
// Profile instrumentation options
CONFIG_INTEGER(JitMinimalJitProfiling, W("JitMinimalJitProfiling"), 1)
CONFIG_INTEGER(JitMinimalPrejitProfiling, W("JitMinimalPrejitProfiling"), 0)
CONFIG_INTEGER(JitProfileCasts, W("JitProfileCasts"), 1) // Profile castclass/isinst
CONFIG_INTEGER(JitConsumeProfileForCasts, W("JitConsumeProfileForCasts"), 1) // Consume profile data (if any) for
// castclass/isinst
CONFIG_INTEGER(JitClassProfiling, W("JitClassProfiling"), 1) // Profile virtual and interface calls
CONFIG_INTEGER(JitEdgeProfiling, W("JitEdgeProfiling"), 1) // Profile edges instead of blocks
CONFIG_INTEGER(JitCollect64BitCounts, W("JitCollect64BitCounts"), 0) // Collect counts as 64-bit values.
// Profile consumption options
CONFIG_INTEGER(JitDisablePgo, W("JitDisablePgo"), 0) // Ignore pgo data for all methods
#if defined(DEBUG)
CONFIG_STRING(JitEnablePgoRange, W("JitEnablePgoRange")) // Enable pgo data for only some methods
CONFIG_INTEGER(JitRandomEdgeCounts, W("JitRandomEdgeCounts"), 0) // Substitute random values for edge counts
CONFIG_INTEGER(JitCrossCheckDevirtualizationAndPGO, W("JitCrossCheckDevirtualizationAndPGO"), 0)
CONFIG_INTEGER(JitNoteFailedExactDevirtualization, W("JitNoteFailedExactDevirtualization"), 0)
#endif // DEBUG
// Control when Virtual Calls are expanded
CONFIG_INTEGER(JitExpandCallsEarly, W("JitExpandCallsEarly"), 1) // Expand Call targets early (in the global morph
// phase)
// Force the generation of CFG checks
CONFIG_INTEGER(JitForceControlFlowGuard, W("JitForceControlFlowGuard"), 0)
// JitCFGUseDispatcher values:
// 0: Never use dispatcher
// 1: Use dispatcher on all platforms that support it
// 2: Default behavior, depends on platform (yes on x64, no on arm64)
CONFIG_INTEGER(JitCFGUseDispatcher, W("JitCFGUseDispatcher"), 2)
#if defined(DEBUG)
// JitFunctionFile: Name of a file that contains a list of functions. If the currently compiled function is in the
// file, certain other JIT config variables will be active. If the currently compiled function is not in the file,
// the specific JIT config variables will not be active.
//
// Functions are approximately in the format output by JitFunctionTrace, e.g.:
//
// System.CLRConfig:GetBoolValue(ref,byref):bool (MethodHash=3c54d35e)
// -- use the MethodHash, not the function name
//
// System.CLRConfig:GetBoolValue(ref,byref):bool
// -- use just the name
//
// Lines with leading ";" "#" or "//" are ignored.
//
// If this is unset, then the JIT config values have their normal behavior.
//
CONFIG_STRING(JitFunctionFile, W("JitFunctionFile"))
#endif // DEBUG
#if defined(DEBUG)
#if defined(TARGET_ARM64)
// JitSaveFpLrWithCalleeSavedRegisters:
// 0: use default frame type decision
// 1: disable frames that save FP/LR registers with the callee-saved registers (at the top of the frame)
// 2: force all frames to use the frame types that save FP/LR registers with the callee-saved registers (at the top
// of the frame)
CONFIG_INTEGER(JitSaveFpLrWithCalleeSavedRegisters, W("JitSaveFpLrWithCalleeSavedRegisters"), 0)
#endif // defined(TARGET_ARM64)
#endif // DEBUG
CONFIG_INTEGER(JitEnregStructLocals, W("JitEnregStructLocals"), 1) // Allow locals with struct types to be enregistered.
#undef CONFIG_INTEGER
#undef CONFIG_STRING
#undef CONFIG_METHODSET
| 1 |
dotnet/runtime | 65,922 | Rely on PGO for isinst/castclass | Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | EgorBo | "2022-02-26T22:13:46Z" | "2022-04-01T21:25:56Z" | 063d08018602291f8dbd9d9b6055b8e1af874cb8 | f249a3d58b0b2bb55adca1a56c5a39cf446437b7 | Rely on PGO for isinst/castclass. Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | ./src/coreclr/nativeaot/Runtime/UniversalTransitionHelpers.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "common.h"
#include "CommonTypes.h"
#include "CommonMacros.h"
#include "PalRedhawkCommon.h"
#include "PalRedhawk.h"
#ifdef _DEBUG
#define TRASH_SAVED_ARGUMENT_REGISTERS
#endif
#ifdef TRASH_SAVED_ARGUMENT_REGISTERS
//
// Define tables of predictable distinguished values that RhpUniversalTransition can use to
// trash argument registers after they have been saved into the transition frame.
//
// Trashing these registers is a testability aid that makes it easier to detect bugs where
// the transition frame content is not correctly propagated to the eventual callee.
//
// In the absence of trashing, such bugs can become undetectable if the code that
// dispatches the call happens to never touch the impacted argument register (e.g., xmm3 on
// amd64 or d5 on arm32). In such a case, the original enregistered argument will flow
// unmodified into the eventual callee, obscuring the fact that the dispatcher failed to
// propagate the transition frame copy of this register.
//
// These tables are manually aligned as a conservative safeguard to ensure that the
// consumers can use arbitrary access widths without ever needing to worry about alignment.
// The comments in each table show the %d/%f renderings of each 32-bit value, plus the
// %I64d/%f rendering of the combined 64-bit value of each aligned pair of 32-bit values.
//
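// The chosen constants also make trashed state easy to recognize at a glance in a dump: every 32-bit
// lane below carries a distinctive 0x0780xxxx (integer) or 0x4200xxxx (floating-point) pattern.
//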
#define TRASH_VALUE_ALIGNMENT 16
EXTERN_C
DECLSPEC_ALIGN(TRASH_VALUE_ALIGNMENT)
const uint32_t RhpIntegerTrashValues[] = {
// Lo32 Hi32 Lo32 Hi32 Hi32:Lo32
// ----------- ----------- --------- --------- ------------------
0x07801001U, 0x07802002U, // (125833217, 125837314) (540467148372316161)
0x07803003U, 0x07804004U, // (125841411, 125845508) (540502341334347779)
0x07805005U, 0x07806006U, // (125849605, 125853702) (540537534296379397)
0x07807007U, 0x07808008U, // (125857799, 125861896) (540572727258411015)
0x07809009U, 0x0780a00aU, // (125865993, 125870090) (540607920220442633)
0x0780b00bU, 0x0780c00cU, // (125874187, 125878284) (540643113182474251)
0x0780d00dU, 0x0780e00eU, // (125882381, 125886478) (540678306144505869)
0x0780f00fU, 0x07810010U, // (125890575, 125894672) (540713499106537487)
};
EXTERN_C
DECLSPEC_ALIGN(TRASH_VALUE_ALIGNMENT)
const uint32_t RhpFpTrashValues[] = {
// Lo32 Hi32 Lo32 Hi32 Hi32:Lo32
// ----------- ----------- ------------------- ------------------- -------------------
0x42001001U, 0x42002002U, // (32.0156288146972660, 32.0312576293945310) (8657061952.00781440)
0x42003003U, 0x42004004U, // (32.0468864440917970, 32.0625152587890630) (8724187200.02344320)
0x42005005U, 0x42006006U, // (32.0781440734863280, 32.0937728881835940) (8791312448.03907200)
0x42007007U, 0x42008008U, // (32.1094017028808590, 32.1250305175781250) (8858437696.05470090)
0x42009009U, 0x4200a00aU, // (32.1406593322753910, 32.1562881469726560) (8925562944.07032970)
0x4200b00bU, 0x4200c00cU, // (32.1719169616699220, 32.1875457763671880) (8992688192.08595850)
0x4200d00dU, 0x4200e00eU, // (32.2031745910644530, 32.2188034057617190) (9059813440.10158730)
0x4200f00fU, 0x42010010U, // (32.2344322204589840, 32.2500610351562500) (9126938688.11721610)
};
#endif // TRASH_SAVED_ARGUMENT_REGISTERS
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "common.h"
#include "CommonTypes.h"
#include "CommonMacros.h"
#include "PalRedhawkCommon.h"
#include "PalRedhawk.h"
#ifdef _DEBUG
#define TRASH_SAVED_ARGUMENT_REGISTERS
#endif
#ifdef TRASH_SAVED_ARGUMENT_REGISTERS
//
// Define tables of predictable distinguished values that RhpUniversalTransition can use to
// trash argument registers after they have been saved into the transition frame.
//
// Trashing these registers is a testability aid that makes it easier to detect bugs where
// the transition frame content is not correctly propagated to the eventual callee.
//
// In the absence of trashing, such bugs can become undetectable if the code that
// dispatches the call happens to never touch the impacted argument register (e.g., xmm3 on
// amd64 or d5 on arm32). In such a case, the original enregistered argument will flow
// unmodified into the eventual callee, obscuring the fact that the dispatcher failed to
// propagate the transition frame copy of this register.
//
// These tables are manually aligned as a conservative safeguard to ensure that the
// consumers can use arbitrary access widths without ever needing to worry about alignment.
// The comments in each table show the %d/%f renderings of each 32-bit value, plus the
// %I64d/%f rendering of the combined 64-bit value of each aligned pair of 32-bit values.
//
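// The chosen constants also make trashed state easy to recognize at a glance in a dump: every 32-bit
// lane below carries a distinctive 0x0780xxxx (integer) or 0x4200xxxx (floating-point) pattern.
//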
#define TRASH_VALUE_ALIGNMENT 16
EXTERN_C
DECLSPEC_ALIGN(TRASH_VALUE_ALIGNMENT)
const uint32_t RhpIntegerTrashValues[] = {
// Lo32 Hi32 Lo32 Hi32 Hi32:Lo32
// ----------- ----------- --------- --------- ------------------
0x07801001U, 0x07802002U, // (125833217, 125837314) (540467148372316161)
0x07803003U, 0x07804004U, // (125841411, 125845508) (540502341334347779)
0x07805005U, 0x07806006U, // (125849605, 125853702) (540537534296379397)
0x07807007U, 0x07808008U, // (125857799, 125861896) (540572727258411015)
0x07809009U, 0x0780a00aU, // (125865993, 125870090) (540607920220442633)
0x0780b00bU, 0x0780c00cU, // (125874187, 125878284) (540643113182474251)
0x0780d00dU, 0x0780e00eU, // (125882381, 125886478) (540678306144505869)
0x0780f00fU, 0x07810010U, // (125890575, 125894672) (540713499106537487)
};
EXTERN_C
DECLSPEC_ALIGN(TRASH_VALUE_ALIGNMENT)
const uint32_t RhpFpTrashValues[] = {
// Lo32 Hi32 Lo32 Hi32 Hi32:Lo32
// ----------- ----------- ------------------- ------------------- -------------------
0x42001001U, 0x42002002U, // (32.0156288146972660, 32.0312576293945310) (8657061952.00781440)
0x42003003U, 0x42004004U, // (32.0468864440917970, 32.0625152587890630) (8724187200.02344320)
0x42005005U, 0x42006006U, // (32.0781440734863280, 32.0937728881835940) (8791312448.03907200)
0x42007007U, 0x42008008U, // (32.1094017028808590, 32.1250305175781250) (8858437696.05470090)
0x42009009U, 0x4200a00aU, // (32.1406593322753910, 32.1562881469726560) (8925562944.07032970)
0x4200b00bU, 0x4200c00cU, // (32.1719169616699220, 32.1875457763671880) (8992688192.08595850)
0x4200d00dU, 0x4200e00eU, // (32.2031745910644530, 32.2188034057617190) (9059813440.10158730)
0x4200f00fU, 0x42010010U, // (32.2344322204589840, 32.2500610351562500) (9126938688.11721610)
};
#endif // TRASH_SAVED_ARGUMENT_REGISTERS
| -1 |
dotnet/runtime | 65,922 | Rely on PGO for isinst/castclass | Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | EgorBo | "2022-02-26T22:13:46Z" | "2022-04-01T21:25:56Z" | 063d08018602291f8dbd9d9b6055b8e1af874cb8 | f249a3d58b0b2bb55adca1a56c5a39cf446437b7 | Rely on PGO for isinst/castclass. Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | ./src/native/public/mono/metadata/details/opcodes-functions.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// This file does not have ifdef guards, it is meant to be included multiple times with different definitions of MONO_API_FUNCTION
#ifndef MONO_API_FUNCTION
#error "MONO_API_FUNCTION(ret,name,args) macro not defined before including function declaration header"
#endif
MONO_API_FUNCTION(const char*, mono_opcode_name, (int opcode))
MONO_API_FUNCTION(MonoOpcodeEnum, mono_opcode_value, (const mono_byte **ip, const mono_byte *end))
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// This file does not have ifdef guards, it is meant to be included multiple times with different definitions of MONO_API_FUNCTION
#ifndef MONO_API_FUNCTION
#error "MONO_API_FUNCTION(ret,name,args) macro not defined before including function declaration header"
#endif
MONO_API_FUNCTION(const char*, mono_opcode_name, (int opcode))
MONO_API_FUNCTION(MonoOpcodeEnum, mono_opcode_value, (const mono_byte **ip, const mono_byte *end))
| -1 |
dotnet/runtime | 65,922 | Rely on PGO for isinst/castclass | Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | EgorBo | "2022-02-26T22:13:46Z" | "2022-04-01T21:25:56Z" | 063d08018602291f8dbd9d9b6055b8e1af874cb8 | f249a3d58b0b2bb55adca1a56c5a39cf446437b7 | Rely on PGO for isinst/castclass. Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | ./src/coreclr/pal/tests/palsuite/c_runtime/_vsnwprintf_s/test19/test19.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*=====================================================================
**
** Source: test19.cpp
**
** Purpose: Test #19 for the _vsnwprintf_s function.
**
**
**===================================================================*/
#include <palsuite.h>
#include "../_vsnwprintf_s.h"
/* memcmp is used to verify the results, so this test is dependent on it. */
/* ditto with wcslen */
#define DOTEST(a,b,c,d,e) DoTest(a,b,(void*)c,d,e)
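/* Each expected result is supplied twice (checkstr1/checkstr2) because acceptable output differs by
platform; e.g. exponents may be printed with three digits ("2.0e+000") or two ("2.0e+00"). The helpers
below pass if either form is produced. */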
void DoArgumentPrecTest_vsnwprintf_s(WCHAR *formatstr, int precision, void *param,
WCHAR *paramstr, WCHAR *checkstr1, WCHAR *checkstr2)
{
WCHAR buf[256];
TestVsnwprintf_s(buf, 256, formatstr, precision, param);
if (memcmp(buf, checkstr1, wcslen(checkstr1) + 2) != 0 &&
memcmp(buf, checkstr2, wcslen(checkstr2) + 2) != 0)
{
Fail("ERROR: failed to insert %s into \"%s\" with precision %d\n"
"Expected \"%s\" or \"%s\", got \"%s\".\n",
paramstr,
convertC(formatstr),
precision,
convertC(checkstr1),
convertC(checkstr2),
convertC(buf));
}
}
void DoArgumentPrecDoubleTest_vsnwprintf_s(WCHAR *formatstr, int precision, double param,
WCHAR *checkstr1, WCHAR *checkstr2)
{
WCHAR buf[256];
TestVsnwprintf_s(buf, 256, formatstr, precision, param);
if (memcmp(buf, checkstr1, wcslen(checkstr1) + 2) != 0 &&
memcmp(buf, checkstr2, wcslen(checkstr2) + 2) != 0)
{
Fail("ERROR: failed to insert %f into \"%s\" with precision %d\n"
"Expected \"%s\" or \"%s\", got \"%s\".\n",
param, convertC(formatstr),
precision,
convertC(checkstr1),
convertC(checkstr2),
convertC(buf));
}
}
/*
* Uses memcmp & wcslen
*/
PALTEST(c_runtime__vsnwprintf_s_test19_paltest_vsnwprintf_test19, "c_runtime/_vsnwprintf_s/test19/paltest_vsnwprintf_test19")
{
if (PAL_Initialize(argc, argv) != 0)
{
return(FAIL);
}
DoArgumentPrecTest_vsnwprintf_s(convert("%.*s"), 2, (void*)convert("bar"), convert("bar"),
convert("ba"), convert("ba"));
DoArgumentPrecTest_vsnwprintf_s(convert("%.*c"), 0, (void*)'a', convert("a"),
convert("a"), convert("a"));
DoArgumentPrecTest_vsnwprintf_s(convert("%.*c"), 4, (void*)'a', convert("a"),
convert("a"), convert("a"));
DoArgumentPrecTest_vsnwprintf_s(convert("%.*C"), 0, (void*)'a', convert("a"),
convert("a"), convert("a"));
DoArgumentPrecTest_vsnwprintf_s(convert("%.*C"), 4, (void*)'a', convert("a"),
convert("a"), convert("a"));
DoArgumentPrecTest_vsnwprintf_s(convert("%.*d"), 1, (void*)42, convert("42"),
convert("42"), convert("42"));
DoArgumentPrecTest_vsnwprintf_s(convert("%.*d"), 3, (void*)42, convert("42"),
convert("042"), convert("042"));
DoArgumentPrecTest_vsnwprintf_s(convert("%.*i"), 1, (void*)42, convert("42"),
convert("42"), convert("42"));
DoArgumentPrecTest_vsnwprintf_s(convert("%.*i"), 3, (void*)42, convert("42"),
convert("042"), convert("042"));
DoArgumentPrecTest_vsnwprintf_s(convert("%.*o"), 1, (void*)42, convert("42"),
convert("52"), convert("52"));
DoArgumentPrecTest_vsnwprintf_s(convert("%.*o"), 3, (void*)42, convert("42"),
convert("052"), convert("052"));
DoArgumentPrecTest_vsnwprintf_s(convert("%.*u"), 1, (void*)42, convert("42"),
convert("42"), convert("42"));
DoArgumentPrecTest_vsnwprintf_s(convert("%.*u"), 3, (void*)42, convert("42"),
convert("042"), convert("042"));
DoArgumentPrecTest_vsnwprintf_s(convert("%.*x"), 1, (void*)0x42, convert("0x42"),
convert("42"), convert("42"));
DoArgumentPrecTest_vsnwprintf_s(convert("%.*x"), 3, (void*)0x42, convert("0x42"),
convert("042"), convert("042"));
DoArgumentPrecTest_vsnwprintf_s(convert("%.*X"), 1, (void*)0x42, convert("0x42"),
convert("42"), convert("42"));
DoArgumentPrecTest_vsnwprintf_s(convert("%.*X"), 3, (void*)0x42, convert("0x42"),
convert("042"), convert("042"));
DoArgumentPrecDoubleTest_vsnwprintf_s(convert("%.*e"), 1, 2.01, convert("2.0e+000"),
convert("2.0e+00"));
DoArgumentPrecDoubleTest_vsnwprintf_s(convert("%.*e"), 3, 2.01, convert("2.010e+000"),
convert("2.010e+00"));
DoArgumentPrecDoubleTest_vsnwprintf_s(convert("%.*E"), 1, 2.01, convert("2.0E+000"),
convert("2.0E+00"));
DoArgumentPrecDoubleTest_vsnwprintf_s(convert("%.*E"), 3, 2.01, convert("2.010E+000"),
convert("2.010E+00"));
DoArgumentPrecDoubleTest_vsnwprintf_s(convert("%.*f"), 1, 2.01, convert("2.0"),
convert("2.0"));
DoArgumentPrecDoubleTest_vsnwprintf_s(convert("%.*f"), 3, 2.01, convert("2.010"),
convert("2.010"));
DoArgumentPrecDoubleTest_vsnwprintf_s(convert("%.*g"), 1, 256.01, convert("3e+002"),
convert("3e+02"));
DoArgumentPrecDoubleTest_vsnwprintf_s(convert("%.*g"), 3, 256.01, convert("256"),
convert("256"));
DoArgumentPrecDoubleTest_vsnwprintf_s(convert("%.*g"), 4, 256.01, convert("256"),
convert("256"));
DoArgumentPrecDoubleTest_vsnwprintf_s(convert("%.*g"), 6, 256.01, convert("256.01"),
convert("256.01"));
DoArgumentPrecDoubleTest_vsnwprintf_s(convert("%.*G"), 1, 256.01, convert("3E+002"),
convert("3E+02"));
DoArgumentPrecDoubleTest_vsnwprintf_s(convert("%.*G"), 3, 256.01, convert("256"),
convert("256"));
DoArgumentPrecDoubleTest_vsnwprintf_s(convert("%.*G"), 4, 256.01, convert("256"),
convert("256"));
DoArgumentPrecDoubleTest_vsnwprintf_s(convert("%.*G"), 6, 256.01, convert("256.01"),
convert("256.01"));
PAL_Terminate();
return PASS;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*=====================================================================
**
** Source: test19.cpp
**
** Purpose: Test #19 for the _vsnwprintf_s function.
**
**
**===================================================================*/
#include <palsuite.h>
#include "../_vsnwprintf_s.h"
/* memcmp is used to verify the results, so this test is dependent on it. */
/* ditto with wcslen */
#define DOTEST(a,b,c,d,e) DoTest(a,b,(void*)c,d,e)
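/* Each expected result is supplied twice (checkstr1/checkstr2) because acceptable output differs by
platform; e.g. exponents may be printed with three digits ("2.0e+000") or two ("2.0e+00"). The helpers
below pass if either form is produced. */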
void DoArgumentPrecTest_vsnwprintf_s(WCHAR *formatstr, int precision, void *param,
WCHAR *paramstr, WCHAR *checkstr1, WCHAR *checkstr2)
{
WCHAR buf[256];
TestVsnwprintf_s(buf, 256, formatstr, precision, param);
if (memcmp(buf, checkstr1, wcslen(checkstr1) + 2) != 0 &&
memcmp(buf, checkstr2, wcslen(checkstr2) + 2) != 0)
{
Fail("ERROR: failed to insert %s into \"%s\" with precision %d\n"
"Expected \"%s\" or \"%s\", got \"%s\".\n",
paramstr,
convertC(formatstr),
precision,
convertC(checkstr1),
convertC(checkstr2),
convertC(buf));
}
}
void DoArgumentPrecDoubleTest_vsnwprintf_s(WCHAR *formatstr, int precision, double param,
WCHAR *checkstr1, WCHAR *checkstr2)
{
WCHAR buf[256];
TestVsnwprintf_s(buf, 256, formatstr, precision, param);
if (memcmp(buf, checkstr1, wcslen(checkstr1) + 2) != 0 &&
memcmp(buf, checkstr2, wcslen(checkstr2) + 2) != 0)
{
Fail("ERROR: failed to insert %f into \"%s\" with precision %d\n"
"Expected \"%s\" or \"%s\", got \"%s\".\n",
param, convertC(formatstr),
precision,
convertC(checkstr1),
convertC(checkstr2),
convertC(buf));
}
}
/*
* Uses memcmp & wcslen
*/
PALTEST(c_runtime__vsnwprintf_s_test19_paltest_vsnwprintf_test19, "c_runtime/_vsnwprintf_s/test19/paltest_vsnwprintf_test19")
{
if (PAL_Initialize(argc, argv) != 0)
{
return(FAIL);
}
DoArgumentPrecTest_vsnwprintf_s(convert("%.*s"), 2, (void*)convert("bar"), convert("bar"),
convert("ba"), convert("ba"));
DoArgumentPrecTest_vsnwprintf_s(convert("%.*c"), 0, (void*)'a', convert("a"),
convert("a"), convert("a"));
DoArgumentPrecTest_vsnwprintf_s(convert("%.*c"), 4, (void*)'a', convert("a"),
convert("a"), convert("a"));
DoArgumentPrecTest_vsnwprintf_s(convert("%.*C"), 0, (void*)'a', convert("a"),
convert("a"), convert("a"));
DoArgumentPrecTest_vsnwprintf_s(convert("%.*C"), 4, (void*)'a', convert("a"),
convert("a"), convert("a"));
DoArgumentPrecTest_vsnwprintf_s(convert("%.*d"), 1, (void*)42, convert("42"),
convert("42"), convert("42"));
DoArgumentPrecTest_vsnwprintf_s(convert("%.*d"), 3, (void*)42, convert("42"),
convert("042"), convert("042"));
DoArgumentPrecTest_vsnwprintf_s(convert("%.*i"), 1, (void*)42, convert("42"),
convert("42"), convert("42"));
DoArgumentPrecTest_vsnwprintf_s(convert("%.*i"), 3, (void*)42, convert("42"),
convert("042"), convert("042"));
DoArgumentPrecTest_vsnwprintf_s(convert("%.*o"), 1, (void*)42, convert("42"),
convert("52"), convert("52"));
DoArgumentPrecTest_vsnwprintf_s(convert("%.*o"), 3, (void*)42, convert("42"),
convert("052"), convert("052"));
DoArgumentPrecTest_vsnwprintf_s(convert("%.*u"), 1, (void*)42, convert("42"),
convert("42"), convert("42"));
DoArgumentPrecTest_vsnwprintf_s(convert("%.*u"), 3, (void*)42, convert("42"),
convert("042"), convert("042"));
DoArgumentPrecTest_vsnwprintf_s(convert("%.*x"), 1, (void*)0x42, convert("0x42"),
convert("42"), convert("42"));
DoArgumentPrecTest_vsnwprintf_s(convert("%.*x"), 3, (void*)0x42, convert("0x42"),
convert("042"), convert("042"));
DoArgumentPrecTest_vsnwprintf_s(convert("%.*X"), 1, (void*)0x42, convert("0x42"),
convert("42"), convert("42"));
DoArgumentPrecTest_vsnwprintf_s(convert("%.*X"), 3, (void*)0x42, convert("0x42"),
convert("042"), convert("042"));
DoArgumentPrecDoubleTest_vsnwprintf_s(convert("%.*e"), 1, 2.01, convert("2.0e+000"),
convert("2.0e+00"));
DoArgumentPrecDoubleTest_vsnwprintf_s(convert("%.*e"), 3, 2.01, convert("2.010e+000"),
convert("2.010e+00"));
DoArgumentPrecDoubleTest_vsnwprintf_s(convert("%.*E"), 1, 2.01, convert("2.0E+000"),
convert("2.0E+00"));
DoArgumentPrecDoubleTest_vsnwprintf_s(convert("%.*E"), 3, 2.01, convert("2.010E+000"),
convert("2.010E+00"));
DoArgumentPrecDoubleTest_vsnwprintf_s(convert("%.*f"), 1, 2.01, convert("2.0"),
convert("2.0"));
DoArgumentPrecDoubleTest_vsnwprintf_s(convert("%.*f"), 3, 2.01, convert("2.010"),
convert("2.010"));
DoArgumentPrecDoubleTest_vsnwprintf_s(convert("%.*g"), 1, 256.01, convert("3e+002"),
convert("3e+02"));
DoArgumentPrecDoubleTest_vsnwprintf_s(convert("%.*g"), 3, 256.01, convert("256"),
convert("256"));
DoArgumentPrecDoubleTest_vsnwprintf_s(convert("%.*g"), 4, 256.01, convert("256"),
convert("256"));
DoArgumentPrecDoubleTest_vsnwprintf_s(convert("%.*g"), 6, 256.01, convert("256.01"),
convert("256.01"));
DoArgumentPrecDoubleTest_vsnwprintf_s(convert("%.*G"), 1, 256.01, convert("3E+002"),
convert("3E+02"));
DoArgumentPrecDoubleTest_vsnwprintf_s(convert("%.*G"), 3, 256.01, convert("256"),
convert("256"));
DoArgumentPrecDoubleTest_vsnwprintf_s(convert("%.*G"), 4, 256.01, convert("256"),
convert("256"));
DoArgumentPrecDoubleTest_vsnwprintf_s(convert("%.*G"), 6, 256.01, convert("256.01"),
convert("256.01"));
PAL_Terminate();
return PASS;
}
| -1 |
dotnet/runtime | 65,922 | Rely on PGO for isinst/castclass | Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | EgorBo | "2022-02-26T22:13:46Z" | "2022-04-01T21:25:56Z" | 063d08018602291f8dbd9d9b6055b8e1af874cb8 | f249a3d58b0b2bb55adca1a56c5a39cf446437b7 | Rely on PGO for isinst/castclass. Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | ./src/tests/Interop/COM/NativeClients/Dispatch/Client.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "ClientTests.h"
#include <memory>
#include <windows_version_helpers.h>
void Validate_Numeric_In_ReturnByRef();
void Validate_Float_In_ReturnAndUpdateByRef();
void Validate_Double_In_ReturnAndUpdateByRef();
void Validate_LCID_Marshaled();
void Validate_Enumerator();
template<COINIT TM>
struct ComInit
{
const HRESULT Result;
ComInit()
: Result{ ::CoInitializeEx(nullptr, TM) }
{ }
~ComInit()
{
if (SUCCEEDED(Result))
::CoUninitialize();
}
};
using ComMTA = ComInit<COINIT_MULTITHREADED>;
int __cdecl main()
{
if (is_windows_nano() == S_OK)
{
::puts("RegFree COM is not supported on Windows Nano. Auto-passing this test.\n");
return 100;
}
ComMTA init;
if (FAILED(init.Result))
return -1;
try
{
Validate_Numeric_In_ReturnByRef();
Validate_Float_In_ReturnAndUpdateByRef();
Validate_Double_In_ReturnAndUpdateByRef();
Validate_LCID_Marshaled();
Validate_Enumerator();
}
catch (HRESULT hr)
{
::printf("Test Failure: 0x%08x\n", hr);
return 101;
}
return 100;
}
void Validate_Numeric_In_ReturnByRef()
{
HRESULT hr;
CoreShimComActivation csact{ W("NETServer"), W("DispatchTesting") };
ComSmartPtr<IDispatchTesting> dispatchTesting;
THROW_IF_FAILED(::CoCreateInstance(CLSID_DispatchTesting, nullptr, CLSCTX_INPROC, IID_IDispatchTesting, (void**)&dispatchTesting));
LPOLESTR numericMethodName = (LPOLESTR)W("DoubleNumeric_ReturnByRef");
LCID lcid = MAKELCID(LANG_USER_DEFAULT, SORT_DEFAULT);
DISPID methodId;
::wprintf(W("Invoke %s\n"), numericMethodName);
THROW_IF_FAILED(dispatchTesting->GetIDsOfNames(
IID_NULL,
&numericMethodName,
1,
lcid,
&methodId));
BYTE b1 = 24;
BYTE b2;
SHORT s1 = 53;
SHORT s2;
USHORT us1 = 74;
USHORT us2;
LONG i1 = 34;
LONG i2;
ULONG ui1 = 854;
ULONG ui2;
LONGLONG l1 = 894;
LONGLONG l2;
ULONGLONG ul1 = 4168;
ULONGLONG ul2;
{
DISPPARAMS params;
params.cArgs = 14;
params.rgvarg = new VARIANTARG[params.cArgs];
params.cNamedArgs = 0;
params.rgdispidNamedArgs = nullptr;
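        // Note: IDispatch::Invoke expects rgvarg in reverse parameter order, so index 13 holds the
        // first parameter (b1) and index 0 holds the last one (&ul2).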
V_VT(¶ms.rgvarg[13]) = VT_UI1;
V_UI1(¶ms.rgvarg[13]) = b1;
V_VT(¶ms.rgvarg[12]) = VT_BYREF | VT_UI1;
V_UI1REF(¶ms.rgvarg[12]) = &b2;
V_VT(¶ms.rgvarg[11]) = VT_I2;
V_I2(¶ms.rgvarg[11]) = s1;
V_VT(¶ms.rgvarg[10]) = VT_BYREF | VT_I2;
V_I2REF(¶ms.rgvarg[10]) = &s2;
V_VT(¶ms.rgvarg[9]) = VT_UI2;
V_UI2(¶ms.rgvarg[9]) = us1;
V_VT(¶ms.rgvarg[8]) = VT_BYREF | VT_UI2;
V_UI2REF(¶ms.rgvarg[8]) = &us2;
V_VT(¶ms.rgvarg[7]) = VT_I4;
V_I4(¶ms.rgvarg[7]) = i1;
V_VT(¶ms.rgvarg[6]) = VT_BYREF | VT_I4;
V_I4REF(¶ms.rgvarg[6]) = &i2;
V_VT(¶ms.rgvarg[5]) = VT_UI4;
V_UI4(¶ms.rgvarg[5]) = ui1;
V_VT(¶ms.rgvarg[4]) = VT_BYREF | VT_UI4;
V_UI4REF(¶ms.rgvarg[4]) = &ui2;
V_VT(¶ms.rgvarg[3]) = VT_I8;
V_I8(¶ms.rgvarg[3]) = l1;
V_VT(¶ms.rgvarg[2]) = VT_BYREF | VT_I8;
V_I8REF(¶ms.rgvarg[2]) = &l2;
V_VT(¶ms.rgvarg[1]) = VT_UI8;
V_UI8(¶ms.rgvarg[1]) = ul1;
V_VT(¶ms.rgvarg[0]) = VT_BYREF | VT_UI8;
V_UI8REF(¶ms.rgvarg[0]) = &ul2;
THROW_IF_FAILED(dispatchTesting->Invoke(
methodId,
IID_NULL,
lcid,
DISPATCH_METHOD,
¶ms,
nullptr,
nullptr,
nullptr
));
THROW_FAIL_IF_FALSE(b2 == b1 * 2);
THROW_FAIL_IF_FALSE(s2 == s1 * 2);
THROW_FAIL_IF_FALSE(us2 == us1 * 2);
THROW_FAIL_IF_FALSE(i2 == i1 * 2);
THROW_FAIL_IF_FALSE(ui2 == ui1 * 2);
THROW_FAIL_IF_FALSE(l2 == l1 * 2);
THROW_FAIL_IF_FALSE(ul2 == ul1 * 2);
}
{
b2 = 0;
s2 = 0;
us2 = 0;
i2 = 0;
ui2 = 0;
l2 = 0;
ul2 = 0;
THROW_IF_FAILED(dispatchTesting->DoubleNumeric_ReturnByRef(b1, &b2, s1, &s2, us1, &us2, i1, (INT*)&i2, ui1, (UINT*)&ui2, l1, &l2, ul1, &ul2));
THROW_FAIL_IF_FALSE(b2 == b1 * 2);
THROW_FAIL_IF_FALSE(s2 == s1 * 2);
THROW_FAIL_IF_FALSE(us2 == us1 * 2);
THROW_FAIL_IF_FALSE(i2 == i1 * 2);
THROW_FAIL_IF_FALSE(ui2 == ui1 * 2);
THROW_FAIL_IF_FALSE(l2 == l1 * 2);
THROW_FAIL_IF_FALSE(ul2 == ul1 * 2);
}
}
namespace
{
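    // Compare floating-point results within a small absolute window (or machine epsilon) rather than
    // with exact equality, so minor rounding across the interop boundary does not fail the test.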
bool EqualByBound(float expected, float actual)
{
float low = expected - 0.0001f;
float high = expected + 0.0001f;
float eps = abs(expected - actual);
return eps < std::numeric_limits<float>::epsilon() || (low < actual && actual < high);
}
bool EqualByBound(double expected, double actual)
{
double low = expected - 0.00001;
double high = expected + 0.00001;
double eps = abs(expected - actual);
return eps < std::numeric_limits<double>::epsilon() || (low < actual && actual < high);
}
}
void Validate_Float_In_ReturnAndUpdateByRef()
{
HRESULT hr;
CoreShimComActivation csact{ W("NETServer"), W("DispatchTesting") };
ComSmartPtr<IDispatchTesting> dispatchTesting;
THROW_IF_FAILED(::CoCreateInstance(CLSID_DispatchTesting, nullptr, CLSCTX_INPROC, IID_IDispatchTesting, (void**)&dispatchTesting));
LPOLESTR numericMethodName = (LPOLESTR)W("Add_Float_ReturnAndUpdateByRef");
LCID lcid = MAKELCID(LANG_USER_DEFAULT, SORT_DEFAULT);
DISPID methodId;
::wprintf(W("Invoke %s\n"), numericMethodName);
THROW_IF_FAILED(dispatchTesting->GetIDsOfNames(
IID_NULL,
&numericMethodName,
1,
lcid,
&methodId));
const float a = 12.34f;
const float b_orig = 1.234f;
const float expected = b_orig + a;
float b = b_orig;
{
DISPPARAMS params;
params.cArgs = 2;
params.rgvarg = new VARIANTARG[params.cArgs];
params.cNamedArgs = 0;
params.rgdispidNamedArgs = nullptr;
VARIANT result;
V_VT(¶ms.rgvarg[1]) = VT_R4;
V_R4(¶ms.rgvarg[1]) = a;
V_VT(¶ms.rgvarg[0]) = VT_BYREF | VT_R4;
V_R4REF(¶ms.rgvarg[0]) = &b;
THROW_IF_FAILED(dispatchTesting->Invoke(
methodId,
IID_NULL,
lcid,
DISPATCH_METHOD,
¶ms,
&result,
nullptr,
nullptr
));
THROW_FAIL_IF_FALSE(EqualByBound(expected, V_R4(&result)));
THROW_FAIL_IF_FALSE(EqualByBound(expected, b));
}
{
b = b_orig;
float result;
THROW_IF_FAILED(dispatchTesting->Add_Float_ReturnAndUpdateByRef(a, &b, &result));
THROW_FAIL_IF_FALSE(EqualByBound(expected, result));
THROW_FAIL_IF_FALSE(EqualByBound(expected, b));
}
}
void Validate_Double_In_ReturnAndUpdateByRef()
{
HRESULT hr;
CoreShimComActivation csact{ W("NETServer"), W("DispatchTesting") };
ComSmartPtr<IDispatchTesting> dispatchTesting;
THROW_IF_FAILED(::CoCreateInstance(CLSID_DispatchTesting, nullptr, CLSCTX_INPROC, IID_IDispatchTesting, (void**)&dispatchTesting));
LPOLESTR numericMethodName = (LPOLESTR)W("Add_Double_ReturnAndUpdateByRef");
LCID lcid = MAKELCID(LANG_USER_DEFAULT, SORT_DEFAULT);
DISPID methodId;
::wprintf(W("Invoke %s\n"), numericMethodName);
THROW_IF_FAILED(dispatchTesting->GetIDsOfNames(
IID_NULL,
&numericMethodName,
1,
lcid,
&methodId));
const double a = 1856.5634;
const double b_orig = 587867.757;
const double expected = a + b_orig;
double b = b_orig;
{
DISPPARAMS params;
params.cArgs = 2;
params.rgvarg = new VARIANTARG[params.cArgs];
params.cNamedArgs = 0;
params.rgdispidNamedArgs = nullptr;
VARIANT result;
V_VT(¶ms.rgvarg[1]) = VT_R8;
V_R8(¶ms.rgvarg[1]) = a;
V_VT(¶ms.rgvarg[0]) = VT_BYREF | VT_R8;
V_R8REF(¶ms.rgvarg[0]) = &b;
THROW_IF_FAILED(dispatchTesting->Invoke(
methodId,
IID_NULL,
lcid,
DISPATCH_METHOD,
¶ms,
&result,
nullptr,
nullptr
));
THROW_FAIL_IF_FALSE(EqualByBound(expected, V_R8(&result)));
THROW_FAIL_IF_FALSE(EqualByBound(expected, b));
}
{
b = b_orig;
double result;
THROW_IF_FAILED(dispatchTesting->Add_Double_ReturnAndUpdateByRef(a, &b, &result));
THROW_FAIL_IF_FALSE(EqualByBound(expected, result));
THROW_FAIL_IF_FALSE(EqualByBound(expected, b));
}
}
void Validate_LCID_Marshaled()
{
HRESULT hr;
CoreShimComActivation csact{ W("NETServer"), W("DispatchTesting") };
ComSmartPtr<IDispatchTesting> dispatchTesting;
THROW_IF_FAILED(::CoCreateInstance(CLSID_DispatchTesting, nullptr, CLSCTX_INPROC, IID_IDispatchTesting, (void**)&dispatchTesting));
LPOLESTR numericMethodName = (LPOLESTR)W("PassThroughLCID");
LCID lcid = MAKELCID(MAKELANGID(LANG_SPANISH, SUBLANG_SPANISH_CHILE), SORT_DEFAULT);
DISPID methodId;
::wprintf(W("Invoke %s\n"), numericMethodName);
THROW_IF_FAILED(dispatchTesting->GetIDsOfNames(
IID_NULL,
&numericMethodName,
1,
lcid,
&methodId));
DISPPARAMS params;
params.cArgs = 0;
params.rgvarg = nullptr;
params.cNamedArgs = 0;
params.rgdispidNamedArgs = nullptr;
VARIANT result;
THROW_IF_FAILED(dispatchTesting->Invoke(
methodId,
IID_NULL,
lcid,
DISPATCH_METHOD,
¶ms,
&result,
nullptr,
nullptr
));
THROW_FAIL_IF_FALSE(lcid == (LCID)V_UI4(&result));
}
namespace
{
void ValidateExpectedEnumVariant(IEnumVARIANT *enumVariant, int expectedStart, int expectedCount)
{
HRESULT hr;
VARIANT element;
ULONG numFetched;
for(int i = expectedStart; i < expectedStart + expectedCount; ++i)
{
THROW_IF_FAILED(enumVariant->Next(1, &element, &numFetched));
THROW_FAIL_IF_FALSE(numFetched == 1);
THROW_FAIL_IF_FALSE(V_I4(&element) == i)
::VariantClear(&element);
}
hr = enumVariant->Next(1, &element, &numFetched);
THROW_FAIL_IF_FALSE(hr == S_FALSE && numFetched == 0);
}
void ValidateReturnedEnumerator(VARIANT *toValidate)
{
HRESULT hr;
THROW_FAIL_IF_FALSE(V_VT(toValidate) == VT_UNKNOWN || V_VT(toValidate) == VT_DISPATCH);
ComSmartPtr<IEnumVARIANT> enumVariant;
THROW_IF_FAILED(V_UNKNOWN(toValidate)->QueryInterface<IEnumVARIANT>(&enumVariant));
// Implementation of IDispatchTesting should return [0,9]
ValidateExpectedEnumVariant(enumVariant, 0, 10);
THROW_IF_FAILED(enumVariant->Reset());
ValidateExpectedEnumVariant(enumVariant, 0, 10);
THROW_IF_FAILED(enumVariant->Reset());
THROW_IF_FAILED(enumVariant->Skip(3));
ValidateExpectedEnumVariant(enumVariant, 3, 7);
}
}
void Validate_Enumerator()
{
HRESULT hr;
CoreShimComActivation csact{ W("NETServer"), W("DispatchTesting") };
ComSmartPtr<IDispatchTesting> dispatchTesting;
THROW_IF_FAILED(::CoCreateInstance(CLSID_DispatchTesting, nullptr, CLSCTX_INPROC, IID_IDispatchTesting, (void**)&dispatchTesting));
LCID lcid = MAKELCID(LANG_USER_DEFAULT, SORT_DEFAULT);
::printf("Invoke GetEnumerator (DISPID_NEWENUM)\n");
DISPPARAMS params {};
VARIANT result;
THROW_IF_FAILED(dispatchTesting->Invoke(
DISPID_NEWENUM,
IID_NULL,
lcid,
DISPATCH_METHOD,
¶ms,
&result,
nullptr,
nullptr
));
::printf(" -- Validate returned IEnumVARIANT\n");
ValidateReturnedEnumerator(&result);
LPOLESTR methodName = (LPOLESTR)W("ExplicitGetEnumerator");
::wprintf(W("Invoke %s\n"), methodName);
DISPID methodId;
THROW_IF_FAILED(dispatchTesting->GetIDsOfNames(
IID_NULL,
&methodName,
1,
lcid,
&methodId));
::VariantClear(&result);
THROW_IF_FAILED(dispatchTesting->Invoke(
methodId,
IID_NULL,
lcid,
DISPATCH_METHOD,
¶ms,
&result,
nullptr,
nullptr
));
::printf(" -- Validate returned IEnumVARIANT\n");
ValidateReturnedEnumerator(&result);
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "ClientTests.h"
#include <memory>
#include <windows_version_helpers.h>
void Validate_Numeric_In_ReturnByRef();
void Validate_Float_In_ReturnAndUpdateByRef();
void Validate_Double_In_ReturnAndUpdateByRef();
void Validate_LCID_Marshaled();
void Validate_Enumerator();
template<COINIT TM>
struct ComInit
{
const HRESULT Result;
ComInit()
: Result{ ::CoInitializeEx(nullptr, TM) }
{ }
~ComInit()
{
if (SUCCEEDED(Result))
::CoUninitialize();
}
};
using ComMTA = ComInit<COINIT_MULTITHREADED>;
int __cdecl main()
{
if (is_windows_nano() == S_OK)
{
::puts("RegFree COM is not supported on Windows Nano. Auto-passing this test.\n");
return 100;
}
ComMTA init;
if (FAILED(init.Result))
return -1;
try
{
Validate_Numeric_In_ReturnByRef();
Validate_Float_In_ReturnAndUpdateByRef();
Validate_Double_In_ReturnAndUpdateByRef();
Validate_LCID_Marshaled();
Validate_Enumerator();
}
catch (HRESULT hr)
{
::printf("Test Failure: 0x%08x\n", hr);
return 101;
}
return 100;
}
void Validate_Numeric_In_ReturnByRef()
{
HRESULT hr;
CoreShimComActivation csact{ W("NETServer"), W("DispatchTesting") };
ComSmartPtr<IDispatchTesting> dispatchTesting;
THROW_IF_FAILED(::CoCreateInstance(CLSID_DispatchTesting, nullptr, CLSCTX_INPROC, IID_IDispatchTesting, (void**)&dispatchTesting));
LPOLESTR numericMethodName = (LPOLESTR)W("DoubleNumeric_ReturnByRef");
LCID lcid = MAKELCID(LANG_USER_DEFAULT, SORT_DEFAULT);
DISPID methodId;
::wprintf(W("Invoke %s\n"), numericMethodName);
THROW_IF_FAILED(dispatchTesting->GetIDsOfNames(
IID_NULL,
&numericMethodName,
1,
lcid,
&methodId));
BYTE b1 = 24;
BYTE b2;
SHORT s1 = 53;
SHORT s2;
USHORT us1 = 74;
USHORT us2;
LONG i1 = 34;
LONG i2;
ULONG ui1 = 854;
ULONG ui2;
LONGLONG l1 = 894;
LONGLONG l2;
ULONGLONG ul1 = 4168;
ULONGLONG ul2;
{
DISPPARAMS params;
params.cArgs = 14;
params.rgvarg = new VARIANTARG[params.cArgs];
params.cNamedArgs = 0;
params.rgdispidNamedArgs = nullptr;
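        // Note: IDispatch::Invoke expects rgvarg in reverse parameter order, so index 13 holds the
        // first parameter (b1) and index 0 holds the last one (&ul2).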
V_VT(¶ms.rgvarg[13]) = VT_UI1;
V_UI1(¶ms.rgvarg[13]) = b1;
V_VT(¶ms.rgvarg[12]) = VT_BYREF | VT_UI1;
V_UI1REF(¶ms.rgvarg[12]) = &b2;
V_VT(¶ms.rgvarg[11]) = VT_I2;
V_I2(¶ms.rgvarg[11]) = s1;
V_VT(¶ms.rgvarg[10]) = VT_BYREF | VT_I2;
V_I2REF(¶ms.rgvarg[10]) = &s2;
V_VT(¶ms.rgvarg[9]) = VT_UI2;
V_UI2(¶ms.rgvarg[9]) = us1;
V_VT(¶ms.rgvarg[8]) = VT_BYREF | VT_UI2;
V_UI2REF(¶ms.rgvarg[8]) = &us2;
V_VT(¶ms.rgvarg[7]) = VT_I4;
V_I4(¶ms.rgvarg[7]) = i1;
V_VT(¶ms.rgvarg[6]) = VT_BYREF | VT_I4;
V_I4REF(¶ms.rgvarg[6]) = &i2;
V_VT(¶ms.rgvarg[5]) = VT_UI4;
V_UI4(¶ms.rgvarg[5]) = ui1;
V_VT(¶ms.rgvarg[4]) = VT_BYREF | VT_UI4;
V_UI4REF(¶ms.rgvarg[4]) = &ui2;
V_VT(¶ms.rgvarg[3]) = VT_I8;
V_I8(¶ms.rgvarg[3]) = l1;
V_VT(¶ms.rgvarg[2]) = VT_BYREF | VT_I8;
V_I8REF(¶ms.rgvarg[2]) = &l2;
V_VT(¶ms.rgvarg[1]) = VT_UI8;
V_UI8(¶ms.rgvarg[1]) = ul1;
V_VT(¶ms.rgvarg[0]) = VT_BYREF | VT_UI8;
V_UI8REF(¶ms.rgvarg[0]) = &ul2;
THROW_IF_FAILED(dispatchTesting->Invoke(
methodId,
IID_NULL,
lcid,
DISPATCH_METHOD,
¶ms,
nullptr,
nullptr,
nullptr
));
THROW_FAIL_IF_FALSE(b2 == b1 * 2);
THROW_FAIL_IF_FALSE(s2 == s1 * 2);
THROW_FAIL_IF_FALSE(us2 == us1 * 2);
THROW_FAIL_IF_FALSE(i2 == i1 * 2);
THROW_FAIL_IF_FALSE(ui2 == ui1 * 2);
THROW_FAIL_IF_FALSE(l2 == l1 * 2);
THROW_FAIL_IF_FALSE(ul2 == ul1 * 2);
}
{
b2 = 0;
s2 = 0;
us2 = 0;
i2 = 0;
ui2 = 0;
l2 = 0;
ul2 = 0;
THROW_IF_FAILED(dispatchTesting->DoubleNumeric_ReturnByRef(b1, &b2, s1, &s2, us1, &us2, i1, (INT*)&i2, ui1, (UINT*)&ui2, l1, &l2, ul1, &ul2));
THROW_FAIL_IF_FALSE(b2 == b1 * 2);
THROW_FAIL_IF_FALSE(s2 == s1 * 2);
THROW_FAIL_IF_FALSE(us2 == us1 * 2);
THROW_FAIL_IF_FALSE(i2 == i1 * 2);
THROW_FAIL_IF_FALSE(ui2 == ui1 * 2);
THROW_FAIL_IF_FALSE(l2 == l1 * 2);
THROW_FAIL_IF_FALSE(ul2 == ul1 * 2);
}
}
namespace
{
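    // Compare floating-point results within a small absolute window (or machine epsilon) rather than
    // with exact equality, so minor rounding across the interop boundary does not fail the test.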
bool EqualByBound(float expected, float actual)
{
float low = expected - 0.0001f;
float high = expected + 0.0001f;
float eps = abs(expected - actual);
return eps < std::numeric_limits<float>::epsilon() || (low < actual && actual < high);
}
bool EqualByBound(double expected, double actual)
{
double low = expected - 0.00001;
double high = expected + 0.00001;
double eps = abs(expected - actual);
return eps < std::numeric_limits<double>::epsilon() || (low < actual && actual < high);
}
}
void Validate_Float_In_ReturnAndUpdateByRef()
{
HRESULT hr;
CoreShimComActivation csact{ W("NETServer"), W("DispatchTesting") };
ComSmartPtr<IDispatchTesting> dispatchTesting;
THROW_IF_FAILED(::CoCreateInstance(CLSID_DispatchTesting, nullptr, CLSCTX_INPROC, IID_IDispatchTesting, (void**)&dispatchTesting));
LPOLESTR numericMethodName = (LPOLESTR)W("Add_Float_ReturnAndUpdateByRef");
LCID lcid = MAKELCID(LANG_USER_DEFAULT, SORT_DEFAULT);
DISPID methodId;
::wprintf(W("Invoke %s\n"), numericMethodName);
THROW_IF_FAILED(dispatchTesting->GetIDsOfNames(
IID_NULL,
&numericMethodName,
1,
lcid,
&methodId));
const float a = 12.34f;
const float b_orig = 1.234f;
const float expected = b_orig + a;
float b = b_orig;
{
DISPPARAMS params;
params.cArgs = 2;
params.rgvarg = new VARIANTARG[params.cArgs];
params.cNamedArgs = 0;
params.rgdispidNamedArgs = nullptr;
VARIANT result;
V_VT(¶ms.rgvarg[1]) = VT_R4;
V_R4(¶ms.rgvarg[1]) = a;
V_VT(¶ms.rgvarg[0]) = VT_BYREF | VT_R4;
V_R4REF(¶ms.rgvarg[0]) = &b;
THROW_IF_FAILED(dispatchTesting->Invoke(
methodId,
IID_NULL,
lcid,
DISPATCH_METHOD,
¶ms,
&result,
nullptr,
nullptr
));
THROW_FAIL_IF_FALSE(EqualByBound(expected, V_R4(&result)));
THROW_FAIL_IF_FALSE(EqualByBound(expected, b));
}
{
b = b_orig;
float result;
THROW_IF_FAILED(dispatchTesting->Add_Float_ReturnAndUpdateByRef(a, &b, &result));
THROW_FAIL_IF_FALSE(EqualByBound(expected, result));
THROW_FAIL_IF_FALSE(EqualByBound(expected, b));
}
}
void Validate_Double_In_ReturnAndUpdateByRef()
{
HRESULT hr;
CoreShimComActivation csact{ W("NETServer"), W("DispatchTesting") };
ComSmartPtr<IDispatchTesting> dispatchTesting;
THROW_IF_FAILED(::CoCreateInstance(CLSID_DispatchTesting, nullptr, CLSCTX_INPROC, IID_IDispatchTesting, (void**)&dispatchTesting));
LPOLESTR numericMethodName = (LPOLESTR)W("Add_Double_ReturnAndUpdateByRef");
LCID lcid = MAKELCID(LANG_USER_DEFAULT, SORT_DEFAULT);
DISPID methodId;
::wprintf(W("Invoke %s\n"), numericMethodName);
THROW_IF_FAILED(dispatchTesting->GetIDsOfNames(
IID_NULL,
&numericMethodName,
1,
lcid,
&methodId));
const double a = 1856.5634;
const double b_orig = 587867.757;
const double expected = a + b_orig;
double b = b_orig;
{
DISPPARAMS params;
params.cArgs = 2;
params.rgvarg = new VARIANTARG[params.cArgs];
params.cNamedArgs = 0;
params.rgdispidNamedArgs = nullptr;
VARIANT result;
V_VT(¶ms.rgvarg[1]) = VT_R8;
V_R8(¶ms.rgvarg[1]) = a;
V_VT(¶ms.rgvarg[0]) = VT_BYREF | VT_R8;
V_R8REF(¶ms.rgvarg[0]) = &b;
THROW_IF_FAILED(dispatchTesting->Invoke(
methodId,
IID_NULL,
lcid,
DISPATCH_METHOD,
¶ms,
&result,
nullptr,
nullptr
));
THROW_FAIL_IF_FALSE(EqualByBound(expected, V_R8(&result)));
THROW_FAIL_IF_FALSE(EqualByBound(expected, b));
}
{
b = b_orig;
double result;
THROW_IF_FAILED(dispatchTesting->Add_Double_ReturnAndUpdateByRef(a, &b, &result));
THROW_FAIL_IF_FALSE(EqualByBound(expected, result));
THROW_FAIL_IF_FALSE(EqualByBound(expected, b));
}
}
void Validate_LCID_Marshaled()
{
HRESULT hr;
CoreShimComActivation csact{ W("NETServer"), W("DispatchTesting") };
ComSmartPtr<IDispatchTesting> dispatchTesting;
THROW_IF_FAILED(::CoCreateInstance(CLSID_DispatchTesting, nullptr, CLSCTX_INPROC, IID_IDispatchTesting, (void**)&dispatchTesting));
LPOLESTR numericMethodName = (LPOLESTR)W("PassThroughLCID");
LCID lcid = MAKELCID(MAKELANGID(LANG_SPANISH, SUBLANG_SPANISH_CHILE), SORT_DEFAULT);
DISPID methodId;
::wprintf(W("Invoke %s\n"), numericMethodName);
THROW_IF_FAILED(dispatchTesting->GetIDsOfNames(
IID_NULL,
&numericMethodName,
1,
lcid,
&methodId));
DISPPARAMS params;
params.cArgs = 0;
params.rgvarg = nullptr;
params.cNamedArgs = 0;
params.rgdispidNamedArgs = nullptr;
VARIANT result;
THROW_IF_FAILED(dispatchTesting->Invoke(
methodId,
IID_NULL,
lcid,
DISPATCH_METHOD,
¶ms,
&result,
nullptr,
nullptr
));
THROW_FAIL_IF_FALSE(lcid == (LCID)V_UI4(&result));
}
namespace
{
void ValidateExpectedEnumVariant(IEnumVARIANT *enumVariant, int expectedStart, int expectedCount)
{
HRESULT hr;
VARIANT element;
ULONG numFetched;
for(int i = expectedStart; i < expectedStart + expectedCount; ++i)
{
THROW_IF_FAILED(enumVariant->Next(1, &element, &numFetched));
THROW_FAIL_IF_FALSE(numFetched == 1);
THROW_FAIL_IF_FALSE(V_I4(&element) == i)
::VariantClear(&element);
}
hr = enumVariant->Next(1, &element, &numFetched);
THROW_FAIL_IF_FALSE(hr == S_FALSE && numFetched == 0);
}
void ValidateReturnedEnumerator(VARIANT *toValidate)
{
HRESULT hr;
THROW_FAIL_IF_FALSE(V_VT(toValidate) == VT_UNKNOWN || V_VT(toValidate) == VT_DISPATCH);
ComSmartPtr<IEnumVARIANT> enumVariant;
THROW_IF_FAILED(V_UNKNOWN(toValidate)->QueryInterface<IEnumVARIANT>(&enumVariant));
// Implementation of IDispatchTesting should return [0,9]
ValidateExpectedEnumVariant(enumVariant, 0, 10);
THROW_IF_FAILED(enumVariant->Reset());
ValidateExpectedEnumVariant(enumVariant, 0, 10);
THROW_IF_FAILED(enumVariant->Reset());
THROW_IF_FAILED(enumVariant->Skip(3));
ValidateExpectedEnumVariant(enumVariant, 3, 7);
}
}
void Validate_Enumerator()
{
HRESULT hr;
CoreShimComActivation csact{ W("NETServer"), W("DispatchTesting") };
ComSmartPtr<IDispatchTesting> dispatchTesting;
THROW_IF_FAILED(::CoCreateInstance(CLSID_DispatchTesting, nullptr, CLSCTX_INPROC, IID_IDispatchTesting, (void**)&dispatchTesting));
LCID lcid = MAKELCID(LANG_USER_DEFAULT, SORT_DEFAULT);
::printf("Invoke GetEnumerator (DISPID_NEWENUM)\n");
DISPPARAMS params {};
VARIANT result;
THROW_IF_FAILED(dispatchTesting->Invoke(
DISPID_NEWENUM,
IID_NULL,
lcid,
DISPATCH_METHOD,
&params,
&result,
nullptr,
nullptr
));
::printf(" -- Validate returned IEnumVARIANT\n");
ValidateReturnedEnumerator(&result);
LPOLESTR methodName = (LPOLESTR)W("ExplicitGetEnumerator");
::wprintf(W("Invoke %s\n"), methodName);
DISPID methodId;
THROW_IF_FAILED(dispatchTesting->GetIDsOfNames(
IID_NULL,
&methodName,
1,
lcid,
&methodId));
::VariantClear(&result);
THROW_IF_FAILED(dispatchTesting->Invoke(
methodId,
IID_NULL,
lcid,
DISPATCH_METHOD,
&params,
&result,
nullptr,
nullptr
));
::printf(" -- Validate returned IEnumVARIANT\n");
ValidateReturnedEnumerator(&result);
}
| -1 |
dotnet/runtime | 65,922 | Rely on PGO for isinst/castclass | Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | EgorBo | "2022-02-26T22:13:46Z" | "2022-04-01T21:25:56Z" | 063d08018602291f8dbd9d9b6055b8e1af874cb8 | f249a3d58b0b2bb55adca1a56c5a39cf446437b7 | Rely on PGO for isinst/castclass. Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | ./src/coreclr/md/inc/stgtiggerstorage.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//*****************************************************************************
// StgTiggerStorage.h
//
//
// TiggerStorage is a stripped down version of compound doc files. Doc files
// have some very useful and complex features to them, unfortunately nothing
// comes for free. Given the incredibly tuned format of existing .tlb files,
// every single byte counts and 10% added by doc files is just too expensive.
//
// The storage itself is made up of a bunch of streams (each aligned to a 4 byte
// value), followed at the end of the file with the header. The header is
// put at the end so that you can continue to write as many streams as you
// like without thrashing the disk.
// +-------------------+
// | Signature |
// +-------------------+
// | Stream 1, 2, [] |
// +-------------------+
// | STORAGEHEADER |
// | Extra data |
// | STORAGESTREAM[] |
// +-------------------+
// | offset |
// +-------------------+
//
// The STORAGEHEADER contains flags describing the rest of the file, including
// the ability to have extra data stored in the header. If there is extra
// data, then immediately after the STORAGEHEADER struct is a 4 byte size of
// that data, followed immediately by the extra data. The length must be
// 4 byte aligned so that the first STORAGESTREAM starts on an aligned
// boundary. The contents of the extra data are caller defined.
//
// This code handles the signature at the start of the file, and the list of
// streams at the end (kept in the header). The data in each stream is, of
// course, caller specific.
//
// This code requires the StgIO code to handle the input and output from the
// backing storage, whatever scheme that may be. There are no consistency
// checks on the data (for example crc's) due to the expense in computation
// required. There is a signature at the front of the file and in the header.
//
//*****************************************************************************
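//
// A rough read-side sketch of the extra-data rule above (variable names and the
// flag test are placeholders; the real accessor is GetExtraData() declared
// below):
//
// BYTE *pbHdr = ...; // points at the STORAGEHEADER
// if (/* header flags indicate extra data is present */)
// {
// ULONG cbExtra = *(ULONG *)(pbHdr + sizeof(STORAGEHEADER));
// BYTE *pbExtra = pbHdr + sizeof(STORAGEHEADER) + sizeof(ULONG);
// // cbExtra is 4 byte aligned, so the STORAGESTREAM list that follows the
// // extra data starts on an aligned boundary.
// }
//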
#ifndef __StgTiggerStorage_h__
#define __StgTiggerStorage_h__
//#include "utilcode.h" // Helpers.
#include "mdfileformat.h"
typedef CDynArray<STORAGESTREAM> STORAGESTREAMLST;
// Forwards.
class TiggerStream;
class StgIO;
class TiggerStorage :
public IStorage
{
friend class TiggerStream;
public:
TiggerStorage();
virtual ~TiggerStorage();
// IUnknown so you can ref count this thing.
virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID riid, PVOID *pp)
{ return (BadError(E_NOTIMPL)); }
virtual ULONG STDMETHODCALLTYPE AddRef()
{ return (InterlockedIncrement(&m_cRef)); }
virtual ULONG STDMETHODCALLTYPE Release()
{
SUPPORTS_DAC_HOST_ONLY;
ULONG cRef;
if ((cRef = InterlockedDecrement(&m_cRef)) == 0)
delete this;
return (cRef);
}
//*****************************************************************************
// Init this storage object on top of the given storage unit.
//*****************************************************************************
HRESULT Init( // Return code.
StgIO *pStgIO, // The I/O subsystem.
_In_ _In_z_ LPSTR pVersion); // Compiler-supplied CLR version
//*****************************************************************************
// Retrieves the size and a pointer to the extra data that can optionally be
// written in the header of the storage system. This data is not required to
// be in the file, in which case *pcbExtra will come back as 0 and pbData will
// be set to null. You must have initialized the storage using Init() before
// calling this function.
//*****************************************************************************
HRESULT GetExtraData( // Return code.
ULONG *pcbExtra, // Return size of extra data.
BYTE *&pbData); // Return a pointer to extra data.
//*****************************************************************************
// Flushes the header to disk.
//*****************************************************************************
HRESULT WriteHeader( // Return code.
STORAGESTREAMLST *pList, // List of streams.
ULONG cbExtraData, // Size of extra data, may be 0.
BYTE *pbExtraData); // Pointer to extra data for header.
//*****************************************************************************
// Called when all data has been written. Forces cached data to be flushed
// and stream lists to be validated.
//*****************************************************************************
HRESULT WriteFinished( // Return code.
STORAGESTREAMLST *pList, // List of streams.
ULONG *pcbSaveSize, // Return size of total data.
BOOL fDeltaSave); // Was this a delta
//*****************************************************************************
// Called after a successful rewrite of an existing file. The in memory
// backing store is no longer valid because all new data is in memory and
// on disk. This is essentially the same state as created, so free up some
// working set and remember this state.
//*****************************************************************************
HRESULT ResetBackingStore(); // Return code.
//*****************************************************************************
// Called to restore the original file. If this operation is successful, then
// the backup file is deleted as requested. The restore of the file is done
// in write through mode to the disk to help ensure the contents are not lost.
// This is not good enough to fulfill ACID props, but it ain't that bad.
//*****************************************************************************
HRESULT Restore( // Return code.
_In_ _In_z_ LPWSTR szBackup, // If non-0, backup the file.
int bDeleteOnSuccess); // Delete backup file if successful.
//*****************************************************************************
// Given the name of a stream that will be persisted into a stream in this
// storage type, figure out how big that stream would be including the user's
// stream data and the header overhead the file format incurs. The name is
// stored in ANSI and the header struct is aligned to 4 bytes.
//*****************************************************************************
static HRESULT GetStreamSaveSize( // Return code.
LPCWSTR szStreamName, // Name of stream.
UINT32 cbDataSize, // Size of data to go into stream.
UINT32 *pcbSaveSize); // Return data size plus stream overhead.
//*****************************************************************************
// Return the fixed size overhead for the storage implementation. This includes
// the signature and fixed header overhead. The overhead in the header for each
// stream is calculated as part of GetStreamSaveSize because these structs are
// variable sized on the name.
//*****************************************************************************
static HRESULT GetStorageSaveSize( // Return code.
ULONG *pcbSaveSize, // [in] current size, [out] plus overhead.
ULONG cbExtra, // How much extra data to store in header.
LPCSTR pRuntimeVersion); // The version string as its length is part of the total size.
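//
// One plausible way a caller might combine the two sizing helpers, going by
// the parameter comments (loop and variable names are placeholders):
//
// ULONG cbTotal = 0;
// for (/* each stream to be saved */)
// {
// UINT32 cbStream;
// TiggerStorage::GetStreamSaveSize(szName, cbData, &cbStream);
// cbTotal += cbStream;
// }
// TiggerStorage::GetStorageSaveSize(&cbTotal, cbExtra, pVersion); // add signature + fixed header overhead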
//*****************************************************************************
// Adjust the offset in each known stream to match where it will wind up after
// a save operation.
//*****************************************************************************
static HRESULT CalcOffsets( // Return code.
STORAGESTREAMLST *pStreamList, // List of streams for header.
ULONG cbExtra, // Size of variable extra data in header.
LPCSTR pRuntimeVersion); // The version string as its length is part of the total size.
//*****************************************************************************
// Returns the size of the signature plus the version information
//*****************************************************************************
static HRESULT SizeOfStorageSignature(
LPCSTR pRuntimeVersion, // The version string as its length is part of the total size.
ULONG *pcbSignatureSize);
// IStorage
virtual HRESULT STDMETHODCALLTYPE CreateStream(
const OLECHAR *pwcsName,
DWORD grfMode,
DWORD reserved1,
DWORD reserved2,
IStream **ppstm);
virtual HRESULT STDMETHODCALLTYPE CreateStream(
LPCSTR szName,
DWORD grfMode,
DWORD reserved1,
DWORD reserved2,
IStream **ppstm)
DAC_UNEXPECTED();
virtual HRESULT STDMETHODCALLTYPE OpenStream(
const OLECHAR *pwcsName,
void *reserved1,
DWORD grfMode,
DWORD reserved2,
IStream **ppstm);
virtual HRESULT STDMETHODCALLTYPE CreateStorage(
const OLECHAR *pwcsName,
DWORD grfMode,
DWORD dwStgFmt,
DWORD reserved2,
IStorage **ppstg);
virtual HRESULT STDMETHODCALLTYPE OpenStorage(
const OLECHAR * wcsName,
IStorage * pStgPriority,
DWORD dwMode,
_In_
SNB snbExclude,
DWORD reserved,
IStorage ** ppStg);
virtual HRESULT STDMETHODCALLTYPE CopyTo(
DWORD cIidExclude,
const IID * rgIidExclude,
_In_
SNB snbExclude,
IStorage * pStgDest);
virtual HRESULT STDMETHODCALLTYPE MoveElementTo(
const OLECHAR *pwcsName,
IStorage *pstgDest,
const OLECHAR *pwcsNewName,
DWORD grfFlags);
virtual HRESULT STDMETHODCALLTYPE Commit(
DWORD grfCommitFlags);
virtual HRESULT STDMETHODCALLTYPE Revert();
virtual HRESULT STDMETHODCALLTYPE EnumElements(
DWORD reserved1,
void *reserved2,
DWORD reserved3,
IEnumSTATSTG **ppenum);
virtual HRESULT STDMETHODCALLTYPE DestroyElement(
const OLECHAR *pwcsName);
virtual HRESULT STDMETHODCALLTYPE RenameElement(
const OLECHAR *pwcsOldName,
const OLECHAR *pwcsNewName);
virtual HRESULT STDMETHODCALLTYPE SetElementTimes(
const OLECHAR *pwcsName,
const FILETIME *pctime,
const FILETIME *patime,
const FILETIME *pmtime);
virtual HRESULT STDMETHODCALLTYPE SetClass(
REFCLSID clsid);
virtual HRESULT STDMETHODCALLTYPE SetStateBits(
DWORD grfStateBits,
DWORD grfMask);
virtual HRESULT STDMETHODCALLTYPE Stat(
STATSTG *pstatstg,
DWORD grfStatFlag);
virtual HRESULT STDMETHODCALLTYPE OpenStream(
LPCWSTR szStream,
ULONG *pcbData,
void **ppAddress);
// Access storage object.
StgIO *GetStgIO()
{ return (m_pStgIO); }
#if defined(_DEBUG)
ULONG PrintSizeInfo( // Size of streams.
bool verbose); // Be verbose?
#endif
protected:
HRESULT Write( // Return code.
LPCSTR szName, // Name of stream we're writing.
const void *pData, // Data to write.
ULONG cbData, // Size of data.
ULONG *pcbWritten); // How much did we write.
private:
HRESULT FindStream(LPCSTR szName, _Out_ PSTORAGESTREAM *stream);
HRESULT WriteSignature(LPCSTR pVersion);
HRESULT VerifySignature(PSTORAGESIGNATURE pSig);
HRESULT ReadHeader();
HRESULT VerifyHeader();
static HRESULT GetDefaultVersion(LPCSTR* ppVersion);
public:
// This function is a workaround to allow access to the "version requested" string.
HRESULT GetHeaderPointer(const void **ppv, ULONG *pcb);
private:
// State data.
StgIO *m_pStgIO; // Storage subsystem.
LONG m_cRef; // Ref count for COM.
// Header data.
STORAGEHEADER m_StgHdr; // Header for storage.
STORAGESTREAMLST m_Streams; // List of streams in the storage.
PSTORAGESTREAM m_pStreamList; // For read mode.
void *m_pbExtra; // Pointer to extra data if on disk.
};
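// A rough usage sketch for the read path (pStgIO, szVersion and the "#~"
// stream name are placeholders):
//
// TiggerStorage *pStorage = new TiggerStorage();
// HRESULT hr = pStorage->Init(pStgIO, szVersion);
// if (SUCCEEDED(hr))
// {
// ULONG cbData;
// void *pData;
// hr = pStorage->OpenStream(W("#~"), &cbData, &pData);
// // On success, pData/cbData describe the raw contents of that stream.
// }
// pStorage->Release();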
//*****************************************************************************
// Debugging helpers. #define __SAVESIZE_TRACE__ to enable.
//*****************************************************************************
// #define __SAVESIZE_TRACE__
#ifdef __SAVESIZE_TRACE__
#define SAVETRACE(func) DEBUG_STMT(func)
#else
#define SAVETRACE(func)
#endif // __SAVESIZE_TRACE__
#endif // StgTiggerStorage
// EOF
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//*****************************************************************************
// StgTiggerStorage.h
//
//
// TiggerStorage is a stripped down version of compound doc files. Doc files
// have some very useful and complex features to them, unfortunately nothing
// comes for free. Given the incredibly tuned format of existing .tlb files,
// every single byte counts and 10% added by doc files is just too expensive.
//
// The storage itself is made up of a bunch of streams (each aligned to a 4 byte
// value), followed at the end of the file with the header. The header is
// put at the end so that you can continue to write as many streams as you
// like without thrashing the disk.
// +-------------------+
// | Signature |
// +-------------------+
// | Stream 1, 2, [] |
// +-------------------+
// | STORAGEHEADER |
// | Extra data |
// | STORAGESTREAM[] |
// +-------------------+
// | offset |
// +-------------------+
//
// The STORAGEHEADER contains flags describing the rest of the file, including
// the ability to have extra data stored in the header. If there is extra
// data, then immediately after the STORAGEHEADER struct is a 4 byte size of
// that data, followed immediately by the extra data. The length must be
// 4 byte aligned so that the first STORAGESTREAM starts on an aligned
// boundary. The contents of the extra data are caller defined.
//
// This code handles the signature at the start of the file, and the list of
// streams at the end (kept in the header). The data in each stream is, of
// course, caller specific.
//
// This code requires the StgIO code to handle the input and output from the
// backing storage, whatever scheme that may be. There are no consistency
// checks on the data (for example crc's) due to the expense in computation
// required. There is a signature at the front of the file and in the header.
//
//*****************************************************************************
#ifndef __StgTiggerStorage_h__
#define __StgTiggerStorage_h__
//#include "utilcode.h" // Helpers.
#include "mdfileformat.h"
typedef CDynArray<STORAGESTREAM> STORAGESTREAMLST;
// Forwards.
class TiggerStream;
class StgIO;
class TiggerStorage :
public IStorage
{
friend class TiggerStream;
public:
TiggerStorage();
virtual ~TiggerStorage();
// IUnknown so you can ref count this thing.
virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID riid, PVOID *pp)
{ return (BadError(E_NOTIMPL)); }
virtual ULONG STDMETHODCALLTYPE AddRef()
{ return (InterlockedIncrement(&m_cRef)); }
virtual ULONG STDMETHODCALLTYPE Release()
{
SUPPORTS_DAC_HOST_ONLY;
ULONG cRef;
if ((cRef = InterlockedDecrement(&m_cRef)) == 0)
delete this;
return (cRef);
}
//*****************************************************************************
// Init this storage object on top of the given storage unit.
//*****************************************************************************
HRESULT Init( // Return code.
StgIO *pStgIO, // The I/O subsystem.
_In_ _In_z_ LPSTR pVersion); // Compiler-supplied CLR version
//*****************************************************************************
// Retrieves the size and a pointer to the extra data that can optionally be
// written in the header of the storage system. This data is not required to
// be in the file, in which case *pcbExtra will come back as 0 and pbData will
// be set to null. You must have initialized the storage using Init() before
// calling this function.
//*****************************************************************************
HRESULT GetExtraData( // Return code.
ULONG *pcbExtra, // Return size of extra data.
BYTE *&pbData); // Return a pointer to extra data.
//*****************************************************************************
// Flushes the header to disk.
//*****************************************************************************
HRESULT WriteHeader( // Return code.
STORAGESTREAMLST *pList, // List of streams.
ULONG cbExtraData, // Size of extra data, may be 0.
BYTE *pbExtraData); // Pointer to extra data for header.
//*****************************************************************************
// Called when all data has been written. Forces cached data to be flushed
// and stream lists to be validated.
//*****************************************************************************
HRESULT WriteFinished( // Return code.
STORAGESTREAMLST *pList, // List of streams.
ULONG *pcbSaveSize, // Return size of total data.
BOOL fDeltaSave); // Was this a delta
//*****************************************************************************
// Called after a successful rewrite of an existing file. The in memory
// backing store is no longer valid because all new data is in memory and
// on disk. This is essentially the same state as created, so free up some
// working set and remember this state.
//*****************************************************************************
HRESULT ResetBackingStore(); // Return code.
//*****************************************************************************
// Called to restore the original file. If this operation is successful, then
// the backup file is deleted as requested. The restore of the file is done
// in write through mode to the disk to help ensure the contents are not lost.
// This is not good enough to fulfill ACID props, but it ain't that bad.
//*****************************************************************************
HRESULT Restore( // Return code.
_In_ _In_z_ LPWSTR szBackup, // If non-0, backup the file.
int bDeleteOnSuccess); // Delete backup file if successful.
//*****************************************************************************
// Given the name of a stream that will be persisted into a stream in this
// storage type, figure out how big that stream would be including the user's
// stream data and the header overhead the file format incurs. The name is
// stored in ANSI and the header struct is aligned to 4 bytes.
//*****************************************************************************
static HRESULT GetStreamSaveSize( // Return code.
LPCWSTR szStreamName, // Name of stream.
UINT32 cbDataSize, // Size of data to go into stream.
UINT32 *pcbSaveSize); // Return data size plus stream overhead.
//*****************************************************************************
// Return the fixed size overhead for the storage implementation. This includes
// the signature and fixed header overhead. The overhead in the header for each
// stream is calculated as part of GetStreamSaveSize because these structs are
// variable sized on the name.
//*****************************************************************************
static HRESULT GetStorageSaveSize( // Return code.
ULONG *pcbSaveSize, // [in] current size, [out] plus overhead.
ULONG cbExtra, // How much extra data to store in header.
LPCSTR pRuntimeVersion); // The version string as its length is part of the total size.
//*****************************************************************************
// Adjust the offset in each known stream to match where it will wind up after
// a save operation.
//*****************************************************************************
static HRESULT CalcOffsets( // Return code.
STORAGESTREAMLST *pStreamList, // List of streams for header.
ULONG cbExtra, // Size of variable extra data in header.
LPCSTR pRuntimeVersion); // The version string as its length is part of the total size.
//*****************************************************************************
// Returns the size of the signature plus the version information
//*****************************************************************************
static HRESULT SizeOfStorageSignature(
LPCSTR pRuntimeVersion, // The version string as its length is part of the total size.
ULONG *pcbSignatureSize);
// IStorage
virtual HRESULT STDMETHODCALLTYPE CreateStream(
const OLECHAR *pwcsName,
DWORD grfMode,
DWORD reserved1,
DWORD reserved2,
IStream **ppstm);
virtual HRESULT STDMETHODCALLTYPE CreateStream(
LPCSTR szName,
DWORD grfMode,
DWORD reserved1,
DWORD reserved2,
IStream **ppstm)
DAC_UNEXPECTED();
virtual HRESULT STDMETHODCALLTYPE OpenStream(
const OLECHAR *pwcsName,
void *reserved1,
DWORD grfMode,
DWORD reserved2,
IStream **ppstm);
virtual HRESULT STDMETHODCALLTYPE CreateStorage(
const OLECHAR *pwcsName,
DWORD grfMode,
DWORD dwStgFmt,
DWORD reserved2,
IStorage **ppstg);
virtual HRESULT STDMETHODCALLTYPE OpenStorage(
const OLECHAR * wcsName,
IStorage * pStgPriority,
DWORD dwMode,
_In_
SNB snbExclude,
DWORD reserved,
IStorage ** ppStg);
virtual HRESULT STDMETHODCALLTYPE CopyTo(
DWORD cIidExclude,
const IID * rgIidExclude,
_In_
SNB snbExclude,
IStorage * pStgDest);
virtual HRESULT STDMETHODCALLTYPE MoveElementTo(
const OLECHAR *pwcsName,
IStorage *pstgDest,
const OLECHAR *pwcsNewName,
DWORD grfFlags);
virtual HRESULT STDMETHODCALLTYPE Commit(
DWORD grfCommitFlags);
virtual HRESULT STDMETHODCALLTYPE Revert();
virtual HRESULT STDMETHODCALLTYPE EnumElements(
DWORD reserved1,
void *reserved2,
DWORD reserved3,
IEnumSTATSTG **ppenum);
virtual HRESULT STDMETHODCALLTYPE DestroyElement(
const OLECHAR *pwcsName);
virtual HRESULT STDMETHODCALLTYPE RenameElement(
const OLECHAR *pwcsOldName,
const OLECHAR *pwcsNewName);
virtual HRESULT STDMETHODCALLTYPE SetElementTimes(
const OLECHAR *pwcsName,
const FILETIME *pctime,
const FILETIME *patime,
const FILETIME *pmtime);
virtual HRESULT STDMETHODCALLTYPE SetClass(
REFCLSID clsid);
virtual HRESULT STDMETHODCALLTYPE SetStateBits(
DWORD grfStateBits,
DWORD grfMask);
virtual HRESULT STDMETHODCALLTYPE Stat(
STATSTG *pstatstg,
DWORD grfStatFlag);
virtual HRESULT STDMETHODCALLTYPE OpenStream(
LPCWSTR szStream,
ULONG *pcbData,
void **ppAddress);
// Access storage object.
StgIO *GetStgIO()
{ return (m_pStgIO); }
#if defined(_DEBUG)
ULONG PrintSizeInfo( // Size of streams.
bool verbose); // Be verbose?
#endif
protected:
HRESULT Write( // Return code.
LPCSTR szName, // Name of stream we're writing.
const void *pData, // Data to write.
ULONG cbData, // Size of data.
ULONG *pcbWritten); // How much did we write.
private:
HRESULT FindStream(LPCSTR szName, _Out_ PSTORAGESTREAM *stream);
HRESULT WriteSignature(LPCSTR pVersion);
HRESULT VerifySignature(PSTORAGESIGNATURE pSig);
HRESULT ReadHeader();
HRESULT VerifyHeader();
static HRESULT GetDefaultVersion(LPCSTR* ppVersion);
public:
// This function is a workaround to allow access to the "version requested" string.
HRESULT GetHeaderPointer(const void **ppv, ULONG *pcb);
private:
// State data.
StgIO *m_pStgIO; // Storage subsystem.
LONG m_cRef; // Ref count for COM.
// Header data.
STORAGEHEADER m_StgHdr; // Header for storage.
STORAGESTREAMLST m_Streams; // List of streams in the storage.
PSTORAGESTREAM m_pStreamList; // For read mode.
void *m_pbExtra; // Pointer to extra data if on disk.
};
//*****************************************************************************
// Debugging helpers. #define __SAVESIZE_TRACE__ to enable.
//*****************************************************************************
// #define __SAVESIZE_TRACE__
#ifdef __SAVESIZE_TRACE__
#define SAVETRACE(func) DEBUG_STMT(func)
#else
#define SAVETRACE(func)
#endif // __SAVESIZE_TRACE__
#endif // StgTiggerStorage
// EOF
| -1 |
dotnet/runtime | 65,922 | Rely on PGO for isinst/castclass | Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | EgorBo | "2022-02-26T22:13:46Z" | "2022-04-01T21:25:56Z" | 063d08018602291f8dbd9d9b6055b8e1af874cb8 | f249a3d58b0b2bb55adca1a56c5a39cf446437b7 | Rely on PGO for isinst/castclass. Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | ./src/native/eventpipe/ds-ipc-pal-socket.h | #ifndef __DIAGNOSTICS_IPC_PAL_SOCKET_H__
#define __DIAGNOSTICS_IPC_PAL_SOCKET_H__
#include "ds-rt-config.h"
#ifdef ENABLE_PERFTRACING
#include "ds-ipc-pal.h"
#undef DS_IMPL_GETTER_SETTER
#ifdef DS_IMPL_IPC_PAL_SOCKET_GETTER_SETTER
#define DS_IMPL_GETTER_SETTER
#endif
#include "ds-getter-setter.h"
#ifdef HOST_WIN32
#include <winsock2.h>
typedef SOCKET ds_ipc_socket_t;
typedef SOCKADDR ds_ipc_socket_address_t;
typedef ADDRESS_FAMILY ds_ipc_socket_family_t;
typedef int ds_ipc_socket_len_t;
#else
#include <sys/socket.h>
typedef int ds_ipc_socket_t;
typedef struct sockaddr ds_ipc_socket_address_t;
typedef int ds_ipc_socket_family_t;
typedef socklen_t ds_ipc_socket_len_t;
#endif
/*
* DiagnosticsIpc.
*/
#if defined(DS_INLINE_GETTER_SETTER) || defined(DS_IMPL_IPC_PAL_SOCKET_GETTER_SETTER)
struct _DiagnosticsIpc {
#else
struct _DiagnosticsIpc_Internal {
#endif
ds_ipc_socket_address_t *server_address;
ds_ipc_socket_len_t server_address_len;
ds_ipc_socket_family_t server_address_family;
ds_ipc_socket_t server_socket;
bool is_listening;
bool is_closed;
bool is_dual_mode;
DiagnosticsIpcConnectionMode mode;
};
#if !defined(DS_INLINE_GETTER_SETTER) && !defined(DS_IMPL_IPC_PAL_SOCKET_GETTER_SETTER)
struct _DiagnosticsIpc {
uint8_t _internal [sizeof (struct _DiagnosticsIpc_Internal)];
};
#endif
/*
* DiagnosticsIpcStream.
*/
#if defined(DS_INLINE_GETTER_SETTER) || defined(DS_IMPL_IPC_PAL_SOCKET_GETTER_SETTER)
struct _DiagnosticsIpcStream {
#else
struct _DiagnosticsIpcStream_Internal {
#endif
IpcStream stream;
ds_ipc_socket_t client_socket;
DiagnosticsIpcConnectionMode mode;
};
#if !defined(DS_INLINE_GETTER_SETTER) && !defined(DS_IMPL_IPC_PAL_SOCKET_GETTER_SETTER)
struct _DiagnosticsIpcStream {
uint8_t _internal [sizeof (struct _DiagnosticsIpcStream_Internal)];
};
#endif
#endif /* ENABLE_PERFTRACING */
#endif /* __DIAGNOSTICS_IPC_PAL_SOCKET_H__ */
| #ifndef __DIAGNOSTICS_IPC_PAL_SOCKET_H__
#define __DIAGNOSTICS_IPC_PAL_SOCKET_H__
#include "ds-rt-config.h"
#ifdef ENABLE_PERFTRACING
#include "ds-ipc-pal.h"
#undef DS_IMPL_GETTER_SETTER
#ifdef DS_IMPL_IPC_PAL_SOCKET_GETTER_SETTER
#define DS_IMPL_GETTER_SETTER
#endif
#include "ds-getter-setter.h"
#ifdef HOST_WIN32
#include <winsock2.h>
typedef SOCKET ds_ipc_socket_t;
typedef SOCKADDR ds_ipc_socket_address_t;
typedef ADDRESS_FAMILY ds_ipc_socket_family_t;
typedef int ds_ipc_socket_len_t;
#else
#include <sys/socket.h>
typedef int ds_ipc_socket_t;
typedef struct sockaddr ds_ipc_socket_address_t;
typedef int ds_ipc_socket_family_t;
typedef socklen_t ds_ipc_socket_len_t;
#endif
/*
* DiagnosticsIpc.
*/
#if defined(DS_INLINE_GETTER_SETTER) || defined(DS_IMPL_IPC_PAL_SOCKET_GETTER_SETTER)
struct _DiagnosticsIpc {
#else
struct _DiagnosticsIpc_Internal {
#endif
ds_ipc_socket_address_t *server_address;
ds_ipc_socket_len_t server_address_len;
ds_ipc_socket_family_t server_address_family;
ds_ipc_socket_t server_socket;
bool is_listening;
bool is_closed;
bool is_dual_mode;
DiagnosticsIpcConnectionMode mode;
};
#if !defined(DS_INLINE_GETTER_SETTER) && !defined(DS_IMPL_IPC_PAL_SOCKET_GETTER_SETTER)
struct _DiagnosticsIpc {
uint8_t _internal [sizeof (struct _DiagnosticsIpc_Internal)];
};
#endif
/*
* DiagnosticsIpcStream.
*/
#if defined(DS_INLINE_GETTER_SETTER) || defined(DS_IMPL_IPC_PAL_SOCKET_GETTER_SETTER)
struct _DiagnosticsIpcStream {
#else
struct _DiagnosticsIpcStream_Internal {
#endif
IpcStream stream;
ds_ipc_socket_t client_socket;
DiagnosticsIpcConnectionMode mode;
};
#if !defined(DS_INLINE_GETTER_SETTER) && !defined(DS_IMPL_IPC_PAL_SOCKET_GETTER_SETTER)
struct _DiagnosticsIpcStream {
uint8_t _internal [sizeof (struct _DiagnosticsIpcStream_Internal)];
};
#endif
#endif /* ENABLE_PERFTRACING */
#endif /* __DIAGNOSTICS_IPC_PAL_SOCKET_H__ */
| -1 |
dotnet/runtime | 65,922 | Rely on PGO for isinst/castclass | Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | EgorBo | "2022-02-26T22:13:46Z" | "2022-04-01T21:25:56Z" | 063d08018602291f8dbd9d9b6055b8e1af874cb8 | f249a3d58b0b2bb55adca1a56c5a39cf446437b7 | Rely on PGO for isinst/castclass. Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | ./src/native/external/libunwind/include/tdep-s390x/libunwind_i.h | /* libunwind - a platform-independent unwind library
Copyright (C) 2002-2005 Hewlett-Packard Co
Contributed by David Mosberger-Tang <davidm@hpl.hp.com>
Modified for x86_64 by Max Asbock <masbock@us.ibm.com>
This file is part of libunwind.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
#ifndef S390X_LIBUNWIND_I_H
#define S390X_LIBUNWIND_I_H
/* Target-dependent definitions that are internal to libunwind but need
to be shared with target-independent code. */
#include <stdlib.h>
#include <libunwind.h>
#include <stdatomic.h>
#include "elf64.h"
#include "mempool.h"
#include "dwarf.h"
struct unw_addr_space
{
struct unw_accessors acc;
unw_caching_policy_t caching_policy;
_Atomic uint32_t cache_generation;
unw_word_t dyn_generation; /* see dyn-common.h */
unw_word_t dyn_info_list_addr; /* (cached) dyn_info_list_addr */
struct dwarf_rs_cache global_cache;
struct unw_debug_frame_list *debug_frames;
};
struct cursor
{
struct dwarf_cursor dwarf; /* must be first */
/* Format of sigcontext structure and address at which it is
stored: */
enum
{
S390X_SCF_NONE = 0, /* no signal frame encountered */
S390X_SCF_LINUX_SIGFRAME = 1, /* Linux struct sigcontext */
S390X_SCF_LINUX_RT_SIGFRAME = 2, /* Linux ucontext_t */
}
sigcontext_format;
unw_word_t sigcontext_addr;
unw_word_t sigcontext_sp;
unw_word_t sigcontext_pc;
int validate;
ucontext_t *uc;
};
static inline ucontext_t *
dwarf_get_uc(const struct dwarf_cursor *cursor)
{
const struct cursor *c = (struct cursor *) cursor->as_arg;
return c->uc;
}
#define DWARF_GET_LOC(l) ((l).val)
# define DWARF_LOC_TYPE_MEM (0 << 0)
# define DWARF_LOC_TYPE_FP (1 << 0)
# define DWARF_LOC_TYPE_REG (1 << 1)
# define DWARF_LOC_TYPE_VAL (1 << 2)
# define DWARF_IS_REG_LOC(l) (((l).type & DWARF_LOC_TYPE_REG) != 0)
# define DWARF_IS_FP_LOC(l) (((l).type & DWARF_LOC_TYPE_FP) != 0)
# define DWARF_IS_MEM_LOC(l) ((l).type == DWARF_LOC_TYPE_MEM)
# define DWARF_IS_VAL_LOC(l) (((l).type & DWARF_LOC_TYPE_VAL) != 0)
# define DWARF_LOC(r, t) ((dwarf_loc_t) { .val = (r), .type = (t) })
# define DWARF_VAL_LOC(c,v) DWARF_LOC ((v), DWARF_LOC_TYPE_VAL)
# define DWARF_MEM_LOC(c,m) DWARF_LOC ((m), DWARF_LOC_TYPE_MEM)
#ifdef UNW_LOCAL_ONLY
# define DWARF_NULL_LOC DWARF_LOC (0, 0)
# define DWARF_IS_NULL_LOC(l) (DWARF_GET_LOC (l) == 0)
# define DWARF_REG_LOC(c,r) (DWARF_LOC((unw_word_t) \
tdep_uc_addr(dwarf_get_uc(c), (r)), 0))
# define DWARF_FPREG_LOC(c,r) (DWARF_LOC((unw_word_t) \
tdep_uc_addr(dwarf_get_uc(c), (r)), 0))
#else /* !UNW_LOCAL_ONLY */
# define DWARF_NULL_LOC DWARF_LOC (0, 0)
# define DWARF_IS_NULL_LOC(l) \
({ dwarf_loc_t _l = (l); _l.val == 0 && _l.type == 0; })
# define DWARF_REG_LOC(c,r) DWARF_LOC((r), DWARF_LOC_TYPE_REG)
# define DWARF_FPREG_LOC(c,r) DWARF_LOC((r), (DWARF_LOC_TYPE_REG \
| DWARF_LOC_TYPE_FP))
#endif /* !UNW_LOCAL_ONLY */
static inline int
dwarf_getfp (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t *val)
{
assert(sizeof(unw_fpreg_t) == sizeof(unw_word_t));
if (DWARF_IS_NULL_LOC (loc))
return -UNW_EBADREG;
if (DWARF_IS_FP_LOC (loc))
return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_LOC (loc), val,
0, c->as_arg);
/* FPRs may be saved in GPRs */
if (DWARF_IS_REG_LOC (loc))
return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), (unw_word_t*)val,
0, c->as_arg);
if (DWARF_IS_MEM_LOC (loc))
return (*c->as->acc.access_mem) (c->as, DWARF_GET_LOC (loc), (unw_word_t*)val,
0, c->as_arg);
assert(DWARF_IS_VAL_LOC (loc));
*val = *(unw_fpreg_t*) DWARF_GET_LOC (loc);
return 0;
}
static inline int
dwarf_putfp (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t val)
{
assert(sizeof(unw_fpreg_t) == sizeof(unw_word_t));
assert(!DWARF_IS_VAL_LOC (loc));
if (DWARF_IS_NULL_LOC (loc))
return -UNW_EBADREG;
if (DWARF_IS_FP_LOC (loc))
return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_LOC (loc), &val,
1, c->as_arg);
/* FPRs may be saved in GPRs */
if (DWARF_IS_REG_LOC (loc))
return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), (unw_word_t*) &val,
1, c->as_arg);
assert(DWARF_IS_MEM_LOC (loc));
return (*c->as->acc.access_mem) (c->as, DWARF_GET_LOC (loc), (unw_word_t*) &val,
1, c->as_arg);
}
static inline int
dwarf_get (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t *val)
{
assert(sizeof(unw_fpreg_t) == sizeof(unw_word_t));
if (DWARF_IS_NULL_LOC (loc))
return -UNW_EBADREG;
/* GPRs may be saved in FPRs */
if (DWARF_IS_FP_LOC (loc))
return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_LOC (loc), (unw_fpreg_t*)val,
0, c->as_arg);
if (DWARF_IS_REG_LOC (loc))
return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), val,
0, c->as_arg);
if (DWARF_IS_MEM_LOC (loc))
return (*c->as->acc.access_mem) (c->as, DWARF_GET_LOC (loc), val,
0, c->as_arg);
assert(DWARF_IS_VAL_LOC (loc));
*val = DWARF_GET_LOC (loc);
return 0;
}
static inline int
dwarf_put (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t val)
{
assert(sizeof(unw_fpreg_t) == sizeof(unw_word_t));
assert(!DWARF_IS_VAL_LOC (loc));
if (DWARF_IS_NULL_LOC (loc))
return -UNW_EBADREG;
/* GPRs may be saved in FPRs */
if (DWARF_IS_FP_LOC (loc))
return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_LOC (loc), (unw_fpreg_t*) &val,
1, c->as_arg);
if (DWARF_IS_REG_LOC (loc))
return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), &val,
1, c->as_arg);
assert(DWARF_IS_MEM_LOC (loc));
return (*c->as->acc.access_mem) (c->as, DWARF_GET_LOC (loc), &val,
1, c->as_arg);
}
#define tdep_getcontext_trace unw_getcontext
#define tdep_init_done UNW_OBJ(init_done)
#define tdep_init_mem_validate UNW_OBJ(init_mem_validate)
#define tdep_init UNW_OBJ(init)
/* Platforms that support UNW_INFO_FORMAT_TABLE need to define
tdep_search_unwind_table. */
#define tdep_search_unwind_table dwarf_search_unwind_table
#define tdep_find_unwind_table dwarf_find_unwind_table
#define tdep_get_elf_image UNW_ARCH_OBJ(get_elf_image)
#define tdep_get_exe_image_path UNW_ARCH_OBJ(get_exe_image_path)
#define tdep_access_reg UNW_OBJ(access_reg)
#define tdep_access_fpreg UNW_OBJ(access_fpreg)
#define tdep_fetch_frame(c,ip,n) do {} while(0)
#define tdep_cache_frame(c) 0
#define tdep_reuse_frame(c,rs) do {} while(0)
#define tdep_stash_frame(cs,rs) do {} while(0)
#define tdep_trace(cur,addr,n) (-UNW_ENOINFO)
#define tdep_uc_addr UNW_OBJ(uc_addr)
#ifdef UNW_LOCAL_ONLY
# define tdep_find_proc_info(c,ip,n) \
dwarf_find_proc_info((c)->as, (ip), &(c)->pi, (n), \
(c)->as_arg)
# define tdep_put_unwind_info(as,pi,arg) \
dwarf_put_unwind_info((as), (pi), (arg))
#else
# define tdep_find_proc_info(c,ip,n) \
(*(c)->as->acc.find_proc_info)((c)->as, (ip), &(c)->pi, (n), \
(c)->as_arg)
# define tdep_put_unwind_info(as,pi,arg) \
(*(as)->acc.put_unwind_info)((as), (pi), (arg))
#endif
#define tdep_get_as(c) ((c)->dwarf.as)
#define tdep_get_as_arg(c) ((c)->dwarf.as_arg)
#define tdep_get_ip(c) ((c)->dwarf.ip)
#define tdep_big_endian(as) 1
extern atomic_bool tdep_init_done;
extern void tdep_init (void);
extern void tdep_init_mem_validate (void);
extern int tdep_search_unwind_table (unw_addr_space_t as, unw_word_t ip,
unw_dyn_info_t *di, unw_proc_info_t *pi,
int need_unwind_info, void *arg);
extern void *tdep_uc_addr (unw_tdep_context_t *uc, int reg);
extern int tdep_get_elf_image (struct elf_image *ei, pid_t pid, unw_word_t ip,
unsigned long *segbase, unsigned long *mapoff,
char *path, size_t pathlen);
extern void tdep_get_exe_image_path (char *path);
extern int tdep_access_reg (struct cursor *c, unw_regnum_t reg,
unw_word_t *valp, int write);
extern int tdep_access_fpreg (struct cursor *c, unw_regnum_t reg,
unw_fpreg_t *valp, int write);
#endif /* S390X_LIBUNWIND_I_H */
| /* libunwind - a platform-independent unwind library
Copyright (C) 2002-2005 Hewlett-Packard Co
Contributed by David Mosberger-Tang <davidm@hpl.hp.com>
Modified for x86_64 by Max Asbock <masbock@us.ibm.com>
This file is part of libunwind.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
#ifndef S390X_LIBUNWIND_I_H
#define S390X_LIBUNWIND_I_H
/* Target-dependent definitions that are internal to libunwind but need
to be shared with target-independent code. */
#include <stdlib.h>
#include <libunwind.h>
#include <stdatomic.h>
#include "elf64.h"
#include "mempool.h"
#include "dwarf.h"
struct unw_addr_space
{
struct unw_accessors acc;
unw_caching_policy_t caching_policy;
_Atomic uint32_t cache_generation;
unw_word_t dyn_generation; /* see dyn-common.h */
unw_word_t dyn_info_list_addr; /* (cached) dyn_info_list_addr */
struct dwarf_rs_cache global_cache;
struct unw_debug_frame_list *debug_frames;
};
struct cursor
{
struct dwarf_cursor dwarf; /* must be first */
/* Format of sigcontext structure and address at which it is
stored: */
enum
{
S390X_SCF_NONE = 0, /* no signal frame encountered */
S390X_SCF_LINUX_SIGFRAME = 1, /* Linux struct sigcontext */
S390X_SCF_LINUX_RT_SIGFRAME = 2, /* Linux ucontext_t */
}
sigcontext_format;
unw_word_t sigcontext_addr;
unw_word_t sigcontext_sp;
unw_word_t sigcontext_pc;
int validate;
ucontext_t *uc;
};
static inline ucontext_t *
dwarf_get_uc(const struct dwarf_cursor *cursor)
{
const struct cursor *c = (struct cursor *) cursor->as_arg;
return c->uc;
}
#define DWARF_GET_LOC(l) ((l).val)
# define DWARF_LOC_TYPE_MEM (0 << 0)
# define DWARF_LOC_TYPE_FP (1 << 0)
# define DWARF_LOC_TYPE_REG (1 << 1)
# define DWARF_LOC_TYPE_VAL (1 << 2)
# define DWARF_IS_REG_LOC(l) (((l).type & DWARF_LOC_TYPE_REG) != 0)
# define DWARF_IS_FP_LOC(l) (((l).type & DWARF_LOC_TYPE_FP) != 0)
# define DWARF_IS_MEM_LOC(l) ((l).type == DWARF_LOC_TYPE_MEM)
# define DWARF_IS_VAL_LOC(l) (((l).type & DWARF_LOC_TYPE_VAL) != 0)
# define DWARF_LOC(r, t) ((dwarf_loc_t) { .val = (r), .type = (t) })
# define DWARF_VAL_LOC(c,v) DWARF_LOC ((v), DWARF_LOC_TYPE_VAL)
# define DWARF_MEM_LOC(c,m) DWARF_LOC ((m), DWARF_LOC_TYPE_MEM)
#ifdef UNW_LOCAL_ONLY
# define DWARF_NULL_LOC DWARF_LOC (0, 0)
# define DWARF_IS_NULL_LOC(l) (DWARF_GET_LOC (l) == 0)
# define DWARF_REG_LOC(c,r) (DWARF_LOC((unw_word_t) \
tdep_uc_addr(dwarf_get_uc(c), (r)), 0))
# define DWARF_FPREG_LOC(c,r) (DWARF_LOC((unw_word_t) \
tdep_uc_addr(dwarf_get_uc(c), (r)), 0))
#else /* !UNW_LOCAL_ONLY */
# define DWARF_NULL_LOC DWARF_LOC (0, 0)
# define DWARF_IS_NULL_LOC(l) \
({ dwarf_loc_t _l = (l); _l.val == 0 && _l.type == 0; })
# define DWARF_REG_LOC(c,r) DWARF_LOC((r), DWARF_LOC_TYPE_REG)
# define DWARF_FPREG_LOC(c,r) DWARF_LOC((r), (DWARF_LOC_TYPE_REG \
| DWARF_LOC_TYPE_FP))
#endif /* !UNW_LOCAL_ONLY */
static inline int
dwarf_getfp (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t *val)
{
assert(sizeof(unw_fpreg_t) == sizeof(unw_word_t));
if (DWARF_IS_NULL_LOC (loc))
return -UNW_EBADREG;
if (DWARF_IS_FP_LOC (loc))
return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_LOC (loc), val,
0, c->as_arg);
/* FPRs may be saved in GPRs */
if (DWARF_IS_REG_LOC (loc))
return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), (unw_word_t*)val,
0, c->as_arg);
if (DWARF_IS_MEM_LOC (loc))
return (*c->as->acc.access_mem) (c->as, DWARF_GET_LOC (loc), (unw_word_t*)val,
0, c->as_arg);
assert(DWARF_IS_VAL_LOC (loc));
*val = *(unw_fpreg_t*) DWARF_GET_LOC (loc);
return 0;
}
static inline int
dwarf_putfp (struct dwarf_cursor *c, dwarf_loc_t loc, unw_fpreg_t val)
{
assert(sizeof(unw_fpreg_t) == sizeof(unw_word_t));
assert(!DWARF_IS_VAL_LOC (loc));
if (DWARF_IS_NULL_LOC (loc))
return -UNW_EBADREG;
if (DWARF_IS_FP_LOC (loc))
return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_LOC (loc), &val,
1, c->as_arg);
/* FPRs may be saved in GPRs */
if (DWARF_IS_REG_LOC (loc))
return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), (unw_word_t*) &val,
1, c->as_arg);
assert(DWARF_IS_MEM_LOC (loc));
return (*c->as->acc.access_mem) (c->as, DWARF_GET_LOC (loc), (unw_word_t*) &val,
1, c->as_arg);
}
static inline int
dwarf_get (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t *val)
{
assert(sizeof(unw_fpreg_t) == sizeof(unw_word_t));
if (DWARF_IS_NULL_LOC (loc))
return -UNW_EBADREG;
/* GPRs may be saved in FPRs */
if (DWARF_IS_FP_LOC (loc))
return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_LOC (loc), (unw_fpreg_t*)val,
0, c->as_arg);
if (DWARF_IS_REG_LOC (loc))
return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), val,
0, c->as_arg);
if (DWARF_IS_MEM_LOC (loc))
return (*c->as->acc.access_mem) (c->as, DWARF_GET_LOC (loc), val,
0, c->as_arg);
assert(DWARF_IS_VAL_LOC (loc));
*val = DWARF_GET_LOC (loc);
return 0;
}
static inline int
dwarf_put (struct dwarf_cursor *c, dwarf_loc_t loc, unw_word_t val)
{
assert(sizeof(unw_fpreg_t) == sizeof(unw_word_t));
assert(!DWARF_IS_VAL_LOC (loc));
if (DWARF_IS_NULL_LOC (loc))
return -UNW_EBADREG;
/* GPRs may be saved in FPRs */
if (DWARF_IS_FP_LOC (loc))
return (*c->as->acc.access_fpreg) (c->as, DWARF_GET_LOC (loc), (unw_fpreg_t*) &val,
1, c->as_arg);
if (DWARF_IS_REG_LOC (loc))
return (*c->as->acc.access_reg) (c->as, DWARF_GET_LOC (loc), &val,
1, c->as_arg);
assert(DWARF_IS_MEM_LOC (loc));
return (*c->as->acc.access_mem) (c->as, DWARF_GET_LOC (loc), &val,
1, c->as_arg);
}
#define tdep_getcontext_trace unw_getcontext
#define tdep_init_done UNW_OBJ(init_done)
#define tdep_init_mem_validate UNW_OBJ(init_mem_validate)
#define tdep_init UNW_OBJ(init)
/* Platforms that support UNW_INFO_FORMAT_TABLE need to define
tdep_search_unwind_table. */
#define tdep_search_unwind_table dwarf_search_unwind_table
#define tdep_find_unwind_table dwarf_find_unwind_table
#define tdep_get_elf_image UNW_ARCH_OBJ(get_elf_image)
#define tdep_get_exe_image_path UNW_ARCH_OBJ(get_exe_image_path)
#define tdep_access_reg UNW_OBJ(access_reg)
#define tdep_access_fpreg UNW_OBJ(access_fpreg)
#define tdep_fetch_frame(c,ip,n) do {} while(0)
#define tdep_cache_frame(c) 0
#define tdep_reuse_frame(c,rs) do {} while(0)
#define tdep_stash_frame(cs,rs) do {} while(0)
#define tdep_trace(cur,addr,n) (-UNW_ENOINFO)
#define tdep_uc_addr UNW_OBJ(uc_addr)
#ifdef UNW_LOCAL_ONLY
# define tdep_find_proc_info(c,ip,n) \
dwarf_find_proc_info((c)->as, (ip), &(c)->pi, (n), \
(c)->as_arg)
# define tdep_put_unwind_info(as,pi,arg) \
dwarf_put_unwind_info((as), (pi), (arg))
#else
# define tdep_find_proc_info(c,ip,n) \
(*(c)->as->acc.find_proc_info)((c)->as, (ip), &(c)->pi, (n), \
(c)->as_arg)
# define tdep_put_unwind_info(as,pi,arg) \
(*(as)->acc.put_unwind_info)((as), (pi), (arg))
#endif
#define tdep_get_as(c) ((c)->dwarf.as)
#define tdep_get_as_arg(c) ((c)->dwarf.as_arg)
#define tdep_get_ip(c) ((c)->dwarf.ip)
#define tdep_big_endian(as) 1
extern atomic_bool tdep_init_done;
extern void tdep_init (void);
extern void tdep_init_mem_validate (void);
extern int tdep_search_unwind_table (unw_addr_space_t as, unw_word_t ip,
unw_dyn_info_t *di, unw_proc_info_t *pi,
int need_unwind_info, void *arg);
extern void *tdep_uc_addr (unw_tdep_context_t *uc, int reg);
extern int tdep_get_elf_image (struct elf_image *ei, pid_t pid, unw_word_t ip,
unsigned long *segbase, unsigned long *mapoff,
char *path, size_t pathlen);
extern void tdep_get_exe_image_path (char *path);
extern int tdep_access_reg (struct cursor *c, unw_regnum_t reg,
unw_word_t *valp, int write);
extern int tdep_access_fpreg (struct cursor *c, unw_regnum_t reg,
unw_fpreg_t *valp, int write);
#endif /* S390X_LIBUNWIND_I_H */
| -1 |
dotnet/runtime | 65,922 | Rely on PGO for isinst/castclass | Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | EgorBo | "2022-02-26T22:13:46Z" | "2022-04-01T21:25:56Z" | 063d08018602291f8dbd9d9b6055b8e1af874cb8 | f249a3d58b0b2bb55adca1a56c5a39cf446437b7 | Rely on PGO for isinst/castclass. Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | ./src/native/public/mono/metadata/tokentype.h | /**
* \file
*/
#ifndef _MONO_METADATA_TOKENTYPE_H_
#define _MONO_METADATA_TOKENTYPE_H_
/*
* These tokens match the table ID except for the last
* three (string, name and base type which are special)
*/
typedef enum {
MONO_TOKEN_MODULE = 0x00000000,
MONO_TOKEN_TYPE_REF = 0x01000000,
MONO_TOKEN_TYPE_DEF = 0x02000000,
MONO_TOKEN_FIELD_DEF = 0x04000000,
MONO_TOKEN_METHOD_DEF = 0x06000000,
MONO_TOKEN_PARAM_DEF = 0x08000000,
MONO_TOKEN_INTERFACE_IMPL = 0x09000000,
MONO_TOKEN_MEMBER_REF = 0x0a000000,
MONO_TOKEN_CUSTOM_ATTRIBUTE = 0x0c000000,
MONO_TOKEN_PERMISSION = 0x0e000000,
MONO_TOKEN_SIGNATURE = 0x11000000,
MONO_TOKEN_EVENT = 0x14000000,
MONO_TOKEN_PROPERTY = 0x17000000,
MONO_TOKEN_MODULE_REF = 0x1a000000,
MONO_TOKEN_TYPE_SPEC = 0x1b000000,
MONO_TOKEN_ASSEMBLY = 0x20000000,
MONO_TOKEN_ASSEMBLY_REF = 0x23000000,
MONO_TOKEN_FILE = 0x26000000,
MONO_TOKEN_EXPORTED_TYPE = 0x27000000,
MONO_TOKEN_MANIFEST_RESOURCE = 0x28000000,
MONO_TOKEN_GENERIC_PARAM = 0x2a000000,
MONO_TOKEN_METHOD_SPEC = 0x2b000000,
/*
* These do not match metadata tables directly
*/
MONO_TOKEN_STRING = 0x70000000,
MONO_TOKEN_NAME = 0x71000000,
MONO_TOKEN_BASE_TYPE = 0x72000000
} MonoTokenType;
#endif /* _MONO_METADATA_TOKENTYPE_H_ */
| /**
* \file
*/
#ifndef _MONO_METADATA_TOKENTYPE_H_
#define _MONO_METADATA_TOKENTYPE_H_
/*
* These tokens match the table ID except for the last
* three (string, name and base type which are special)
*/
typedef enum {
MONO_TOKEN_MODULE = 0x00000000,
MONO_TOKEN_TYPE_REF = 0x01000000,
MONO_TOKEN_TYPE_DEF = 0x02000000,
MONO_TOKEN_FIELD_DEF = 0x04000000,
MONO_TOKEN_METHOD_DEF = 0x06000000,
MONO_TOKEN_PARAM_DEF = 0x08000000,
MONO_TOKEN_INTERFACE_IMPL = 0x09000000,
MONO_TOKEN_MEMBER_REF = 0x0a000000,
MONO_TOKEN_CUSTOM_ATTRIBUTE = 0x0c000000,
MONO_TOKEN_PERMISSION = 0x0e000000,
MONO_TOKEN_SIGNATURE = 0x11000000,
MONO_TOKEN_EVENT = 0x14000000,
MONO_TOKEN_PROPERTY = 0x17000000,
MONO_TOKEN_MODULE_REF = 0x1a000000,
MONO_TOKEN_TYPE_SPEC = 0x1b000000,
MONO_TOKEN_ASSEMBLY = 0x20000000,
MONO_TOKEN_ASSEMBLY_REF = 0x23000000,
MONO_TOKEN_FILE = 0x26000000,
MONO_TOKEN_EXPORTED_TYPE = 0x27000000,
MONO_TOKEN_MANIFEST_RESOURCE = 0x28000000,
MONO_TOKEN_GENERIC_PARAM = 0x2a000000,
MONO_TOKEN_METHOD_SPEC = 0x2b000000,
/*
* These do not match metadata tables directly
*/
MONO_TOKEN_STRING = 0x70000000,
MONO_TOKEN_NAME = 0x71000000,
MONO_TOKEN_BASE_TYPE = 0x72000000
} MonoTokenType;
#endif /* _MONO_METADATA_TOKENTYPE_H_ */
| -1 |
dotnet/runtime | 65,922 | Rely on PGO for isinst/castclass | Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | EgorBo | "2022-02-26T22:13:46Z" | "2022-04-01T21:25:56Z" | 063d08018602291f8dbd9d9b6055b8e1af874cb8 | f249a3d58b0b2bb55adca1a56c5a39cf446437b7 | Rely on PGO for isinst/castclass. Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | ./src/mono/mono/sgen/sgen-qsort.h | /**
* \file
* Fast inline sorting
*
* Copyright (C) 2014 Xamarin Inc
*
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#ifndef __MONO_SGENQSORT_H__
#define __MONO_SGENQSORT_H__
/* Copied from non-inline implementation in sgen-qsort.c */
#define DEF_QSORT_INLINE(name, type, compare) \
static inline void \
qsort_swap_##name (type array[], const ssize_t i, const ssize_t j, type *const swap_tmp) \
{ \
*swap_tmp = array [i]; \
array [i] = array [j]; \
array [j] = *swap_tmp; \
} \
\
static void \
qsort_rec_##name ( \
type array[], \
ssize_t begin, \
ssize_t end, \
type *const pivot_tmp, \
type *const swap_tmp) \
{ \
ssize_t left, right, middle, pivot; \
while (begin < end) { \
left = begin; \
right = end; \
middle = begin + (end - begin) / 2; \
if (compare (array [middle], array [left]) < 0) \
qsort_swap_##name (array, middle, left, swap_tmp); \
if (compare (array [right], array [left]) < 0) \
qsort_swap_##name (array, right, left, swap_tmp); \
if (compare (array [right], array [middle]) < 0) \
qsort_swap_##name (array, right, middle, swap_tmp); \
pivot = middle; \
*pivot_tmp = array [pivot]; \
for (;;) { \
while (left <= right && compare (array [left], *pivot_tmp) <= 0) \
++left; \
while (left <= right && compare (array [right], *pivot_tmp) > 0) \
--right; \
if (left > right) \
break; \
qsort_swap_##name (array, left, right, swap_tmp); \
if (pivot == right) \
pivot = left; \
++left; \
--right; \
} \
array [pivot] = array [right]; \
array [right] = *pivot_tmp; \
--right; \
if (right - begin < end - left) { \
qsort_rec_##name (array, begin, right, pivot_tmp, swap_tmp); \
begin = left; \
} else { \
qsort_rec_##name (array, left, end, pivot_tmp, swap_tmp); \
end = right; \
} \
} \
} \
\
static inline void \
qsort_##name (type array[], size_t count) \
{ \
type pivot_tmp; \
type swap_tmp; \
qsort_rec_##name (array, 0, (ssize_t)count - 1, &pivot_tmp, &swap_tmp); \
}
#endif
| /**
* \file
* Fast inline sorting
*
* Copyright (C) 2014 Xamarin Inc
*
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#ifndef __MONO_SGENQSORT_H__
#define __MONO_SGENQSORT_H__
/* Copied from non-inline implementation in sgen-qsort.c */
#define DEF_QSORT_INLINE(name, type, compare) \
static inline void \
qsort_swap_##name (type array[], const ssize_t i, const ssize_t j, type *const swap_tmp) \
{ \
*swap_tmp = array [i]; \
array [i] = array [j]; \
array [j] = *swap_tmp; \
} \
\
static void \
qsort_rec_##name ( \
type array[], \
ssize_t begin, \
ssize_t end, \
type *const pivot_tmp, \
type *const swap_tmp) \
{ \
ssize_t left, right, middle, pivot; \
while (begin < end) { \
left = begin; \
right = end; \
middle = begin + (end - begin) / 2; \
if (compare (array [middle], array [left]) < 0) \
qsort_swap_##name (array, middle, left, swap_tmp); \
if (compare (array [right], array [left]) < 0) \
qsort_swap_##name (array, right, left, swap_tmp); \
if (compare (array [right], array [middle]) < 0) \
qsort_swap_##name (array, right, middle, swap_tmp); \
pivot = middle; \
*pivot_tmp = array [pivot]; \
for (;;) { \
while (left <= right && compare (array [left], *pivot_tmp) <= 0) \
++left; \
while (left <= right && compare (array [right], *pivot_tmp) > 0) \
--right; \
if (left > right) \
break; \
qsort_swap_##name (array, left, right, swap_tmp); \
if (pivot == right) \
pivot = left; \
++left; \
--right; \
} \
array [pivot] = array [right]; \
array [right] = *pivot_tmp; \
--right; \
if (right - begin < end - left) { \
qsort_rec_##name (array, begin, right, pivot_tmp, swap_tmp); \
begin = left; \
} else { \
qsort_rec_##name (array, left, end, pivot_tmp, swap_tmp); \
end = right; \
} \
} \
} \
\
static inline void \
qsort_##name (type array[], size_t count) \
{ \
type pivot_tmp; \
type swap_tmp; \
qsort_rec_##name (array, 0, (ssize_t)count - 1, &pivot_tmp, &swap_tmp); \
}
#endif
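/*
 * Illustrative sketch only (not part of the original header), with a hypothetical
 * comparator: one way DEF_QSORT_INLINE could be instantiated and used.
 *
 *   static int compare_int (int a, int b) { return (a > b) - (a < b); }
 *   DEF_QSORT_INLINE (int, int, compare_int)
 *   ...
 *   qsort_int (values, count);   // sorts `values` in place via the generated helper
 */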
| -1 |
dotnet/runtime | 65,922 | Rely on PGO for isinst/castclass | Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | EgorBo | "2022-02-26T22:13:46Z" | "2022-04-01T21:25:56Z" | 063d08018602291f8dbd9d9b6055b8e1af874cb8 | f249a3d58b0b2bb55adca1a56c5a39cf446437b7 | Rely on PGO for isinst/castclass. Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | ./src/coreclr/utilcode/yieldprocessornormalized.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "stdafx.h"
#include "yieldprocessornormalized.h"
bool YieldProcessorNormalization::s_isMeasurementScheduled;
// Defaults are for when normalization has not yet been done
unsigned int YieldProcessorNormalization::s_yieldsPerNormalizedYield = 1;
unsigned int YieldProcessorNormalization::s_optimalMaxNormalizedYieldsPerSpinIteration =
(unsigned int)
(
(double)YieldProcessorNormalization::TargetMaxNsPerSpinIteration /
YieldProcessorNormalization::TargetNsPerNormalizedYield +
0.5
);
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "stdafx.h"
#include "yieldprocessornormalized.h"
bool YieldProcessorNormalization::s_isMeasurementScheduled;
// Defaults are for when normalization has not yet been done
unsigned int YieldProcessorNormalization::s_yieldsPerNormalizedYield = 1;
unsigned int YieldProcessorNormalization::s_optimalMaxNormalizedYieldsPerSpinIteration =
(unsigned int)
(
(double)YieldProcessorNormalization::TargetMaxNsPerSpinIteration /
YieldProcessorNormalization::TargetNsPerNormalizedYield +
0.5
);
| -1 |
dotnet/runtime | 65,922 | Rely on PGO for isinst/castclass | Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | EgorBo | "2022-02-26T22:13:46Z" | "2022-04-01T21:25:56Z" | 063d08018602291f8dbd9d9b6055b8e1af874cb8 | f249a3d58b0b2bb55adca1a56c5a39cf446437b7 | Rely on PGO for isinst/castclass. Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | ./src/mono/mono/metadata/object-internals.h | /**
* \file
*/
#ifndef __MONO_OBJECT_INTERNALS_H__
#define __MONO_OBJECT_INTERNALS_H__
#include <mono/utils/mono-forward-internal.h>
#include <mono/metadata/object-forward.h>
#include <mono/metadata/handle-decl.h>
#include <mono/metadata/object.h>
#include <mono/metadata/threads.h>
#include <mono/metadata/reflection.h>
#include <mono/metadata/mempool.h>
#include <mono/metadata/class-internals.h>
#include <mono/metadata/threads-types.h>
#include <mono/metadata/handle.h>
#include <mono/metadata/abi-details.h>
#include "mono/utils/mono-compiler.h"
#include "mono/utils/mono-error.h"
#include "mono/utils/mono-error-internals.h"
#include "mono/utils/mono-machine.h"
#include "mono/utils/mono-stack-unwinding.h"
#include "mono/utils/mono-tls.h"
#include "mono/utils/mono-coop-mutex.h"
#include <mono/metadata/icalls.h>
/* Use this as MONO_CHECK_ARG (arg,expr,) in functions returning void */
#define MONO_CHECK_ARG(arg, expr, retval) do { \
if (G_UNLIKELY (!(expr))) \
{ \
if (0) { (void)(arg); } /* check if the name exists */ \
ERROR_DECL (error); \
mono_error_set_argument_format (error, #arg, "assertion `%s' failed", #expr); \
mono_error_set_pending_exception (error); \
return retval; \
} \
} while (0)
#define MONO_CHECK_ARG_NULL_NAMED(arg, argname, retval) do { \
if (G_UNLIKELY (!(arg))) \
{ \
ERROR_DECL (error); \
mono_error_set_argument_null (error, (argname), ""); \
mono_error_set_pending_exception (error); \
return retval; \
} \
} while (0)
/* Use this as MONO_CHECK_ARG_NULL (arg,) in functions returning void */
#define MONO_CHECK_ARG_NULL(arg, retval) do { \
if (G_UNLIKELY (!(arg))) \
{ \
mono_error_set_argument_null (error, #arg, ""); \
return retval; \
} \
} while (0)
/* Use this as MONO_CHECK_ARG_NULL_HANDLE (arg,) in functions returning void */
#define MONO_CHECK_ARG_NULL_HANDLE(arg, retval) do { \
if (G_UNLIKELY (MONO_HANDLE_IS_NULL (arg))) \
{ \
mono_error_set_argument_null (error, #arg, ""); \
return retval; \
} \
} while (0)
#define MONO_CHECK_ARG_NULL_HANDLE_NAMED(arg, argname, retval) do { \
if (G_UNLIKELY (MONO_HANDLE_IS_NULL (arg))) \
{ \
mono_error_set_argument_null (error, (argname), ""); \
return retval; \
} \
} while (0)
/* Use this as MONO_CHECK_NULL (arg,) in functions returning void */
#define MONO_CHECK_NULL(arg, retval) do { \
if (G_UNLIKELY (!(arg))) \
{ \
ERROR_DECL (error); \
mono_error_set_null_reference (error); \
mono_error_set_pending_exception (error); \
return retval; \
} \
} while (0)
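/*
 * Illustrative sketch only (not part of the original header): a hypothetical icall
 * using the argument-check macros above. MONO_CHECK_ARG and MONO_CHECK_NULL declare
 * their own MonoError and set a pending exception, while MONO_CHECK_ARG_NULL and the
 * _HANDLE variants reference a MonoError named `error` that must already be in scope.
 *
 *   MonoBoolean
 *   ves_icall_Example_HasElements (MonoArray *arr)    // hypothetical icall
 *   {
 *       MONO_CHECK_ARG (arr, arr != NULL, FALSE);     // raises ArgumentException when arr is NULL
 *       return mono_array_length_internal (arr) != 0;
 *   }
 */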
MONO_COMPONENT_API MonoClass *
mono_class_create_array (MonoClass *element_class, uint32_t rank);
MonoArrayHandle
mono_array_new_specific_handle (MonoVTable *vtable, uintptr_t n, MonoError *error);
MonoArray*
mono_array_new_specific_checked (MonoVTable *vtable, uintptr_t n, MonoError *error);
/*
* Macros which cache.
* These should be used instead of the original versions.
*/
static inline MonoClass*
mono_array_class_get_cached_function (MonoClass *eclass, MonoClass **aclass)
{
MonoClass *a = *aclass;
if (a)
return a;
a = mono_class_create_array (eclass, 1);
g_assert (a);
if (a)
*aclass = a;
return *aclass;
}
// eclass should be a run-time constant
// If you get an error using this macro, you need to manually instantiate the MonoClass *foo ## _array cache.
// See for example object_class_array.
#define mono_array_class_get_cached(eclass) (mono_array_class_get_cached_function ((eclass), &(eclass ## _array)))
static inline MonoArray*
mono_array_new_cached_function (MonoClass *aclass, int size, MonoError *error)
{
MonoVTable *vtable = mono_class_vtable_checked (aclass, error);
MonoArray *arr = NULL;
if (is_ok (error))
arr = mono_array_new_specific_checked (vtable, size, error);
return arr;
}
// eclass should be a run-time constant
// If you get an error using this macro, you need to manually instantiate the MonoClass *foo ## _array cache.
// See for example object_class_array.
#define mono_array_new_cached(eclass, size, error) \
mono_array_new_cached_function (mono_array_class_get_cached (eclass), (size), (error))
static inline MonoArrayHandle
mono_array_new_cached_handle_function (MonoClass *aclass, int size, MonoError *error)
{
MonoVTable *vtable = mono_class_vtable_checked (aclass, error);
MonoArrayHandle arr = NULL_HANDLE_ARRAY;
if (is_ok (error))
arr = mono_array_new_specific_handle (vtable, size, error);
return arr;
}
// eclass should be a run-time constant
// If you get an error using this macro, you need to manually instantiate the MonoClass *foo ## _array cache.
// See for example object_class_array.
#define mono_array_new_cached_handle(eclass, size, error) \
mono_array_new_cached_handle_function (mono_array_class_get_cached (eclass), (size), (error))
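/*
 * Illustrative sketch only (not part of the original header): the caching pattern the
 * comments above describe, using hypothetical names. The static cache slot must follow
 * the `eclass ## _array` naming convention, and a MonoError* named `error` is assumed
 * to be in scope.
 *
 *   static MonoClass *byte_class;         // hypothetical element class, initialized elsewhere
 *   static MonoClass *byte_class_array;   // cache slot consumed by mono_array_class_get_cached
 *   ...
 *   MonoArray *buf = mono_array_new_cached (byte_class, 32, error);
 */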
typedef uint32_t mono_array_size_t;
typedef int32_t mono_array_lower_bound_t;
#define MONO_ARRAY_MAX_INDEX ((int32_t) 0x7fffffff)
#define MONO_ARRAY_MAX_SIZE ((uint32_t) 0xffffffff)
typedef struct {
mono_array_size_t length;
mono_array_lower_bound_t lower_bound;
} MonoArrayBounds;
struct _MonoArray {
MonoObject obj;
/* bounds is NULL for szarrays */
MonoArrayBounds *bounds;
/* total number of elements of the array */
mono_array_size_t max_length;
/* we use mono_64bitaligned_t to ensure proper alignment on platforms that need it */
mono_64bitaligned_t vector [MONO_ZERO_LEN_ARRAY];
};
/* match the layout of the managed definition of Span<T> */
#define MONO_DEFINE_SPAN_OF_T(name, type) \
typedef struct { \
type* _pointer; \
uint32_t _length; \
} name;
MONO_DEFINE_SPAN_OF_T (MonoSpanOfObjects, MonoObject*)
#define MONO_SIZEOF_MONO_ARRAY (MONO_STRUCT_OFFSET_CONSTANT (MonoArray, vector))
struct _MonoString {
MonoObject object;
int32_t length;
mono_unichar2 chars [MONO_ZERO_LEN_ARRAY];
};
#define MONO_SIZEOF_MONO_STRING (MONO_STRUCT_OFFSET (MonoString, chars))
#define mono_object_class(obj) (((MonoObject*)(obj))->vtable->klass)
#define mono_object_domain(obj) (((MonoObject*)(obj))->vtable->domain)
#define mono_string_chars_fast(s) ((mono_unichar2*)(s)->chars)
#define mono_string_length_fast(s) ((s)->length)
/**
* mono_array_length_internal:
* \param array a \c MonoArray*
* \returns the total number of elements in the array. This works for
* both vectors and multidimensional arrays.
*/
#define mono_array_length_internal(array) ((array)->max_length)
static inline
uintptr_t
mono_array_handle_length (MonoArrayHandle arr)
{
MONO_REQ_GC_UNSAFE_MODE;
return mono_array_length_internal (MONO_HANDLE_RAW (arr));
}
// Equivalent to mono_array_addr_with_size, except:
// 1. A macro instead of a function -- the types of size and index are open.
// 2. mono_array_addr_with_size could, but does not, do GC mode transitions.
#define mono_array_addr_with_size_fast(array,size,index) ( ((char*)(array)->vector) + (size) * (index) )
#define mono_array_addr_fast(array,type,index) ((type*)(void*) mono_array_addr_with_size_fast (array, sizeof (type), index))
#define mono_array_get_fast(array,type,index) ( *(type*)mono_array_addr_fast ((array), type, (index)) )
#define mono_array_set_fast(array,type,index,value) \
do { \
type *__p = (type *) mono_array_addr_fast ((array), type, (index)); \
*__p = (value); \
} while (0)
#define mono_array_setref_fast(array,index,value) \
do { \
void **__p = (void **) mono_array_addr_fast ((array), void*, (index)); \
mono_gc_wbarrier_set_arrayref_internal ((array), __p, (MonoObject*)(value)); \
/* *__p = (value);*/ \
} while (0)
#define mono_array_memcpy_refs_fast(dest,destidx,src,srcidx,count) \
do { \
void **__p = (void **) mono_array_addr_fast ((dest), void*, (destidx)); \
void **__s = mono_array_addr_fast ((src), void*, (srcidx)); \
mono_gc_wbarrier_arrayref_copy_internal (__p, __s, (count)); \
} while (0)
// The _internal variants behave like _fast but keep the preexisting fixed parameter types:
//   int size
//   uintptr_t idx
// They mimic the non-_internal (embedding API) versions without their GC mode transitions,
// or at least keep the runtime from going through the embedding API, whether or not that
// API performs GC mode transitions.
static inline char*
mono_array_addr_with_size_internal (MonoArray *array, int size, uintptr_t idx)
{
return mono_array_addr_with_size_fast (array, size, idx);
}
#define mono_array_addr_internal(array,type,index) ((type*)(void*) mono_array_addr_with_size_internal (array, sizeof (type), index))
#define mono_array_get_internal(array,type,index) ( *(type*)mono_array_addr_internal ((array), type, (index)) )
#define mono_array_set_internal(array,type,index,value) \
do { \
type *__p = (type *) mono_array_addr_internal ((array), type, (index)); \
*__p = (value); \
} while (0)
#define mono_array_setref_internal(array,index,value) \
do { \
void **__p = (void **) mono_array_addr_internal ((array), void*, (index)); \
mono_gc_wbarrier_set_arrayref_internal ((array), __p, (MonoObject*)(value)); \
/* *__p = (value);*/ \
} while (0)
#define mono_array_memcpy_refs_internal(dest,destidx,src,srcidx,count) \
do { \
void **__p = (void **) mono_array_addr_internal ((dest), void*, (destidx)); \
void **__s = mono_array_addr_internal ((src), void*, (srcidx)); \
mono_gc_wbarrier_arrayref_copy_internal (__p, __s, (count)); \
} while (0)
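/*
 * Illustrative sketch only (not part of the original header): hypothetical element
 * access through the _internal accessors above (`arr`, `obj_arr` and `some_object`
 * are assumed variables).
 *
 *   gint32 first = mono_array_get_internal (arr, gint32, 0);
 *   mono_array_set_internal (arr, gint32, 0, first + 1);
 *   mono_array_setref_internal (obj_arr, 0, some_object);   // reference store goes through the write barrier
 */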
static inline gboolean
mono_handle_array_has_bounds (MonoArrayHandle arr)
{
return MONO_HANDLE_GETVAL (arr, bounds) != NULL;
}
static inline void
mono_handle_array_get_bounds_dim (MonoArrayHandle arr, gint32 dim, MonoArrayBounds *bounds)
{
*bounds = MONO_HANDLE_GETVAL (arr, bounds [dim]);
}
#define mono_span_length(span) (span->_length)
#define mono_span_get(span,type,idx) (type)(!span->_pointer ? (type)0 : span->_pointer[idx])
#define mono_span_addr(span,type,idx) (type*)(span->_pointer + idx)
#define mono_span_setref(span,index,value) \
do { \
void **__p = (void **) mono_span_addr ((span), void*, (index)); \
mono_gc_wbarrier_generic_store_internal (__p, (MonoObject*)(value)); \
/* *__p = (value);*/ \
} while (0)
static inline MonoSpanOfObjects
mono_span_create_from_object_array (MonoArray *arr) {
MonoSpanOfObjects span;
if (arr != NULL) {
span._length = (int32_t)mono_array_length_internal (arr);
span._pointer = mono_array_addr_fast (arr, MonoObject*, 0);
} else {
span._length = 0;
span._pointer = NULL;
}
return span;
}
typedef struct {
MonoObject obj;
} MonoMarshalByRefObject;
TYPED_HANDLE_DECL (MonoMarshalByRefObject);
/* This is a copy of System.AppDomain */
struct _MonoAppDomain {
MonoMarshalByRefObject mbr;
};
/* Safely access System.AppDomain from native code */
TYPED_HANDLE_DECL (MonoAppDomain);
typedef struct _MonoStringBuilder MonoStringBuilder;
TYPED_HANDLE_DECL (MonoStringBuilder);
struct _MonoStringBuilder {
MonoObject object;
MonoArray *chunkChars;
MonoStringBuilder* chunkPrevious; // Link to the block logically before this block
	int chunkLength; // The index in ChunkChars that represents the end of the block
	int chunkOffset; // The logical offset (sum of all characters in previous blocks)
int maxCapacity;
};
static inline int
mono_string_builder_capacity (MonoStringBuilderHandle sbh)
{
MonoStringBuilder *sb = MONO_HANDLE_RAW (sbh);
return sb->chunkOffset + sb->chunkChars->max_length;
}
static inline int
mono_string_builder_string_length (MonoStringBuilderHandle sbh)
{
MonoStringBuilder *sb = MONO_HANDLE_RAW (sbh);
return sb->chunkOffset + sb->chunkLength;
}
typedef struct {
MonoType *type;
gpointer value;
MonoClass *klass;
} MonoTypedRef;
typedef struct {
gpointer args;
} MonoArgumentHandle;
typedef struct {
MonoMethodSignature *sig;
gpointer args;
gint32 next_arg;
gint32 num_args;
} MonoArgIterator;
struct _MonoException {
MonoObject object;
MonoString *class_name;
MonoString *message;
MonoObject *_data;
MonoObject *inner_ex;
MonoString *help_link;
/* Stores the IPs and the generic sharing infos
(vtable/MRGCTX) of the frames. */
MonoArray *trace_ips;
MonoString *stack_trace;
MonoString *remote_stack_trace;
gint32 remote_stack_index;
/* Dynamic methods referenced by the stack trace */
MonoArray *dynamic_methods;
gint32 hresult;
MonoString *source;
MonoObject *serialization_manager;
MonoObject *captured_traces;
MonoArray *native_trace_ips;
gint32 caught_in_unmanaged;
};
typedef struct {
MonoException base;
} MonoSystemException;
TYPED_HANDLE_DECL (MonoSystemException);
typedef struct {
MonoObject object;
MonoObject *async_state;
MonoObject *handle;
MonoObject *async_delegate;
gpointer *data;
MonoObject *object_data;
MonoBoolean sync_completed;
MonoBoolean completed;
MonoBoolean endinvoke_called;
MonoObject *async_callback;
MonoObject *execution_context;
MonoObject *original_context;
gint64 add_time;
} MonoAsyncResult;
TYPED_HANDLE_DECL (MonoAsyncResult);
typedef struct {
MonoMarshalByRefObject object;
gpointer handle;
} MonoWaitHandle;
TYPED_HANDLE_DECL (MonoWaitHandle);
/* System.Threading.StackCrawlMark */
/*
* This type is used to identify the method where execution has entered
* the BCL during stack walks. The outermost public method should
* define it like this:
* StackCrawlMark stackMark = StackCrawlMark.LookForMyCaller;
* and pass the stackMark as a byref argument down the call chain
* until it reaches an icall.
*/
typedef enum {
STACK_CRAWL_ME = 0,
STACK_CRAWL_CALLER = 1,
STACK_CRAWL_CALLERS_CALLER = 2,
STACK_CRAWL_THREAD = 3
} MonoStackCrawlMark;
/* MonoSafeHandle is in class-internals.h. */
/* Safely access System.Net.Sockets.SafeSocketHandle from native code */
TYPED_HANDLE_DECL (MonoSafeHandle);
/* This corresponds to System.Type */
struct _MonoReflectionType {
MonoObject object;
MonoType *type;
};
/* Safely access System.Type from native code */
TYPED_HANDLE_DECL (MonoReflectionType);
/* This corresponds to System.Runtime.CompilerServices.QCallTypeHandle */
struct _MonoQCallTypeHandle {
gpointer _ptr;
MonoType *type;
};
typedef struct _MonoQCallTypeHandle MonoQCallTypeHandle;
/* This corresponds to System.Runtime.CompilerServices.QCallAssembly */
struct _MonoQCallAssemblyHandle {
gpointer _ptr;
MonoAssembly *assembly;
};
typedef struct _MonoQCallAssemblyHandle MonoQCallAssemblyHandle;
typedef struct {
MonoObject object;
MonoReflectionType *class_to_proxy;
MonoObject *context;
MonoObject *unwrapped_server;
gint32 target_domain_id;
MonoString *target_uri;
MonoObject *object_identity;
MonoObject *obj_TP;
MonoObject *stub_data;
} MonoRealProxy;
/* Safely access System.Runtime.Remoting.Proxies.RealProxy from native code */
TYPED_HANDLE_DECL (MonoRealProxy);
typedef struct _MonoIUnknown MonoIUnknown;
typedef struct _MonoIUnknownVTable MonoIUnknownVTable;
/* STDCALL on windows, CDECL everywhere else to work with XPCOM and MainWin COM */
#ifdef HOST_WIN32
#define STDCALL __stdcall
#else
#define STDCALL
#endif
struct _MonoIUnknownVTable
{
int (STDCALL *QueryInterface)(MonoIUnknown *pUnk, gconstpointer riid, gpointer* ppv);
int (STDCALL *AddRef)(MonoIUnknown *pUnk);
int (STDCALL *Release)(MonoIUnknown *pUnk);
};
struct _MonoIUnknown
{
const MonoIUnknownVTable *vtable;
};
typedef struct {
MonoMarshalByRefObject object;
MonoIUnknown *iunknown;
GHashTable* itf_hash;
MonoObject *synchronization_context;
} MonoComObject;
TYPED_HANDLE_DECL (MonoComObject);
typedef struct {
MonoRealProxy real_proxy;
MonoComObject *com_object;
gint32 ref_count;
} MonoComInteropProxy;
TYPED_HANDLE_DECL (MonoComInteropProxy);
typedef struct {
MonoObject object;
MonoRealProxy *rp;
MonoRemoteClass *remote_class;
MonoBoolean custom_type_info;
} MonoTransparentProxy;
/* Safely access System.Runtime.Remoting.Proxies.TransparentProxy from native code */
TYPED_HANDLE_DECL (MonoTransparentProxy);
typedef struct {
MonoObject obj;
MonoReflectionMethod *method;
MonoArray *args;
MonoArray *names;
MonoArray *arg_types;
MonoObject *ctx;
MonoObject *rval;
MonoObject *exc;
MonoAsyncResult *async_result;
guint32 call_type;
} MonoMethodMessage;
TYPED_HANDLE_DECL (MonoMethodMessage);
/* Keep in sync with the System.MonoAsyncCall */
typedef struct {
MonoObject object;
MonoMethodMessage *msg;
MonoMethod *cb_method;
MonoDelegate *cb_target;
MonoObject *state;
MonoObject *res;
MonoArray *out_args;
} MonoAsyncCall;
TYPED_HANDLE_DECL (MonoAsyncCall);
typedef struct {
MonoObject obj;
MonoArray *frames;
MonoArray *captured_traces;
MonoBoolean debug_info;
} MonoStackTrace;
TYPED_HANDLE_DECL (MonoStackTrace);
typedef struct {
MonoObject obj;
gint32 il_offset;
gint32 native_offset;
gint64 method_address;
gint32 method_index;
MonoReflectionMethod *method;
MonoString *filename;
gint32 line;
gint32 column;
MonoString *internal_method_name;
} MonoStackFrame;
TYPED_HANDLE_DECL (MonoStackFrame);
typedef enum {
MONO_THREAD_FLAG_DONT_MANAGE = 1, // Don't wait for or abort this thread
MONO_THREAD_FLAG_NAME_SET = 2, // Thread name set from managed code
MONO_THREAD_FLAG_CLEANUP_FROM_NATIVE = 4, // Thread initialized in native so clean up in native
} MonoThreadFlags;
struct _MonoThreadInfo;
typedef struct MonoThreadName {
char* volatile chars; // null check outside of lock
gint32 free; // bool
gint32 length;
} MonoThreadName;
void
mono_gstring_append_thread_name (GString*, MonoInternalThread*);
struct _MonoInternalThread {
MonoObject obj;
volatile int lock_thread_id; /* to be used as the pre-shifted thread id in thin locks */
MonoThreadHandle *handle;
gpointer native_handle;
MonoThreadName name;
guint32 state; /* must be accessed while longlived->synch_cs is locked */
MonoException *abort_exc;
MonoGCHandle abort_state_handle;
guint64 tid; /* This is accessed as a gsize in the code (so it can hold a 64bit pointer on systems that need it), but needs to reserve 64 bits of space on all machines as it corresponds to a field in managed code */
gsize debugger_thread; // FIXME switch to bool as soon as CI testing with corlib version bump works
gpointer *static_data;
struct _MonoThreadInfo *thread_info;
/* This is modified using atomic ops, so keep it a gint32 */
gint32 __interruption_requested;
/* data that must live as long as this managed object is not finalized
* or as long as the underlying thread is attached, whichever is
* longer */
MonoLongLivedThreadData *longlived;
MonoBoolean threadpool_thread;
guint8 apartment_state;
gint32 managed_id;
guint32 small_id;
MonoThreadManageCallback manage_callback;
gsize flags;
gpointer thread_pinning_ref;
gint32 priority;
GPtrArray *owned_mutexes;
MonoOSEvent *suspended;
gint32 self_suspended; // TRUE | FALSE
gsize thread_state;
/* Points to self, set when starting up/attaching */
struct _MonoInternalThread *internal_thread;
MonoException *pending_exception;
/* This is used only to check that we are in sync between the representation
* of MonoInternalThread in native and InternalThread in managed
*
* DO NOT RENAME! DO NOT ADD FIELDS AFTER! */
gpointer last;
};
typedef struct {
guint32 state;
MonoObject *additional;
} MonoStreamingContext;
typedef struct {
MonoObject object;
guint32 intType;
} MonoInterfaceTypeAttribute;
typedef struct {
MonoObject object;
guint32 intType;
} MonoClassInterfaceAttribute;
/* Safely access System.Delegate from native code */
TYPED_HANDLE_DECL (MonoDelegate);
typedef void (*InterpJitInfoFunc) (MonoJitInfo *ji, gpointer user_data);
/*
* Callbacks supplied by the runtime and called by the modules in metadata/
* This interface is easier to extend than adding a new function type +
* a new 'install' function for every callback.
*/
typedef struct {
gpointer (*create_ftnptr) (gpointer addr);
gpointer (*get_addr_from_ftnptr) (gpointer descr);
char* (*get_runtime_build_info) (void);
const char* (*get_runtime_build_version) (void);
gpointer (*get_vtable_trampoline) (MonoVTable *vtable, int slot_index);
gpointer (*get_imt_trampoline) (MonoVTable *vtable, int imt_slot_index);
gboolean (*imt_entry_inited) (MonoVTable *vtable, int imt_slot_index);
void (*set_cast_details) (MonoClass *from, MonoClass *to);
void (*debug_log) (int level, MonoString *category, MonoString *message);
gboolean (*debug_log_is_enabled) (void);
void (*init_delegate) (MonoDelegateHandle delegate, MonoObjectHandle target, gpointer addr, MonoMethod *method, MonoError *error);
MonoObject* (*runtime_invoke) (MonoMethod *method, void *obj, void **params, MonoObject **exc, MonoError *error);
void* (*compile_method) (MonoMethod *method, MonoError *error);
gpointer (*create_jit_trampoline) (MonoMethod *method, MonoError *error);
/* used to free a dynamic method */
void (*free_method) (MonoMethod *method);
gpointer (*create_delegate_trampoline) (MonoClass *klass);
GHashTable *(*get_weak_field_indexes) (MonoImage *image);
gboolean (*is_interpreter_enabled) (void);
void (*init_mem_manager)(MonoMemoryManager*);
void (*free_mem_manager)(MonoMemoryManager*);
void (*metadata_update_published) (MonoAssemblyLoadContext *alc, uint32_t generation);
void (*get_jit_stats)(gint64 *methods_compiled, gint64 *cil_code_size_bytes, gint64 *native_code_size_bytes, gint64 *jit_time);
void (*get_exception_stats)(guint32 *exception_count);
// Same as compile_method, but returns a MonoFtnDesc in llvmonly mode
gpointer (*get_ftnptr)(MonoMethod *method, MonoError *error);
void (*interp_jit_info_foreach)(InterpJitInfoFunc func, gpointer user_data);
gboolean (*interp_sufficient_stack)(gsize size);
} MonoRuntimeCallbacks;
typedef gboolean (*MonoInternalStackWalk) (MonoStackFrameInfo *frame, MonoContext *ctx, gpointer data);
typedef gboolean (*MonoInternalExceptionFrameWalk) (MonoMethod *method, gpointer ip, size_t native_offset, gboolean managed, gpointer user_data);
typedef struct {
void (*mono_walk_stack_with_ctx) (MonoInternalStackWalk func, MonoContext *ctx, MonoUnwindOptions options, void *user_data);
void (*mono_walk_stack_with_state) (MonoInternalStackWalk func, MonoThreadUnwindState *state, MonoUnwindOptions options, void *user_data);
void (*mono_raise_exception) (MonoException *ex);
void (*mono_raise_exception_with_ctx) (MonoException *ex, MonoContext *ctx);
gboolean (*mono_exception_walk_trace) (MonoException *ex, MonoInternalExceptionFrameWalk func, gpointer user_data);
gboolean (*mono_install_handler_block_guard) (MonoThreadUnwindState *unwind_state);
void (*mono_uninstall_current_handler_block_guard) (void);
gboolean (*mono_current_thread_has_handle_block_guard) (void);
gboolean (*mono_above_abort_threshold) (void);
void (*mono_clear_abort_threshold) (void);
void (*mono_reraise_exception) (MonoException *ex);
} MonoRuntimeExceptionHandlingCallbacks;
MONO_COLD void mono_set_pending_exception (MonoException *exc);
void
mono_delegate_ctor (MonoObjectHandle this_obj, MonoObjectHandle target, gpointer addr, MonoMethod *method, MonoError *error);
MonoMethod *
mono_get_delegate_invoke_checked (MonoClass *klass, MonoError *error);
MonoMethod *
mono_get_delegate_begin_invoke_checked (MonoClass *klass, MonoError *error);
MonoMethod *
mono_get_delegate_end_invoke_checked (MonoClass *klass, MonoError *error);
void
mono_runtime_free_method (MonoMethod *method);
void
mono_install_callbacks (MonoRuntimeCallbacks *cbs);
MONO_COMPONENT_API
MonoRuntimeCallbacks*
mono_get_runtime_callbacks (void);
void
mono_install_eh_callbacks (MonoRuntimeExceptionHandlingCallbacks *cbs);
MONO_COMPONENT_API
MonoRuntimeExceptionHandlingCallbacks *
mono_get_eh_callbacks (void);
void
mono_raise_exception_deprecated (MonoException *ex);
void
mono_reraise_exception_deprecated (MonoException *ex);
void
mono_raise_exception_with_context (MonoException *ex, MonoContext *ctx);
void
mono_type_initialization_init (void);
int
mono_thread_kill (MonoInternalThread *thread, int signal);
MonoNativeTlsKey
mono_thread_get_tls_key (void);
gint32
mono_thread_get_tls_offset (void);
MonoNativeTlsKey
mono_domain_get_tls_key (void);
gint32
mono_domain_get_tls_offset (void);
/* Reflection and Reflection.Emit support */
/*
* Handling System.Type objects:
*
* Fields defined as System.Type in managed code should be defined as MonoObject*
* in unmanaged structures, and the monotype_cast () function should be used for
* casting them to MonoReflectionType* to avoid crashes/security issues when
* encountering instances of user defined subclasses of System.Type.
*/
#define IS_MONOTYPE(obj) (!(obj) || (m_class_get_image (mono_object_class ((obj))) == mono_defaults.corlib && ((MonoReflectionType*)(obj))->type != NULL))
#define IS_MONOTYPE_HANDLE(obj) IS_MONOTYPE (MONO_HANDLE_RAW (obj))
/* This should be used for accessing members of Type[] arrays */
#define mono_type_array_get(arr,index) monotype_cast (mono_array_get_internal ((arr), gpointer, (index)))
/*
* Cast an object to MonoReflectionType, making sure it is a System.MonoType or
* a subclass of it.
*/
static inline MonoReflectionType*
monotype_cast (MonoObject *obj)
{
g_assert (IS_MONOTYPE (obj));
return (MonoReflectionType*)obj;
}
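/*
 * Illustrative sketch only (not part of the original header): a hypothetical unmanaged
 * mirror of a managed object whose field is declared as System.Type, following the
 * convention described above.
 *
 *   typedef struct {
 *       MonoObject object;
 *       MonoObject *element_type;   // declared as System.Type on the managed side
 *   } MonoExampleInfo;
 *
 *   // when the field is actually needed as a reflection type:
 *   MonoReflectionType *rt = monotype_cast (example->element_type);
 */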
/*
* The following structure must match the C# implementation in our corlib.
*/
struct _MonoReflectionMethod {
MonoObject object;
MonoMethod *method;
MonoString *name;
MonoReflectionType *reftype;
};
/* Safely access System.Reflection.MonoMethod from native code */
TYPED_HANDLE_DECL (MonoReflectionMethod);
struct _MonoDelegate {
MonoObject object;
/* The compiled code of the target method */
gpointer method_ptr;
/* The invoke code */
gpointer invoke_impl;
MonoObject *target;
MonoMethod *method;
gpointer delegate_trampoline;
/* Extra argument passed to the target method in llvmonly mode */
gpointer extra_arg;
/*
* If non-NULL, this points to a memory location which stores the address of
* the compiled code of the method, or NULL if it is not yet compiled.
*/
guint8 **method_code;
gpointer interp_method;
/* Interp method that is executed when invoking the delegate */
gpointer interp_invoke_impl;
MonoReflectionMethod *method_info;
MonoReflectionMethod *original_method_info;
MonoObject *data;
MonoBoolean method_is_virtual;
};
typedef struct _MonoMulticastDelegate MonoMulticastDelegate;
struct _MonoMulticastDelegate {
MonoDelegate delegate;
MonoArray *delegates;
};
/* Safely access System.MulticastDelegate from native code */
TYPED_HANDLE_DECL (MonoMulticastDelegate);
struct _MonoReflectionField {
MonoObject object;
MonoClass *klass;
MonoClassField *field;
MonoString *name;
MonoReflectionType *type;
guint32 attrs;
};
/* Safely access System.Reflection.MonoField from native code */
TYPED_HANDLE_DECL (MonoReflectionField);
struct _MonoReflectionProperty {
MonoObject object;
MonoClass *klass;
MonoProperty *property;
};
/* Safely access System.Reflection.MonoProperty from native code */
TYPED_HANDLE_DECL (MonoReflectionProperty);
/*This is System.EventInfo*/
struct _MonoReflectionEvent {
MonoObject object;
};
/* Safely access System.Reflection.EventInfo from native code */
TYPED_HANDLE_DECL (MonoReflectionEvent);
typedef struct {
MonoReflectionEvent object;
MonoClass *klass;
MonoEvent *event;
} MonoReflectionMonoEvent;
/* Safely access System.Reflection.MonoEvent from native code */
TYPED_HANDLE_DECL (MonoReflectionMonoEvent);
typedef struct {
MonoObject object;
} MonoReflectionParameter;
/* Safely access System.Reflection.ParameterInfo from native code */
TYPED_HANDLE_DECL (MonoReflectionParameter);
struct _MonoReflectionMethodBody {
MonoObject object;
};
/* Safely access System.Reflection.MethodBody from native code */
TYPED_HANDLE_DECL (MonoReflectionMethodBody);
/* System.RuntimeAssembly */
struct _MonoReflectionAssembly {
MonoObject object;
MonoAssembly *assembly;
};
typedef struct {
MonoReflectionType *utype;
MonoArray *values;
MonoArray *names;
} MonoEnumInfo;
typedef struct {
MonoReflectionType *parent;
MonoReflectionType *ret;
guint32 attrs;
guint32 implattrs;
guint32 callconv;
} MonoMethodInfo;
typedef struct {
MonoReflectionType *parent;
MonoReflectionType *declaring_type;
MonoString *name;
MonoReflectionMethod *get;
MonoReflectionMethod *set;
guint32 attrs;
} MonoPropertyInfo;
typedef struct {
MonoReflectionType *declaring_type;
MonoReflectionType *reflected_type;
MonoString *name;
MonoReflectionMethod *add_method;
MonoReflectionMethod *remove_method;
MonoReflectionMethod *raise_method;
guint32 attrs;
MonoArray *other_methods;
} MonoEventInfo;
typedef struct {
MonoObject *member;
gint32 code_pos;
} MonoReflectionILTokenInfo;
typedef struct {
MonoObject object;
MonoArray *code;
gint32 code_len;
gint32 max_stack;
gint32 cur_stack;
MonoArray *locals;
MonoArray *ex_handlers;
gint32 num_token_fixups;
MonoArray *token_fixups;
} MonoReflectionILGen;
typedef struct {
MonoArray *handlers;
gint32 start;
gint32 len;
gint32 label;
} MonoILExceptionInfo;
typedef struct {
MonoObject *extype;
gint32 type;
gint32 start;
gint32 len;
gint32 filter_offset;
} MonoILExceptionBlock;
typedef struct {
MonoObject object;
MonoObject *catch_type;
gint32 filter_offset;
gint32 flags;
gint32 try_offset;
gint32 try_length;
gint32 handler_offset;
gint32 handler_length;
} MonoReflectionExceptionHandlingClause;
/* Safely access System.Reflection.ExceptionHandlingClause from native code */
TYPED_HANDLE_DECL (MonoReflectionExceptionHandlingClause);
typedef struct {
MonoObject object;
MonoReflectionType *local_type;
MonoBoolean is_pinned;
guint16 local_index;
} MonoReflectionLocalVariableInfo;
/* Safely access System.Reflection.LocalVariableInfo from native code */
TYPED_HANDLE_DECL (MonoReflectionLocalVariableInfo);
typedef struct {
/*
* Must have the same layout as MonoReflectionLocalVariableInfo, since
* LocalBuilder inherits from it under net 2.0.
*/
MonoObject object;
MonoObject *type;
MonoBoolean is_pinned;
guint16 local_index;
MonoString *name;
} MonoReflectionLocalBuilder;
typedef struct {
MonoObject object;
gint32 count;
gint32 type;
gint32 eltype;
MonoString *guid;
MonoString *mcookie;
MonoString *marshaltype;
MonoObject *marshaltyperef;
gint32 param_num;
MonoBoolean has_size;
} MonoReflectionMarshal;
typedef struct {
MonoObject object;
MonoObject* methodb;
MonoString *name;
MonoArray *cattrs;
MonoReflectionMarshal *marshal_info;
guint32 attrs;
int position;
guint32 table_idx;
MonoObject *def_value;
} MonoReflectionParamBuilder;
typedef struct {
MonoObject object;
MonoMethod *mhandle;
MonoReflectionILGen *ilgen;
MonoArray *parameters;
guint32 attrs;
guint32 iattrs;
guint32 table_idx;
guint32 call_conv;
MonoObject *type;
MonoArray *pinfo;
MonoArray *cattrs;
MonoBoolean init_locals;
MonoArray *param_modreq;
MonoArray *param_modopt;
} MonoReflectionCtorBuilder;
/* Safely access System.Reflection.Emit.ConstructorBuilder from native code */
TYPED_HANDLE_DECL (MonoReflectionCtorBuilder);
typedef struct {
MonoObject object;
MonoMethod *mhandle;
MonoObject *rtype;
MonoArray *parameters;
guint32 attrs;
guint32 iattrs;
MonoString *name;
guint32 table_idx;
MonoArray *code;
MonoReflectionILGen *ilgen;
MonoObject *type;
MonoArray *pinfo;
MonoArray *cattrs;
MonoArray *override_methods;
MonoString *dll;
MonoString *dllentry;
guint32 charset;
guint32 extra_flags;
guint32 native_cc;
guint32 call_conv;
MonoBoolean init_locals;
MonoGenericContainer *generic_container;
MonoArray *generic_params;
MonoArray *return_modreq;
MonoArray *return_modopt;
MonoArray *param_modreq;
MonoArray *param_modopt;
} MonoReflectionMethodBuilder;
/* Safely access System.Reflection.Emit.MethodBuilder from native code */
TYPED_HANDLE_DECL (MonoReflectionMethodBuilder);
typedef struct {
MonoObject object;
MonoMethod *mhandle;
MonoReflectionType *parent;
MonoReflectionType *ret;
MonoArray *parameters;
MonoString *name;
guint32 table_idx;
guint32 call_conv;
} MonoReflectionArrayMethod;
/* Safely access System.Reflection.Emit.MonoArrayMethod from native code */
TYPED_HANDLE_DECL (MonoReflectionArrayMethod);
typedef struct {
MonoReflectionAssembly assembly;
MonoDynamicAssembly *dynamic_assembly;
MonoArray *modules;
MonoString *name;
MonoArray *cattrs;
MonoString *version;
MonoString *culture;
MonoArray *public_key_token;
MonoArray *loaded_modules;
guint32 access;
} MonoReflectionAssemblyBuilder;
/* Safely access System.Reflection.Emit.AssemblyBuilder from native code */
TYPED_HANDLE_DECL (MonoReflectionAssemblyBuilder);
typedef struct {
MonoObject object;
guint32 attrs;
MonoObject *type;
MonoString *name;
MonoObject *def_value;
gint32 offset;
MonoReflectionType *typeb;
MonoArray *rva_data;
MonoArray *cattrs;
MonoReflectionMarshal *marshal_info;
MonoClassField *handle;
MonoArray *modreq;
MonoArray *modopt;
} MonoReflectionFieldBuilder;
/* Safely access System.Reflection.Emit.FieldBuilder from native code */
TYPED_HANDLE_DECL (MonoReflectionFieldBuilder);
typedef struct {
MonoObject object;
guint32 attrs;
MonoString *name;
MonoObject *type;
MonoArray *parameters;
MonoArray *cattrs;
MonoObject *def_value;
MonoReflectionMethodBuilder *set_method;
MonoReflectionMethodBuilder *get_method;
gint32 table_idx;
MonoObject *type_builder;
MonoArray *returnModReq;
MonoArray *returnModOpt;
MonoArray *paramModReq;
MonoArray *paramModOpt;
guint32 call_conv;
} MonoReflectionPropertyBuilder;
/* System.RuntimeModule */
struct _MonoReflectionModule {
MonoObject obj;
MonoImage *image;
MonoReflectionAssembly *assembly;
MonoString *fqname;
MonoString *name;
MonoString *scopename;
MonoBoolean is_resource;
guint32 token;
};
/* Safely access System.Reflection.Module from native code */
TYPED_HANDLE_DECL (MonoReflectionModule);
typedef struct {
MonoReflectionModule module;
MonoDynamicImage *dynamic_image;
gint32 num_types;
MonoArray *types;
MonoArray *cattrs;
guint32 table_idx;
MonoReflectionAssemblyBuilder *assemblyb;
gboolean is_main;
MonoArray *resources;
GHashTable *unparented_classes;
MonoArray *table_indexes;
} MonoReflectionModuleBuilder;
/* Safely access System.Reflection.Emit.ModuleBuilder from native code */
TYPED_HANDLE_DECL (MonoReflectionModuleBuilder);
typedef enum {
MonoTypeBuilderNew = 0,
MonoTypeBuilderEntered = 1,
MonoTypeBuilderFinished = 2
} MonoTypeBuilderState;
struct _MonoReflectionTypeBuilder {
MonoReflectionType type;
MonoString *name;
MonoString *nspace;
MonoObject *parent;
MonoReflectionType *nesting_type;
MonoArray *interfaces;
gint32 num_methods;
MonoArray *methods;
MonoArray *ctors;
MonoArray *properties;
gint32 num_fields;
MonoArray *fields;
MonoArray *events;
MonoArray *cattrs;
MonoArray *subtypes;
guint32 attrs;
guint32 table_idx;
MonoReflectionModuleBuilder *module;
gint32 class_size;
gint32 packing_size;
MonoGenericContainer *generic_container;
MonoArray *generic_params;
MonoReflectionType *created;
gint32 is_byreflike_set;
gint32 state;
};
typedef struct {
MonoReflectionType type;
MonoReflectionType *element_type;
gint32 rank;
} MonoReflectionArrayType;
/* Safely access System.Reflection.Emit.ArrayType (in DerivedTypes.cs) from native code */
TYPED_HANDLE_DECL (MonoReflectionArrayType);
typedef struct {
MonoReflectionType type;
MonoReflectionType *element_type;
} MonoReflectionDerivedType;
/* Safely access System.Reflection.Emit.SymbolType and subclasses (in DerivedTypes.cs) from native code */
TYPED_HANDLE_DECL (MonoReflectionDerivedType);
typedef struct {
MonoReflectionType type;
MonoReflectionTypeBuilder *tbuilder;
MonoReflectionMethodBuilder *mbuilder;
MonoString *name;
guint32 index;
MonoReflectionType *base_type;
MonoArray *iface_constraints;
MonoArray *cattrs;
guint32 attrs;
} MonoReflectionGenericParam;
/* Safely access System.Reflection.Emit.GenericTypeParameterBuilder from native code */
TYPED_HANDLE_DECL (MonoReflectionGenericParam);
typedef struct {
MonoReflectionType type;
MonoReflectionTypeBuilder *tb;
} MonoReflectionEnumBuilder;
/* Safely access System.Reflection.Emit.EnumBuilder from native code */
TYPED_HANDLE_DECL (MonoReflectionEnumBuilder);
typedef struct _MonoReflectionGenericClass MonoReflectionGenericClass;
struct _MonoReflectionGenericClass {
MonoReflectionType type;
MonoReflectionType *generic_type; /*Can be either a MonoType or a TypeBuilder*/
MonoArray *type_arguments;
};
/* Safely access System.Reflection.Emit.TypeBuilderInstantiation from native code */
TYPED_HANDLE_DECL (MonoReflectionGenericClass);
typedef struct {
MonoObject obj;
MonoString *name;
MonoReflectionType *type;
MonoReflectionTypeBuilder *typeb;
MonoArray *cattrs;
MonoReflectionMethodBuilder *add_method;
MonoReflectionMethodBuilder *remove_method;
MonoReflectionMethodBuilder *raise_method;
MonoArray *other_methods;
guint32 attrs;
guint32 table_idx;
} MonoReflectionEventBuilder;
typedef struct {
MonoObject obj;
MonoReflectionMethod *ctor;
MonoArray *data;
} MonoReflectionCustomAttr;
TYPED_HANDLE_DECL (MonoReflectionCustomAttr);
typedef struct {
MonoObject object;
guint32 utype;
gint32 safe_array_subtype;
MonoReflectionType *marshal_safe_array_user_defined_subtype;
gint32 IidParameterIndex;
guint32 array_subtype;
gint16 size_param_index;
gint32 size_const;
MonoString *marshal_type;
MonoReflectionType *marshal_type_ref;
MonoString *marshal_cookie;
} MonoReflectionMarshalAsAttribute;
/* Safely access System.Runtime.InteropServices.MarshalAsAttribute */
TYPED_HANDLE_DECL (MonoReflectionMarshalAsAttribute);
typedef struct {
MonoObject object;
gint32 call_conv;
gint32 charset;
MonoBoolean best_fit_mapping;
MonoBoolean throw_on_unmappable;
MonoBoolean set_last_error;
} MonoReflectionUnmanagedFunctionPointerAttribute;
typedef struct {
MonoObject object;
MonoString *guid;
} MonoReflectionGuidAttribute;
typedef struct {
MonoObject object;
MonoMethod *mhandle;
MonoString *name;
MonoReflectionType *rtype;
MonoArray *parameters;
guint32 attrs;
guint32 call_conv;
MonoReflectionModule *module;
MonoBoolean skip_visibility;
MonoBoolean init_locals;
MonoReflectionILGen *ilgen;
gint32 nrefs;
MonoArray *refs;
GSList *referenced_by;
MonoReflectionType *owner;
} MonoReflectionDynamicMethod;
/* Safely access System.Reflection.Emit.DynamicMethod from native code */
TYPED_HANDLE_DECL (MonoReflectionDynamicMethod);
typedef struct {
MonoObject object;
MonoReflectionModuleBuilder *module;
MonoArray *arguments;
guint32 type;
MonoReflectionType *return_type;
guint32 call_conv;
guint32 unmanaged_call_conv;
MonoArray *modreqs;
MonoArray *modopts;
} MonoReflectionSigHelper;
/* Safely access System.Reflection.Emit.SignatureHelper from native code */
TYPED_HANDLE_DECL (MonoReflectionSigHelper);
typedef struct {
MonoObject object;
MonoBoolean visible;
} MonoReflectionComVisibleAttribute;
typedef struct {
MonoObject object;
MonoReflectionType *type;
} MonoReflectionComDefaultInterfaceAttribute;
enum {
RESOURCE_LOCATION_EMBEDDED = 1,
RESOURCE_LOCATION_ANOTHER_ASSEMBLY = 2,
RESOURCE_LOCATION_IN_MANIFEST = 4
};
typedef struct {
MonoObject object;
MonoReflectionAssembly *assembly;
MonoString *filename;
guint32 location;
} MonoManifestResourceInfo;
/* Safely access System.Reflection.ManifestResourceInfo from native code */
TYPED_HANDLE_DECL (MonoManifestResourceInfo);
/* A boxed IntPtr */
typedef struct {
MonoObject object;
gpointer m_value;
} MonoIntPtr;
/* Keep in sync with System.GenericParameterAttributes */
typedef enum {
GENERIC_PARAMETER_ATTRIBUTE_NON_VARIANT = 0,
GENERIC_PARAMETER_ATTRIBUTE_COVARIANT = 1,
GENERIC_PARAMETER_ATTRIBUTE_CONTRAVARIANT = 2,
GENERIC_PARAMETER_ATTRIBUTE_VARIANCE_MASK = 3,
GENERIC_PARAMETER_ATTRIBUTE_NO_SPECIAL_CONSTRAINT = 0,
GENERIC_PARAMETER_ATTRIBUTE_REFERENCE_TYPE_CONSTRAINT = 4,
GENERIC_PARAMETER_ATTRIBUTE_VALUE_TYPE_CONSTRAINT = 8,
GENERIC_PARAMETER_ATTRIBUTE_CONSTRUCTOR_CONSTRAINT = 16,
GENERIC_PARAMETER_ATTRIBUTE_SPECIAL_CONSTRAINTS_MASK = 28
} GenericParameterAttributes;
typedef struct {
MonoType *type;
MonoClassField *field;
MonoProperty *prop;
} CattrNamedArg;
// Keep in sync with System.Runtime.Loader.AssemblyLoadContext.InternalState
typedef enum {
ALIVE = 0,
UNLOADING = 1
} MonoManagedAssemblyLoadContextInternalState;
/* All MonoInternalThread instances should be pinned, so it's safe to use the raw ptr. However
* for uniformity, icall wrapping will make handles anyway. So this is the method for getting the payload.
*/
static inline MonoInternalThread*
mono_internal_thread_handle_ptr (MonoInternalThreadHandle h)
{
/* The SUPPRESS here prevents a Centrinel warning due to merely seeing this
* function definition. Callees will still get a warning unless we
* attach a suppress attribute to the declaration.
*/
return MONO_HANDLE_SUPPRESS (MONO_HANDLE_RAW (h));
}
guint32 mono_image_insert_string (MonoReflectionModuleBuilderHandle module, MonoStringHandle str, MonoError *error);
guint32 mono_image_create_token (MonoDynamicImage *assembly, MonoObjectHandle obj, gboolean create_methodspec, gboolean register_token, MonoError *error);
void mono_dynamic_image_free (MonoDynamicImage *image);
void mono_dynamic_image_free_image (MonoDynamicImage *image);
void mono_dynamic_image_release_gc_roots (MonoDynamicImage *image);
void mono_reflection_setup_internal_class (MonoReflectionTypeBuilder *tb);
void mono_reflection_get_dynamic_overrides (MonoClass *klass, MonoMethod ***overrides, int *num_overrides, MonoError *error);
void mono_reflection_destroy_dynamic_method (MonoReflectionDynamicMethod *mb);
ICALL_EXPORT
void
ves_icall_SymbolType_create_unmanaged_type (MonoReflectionType *type);
void mono_reflection_register_with_runtime (MonoReflectionType *type);
MonoMethodSignature * mono_reflection_lookup_signature (MonoImage *image, MonoMethod *method, guint32 token, MonoError *error);
MonoArrayHandle mono_param_get_objects_internal (MonoMethod *method, MonoClass *refclass, MonoError *error);
MonoClass*
mono_class_bind_generic_parameters (MonoClass *klass, int type_argc, MonoType **types, gboolean is_dynamic);
MonoType*
mono_reflection_bind_generic_parameters (MonoReflectionTypeHandle type, int type_argc, MonoType **types, MonoError *error);
void
mono_reflection_generic_class_initialize (MonoReflectionGenericClass *type, MonoArray *fields);
ICALL_EXPORT
MonoReflectionEvent *
ves_icall_TypeBuilder_get_event_info (MonoReflectionTypeBuilder *tb, MonoReflectionEventBuilder *eb);
MonoReflectionMarshalAsAttributeHandle
mono_reflection_marshal_as_attribute_from_marshal_spec (MonoClass *klass, MonoMarshalSpec *spec, MonoError *error);
gpointer
mono_reflection_lookup_dynamic_token (MonoImage *image, guint32 token, gboolean valid_token, MonoClass **handle_class, MonoGenericContext *context, MonoError *error);
gboolean
mono_reflection_call_is_assignable_to (MonoClass *klass, MonoClass *oklass, MonoError *error);
gboolean
mono_get_constant_value_from_blob (MonoTypeEnum type, const char *blob, void *value, MonoStringHandleOut string_handle, MonoError *error);
gboolean
mono_metadata_read_constant_value (const char *blob, MonoTypeEnum type, void *value, MonoError *error);
char*
mono_string_from_blob (const char *str, MonoError *error);
void
mono_release_type_locks (MonoInternalThread *thread);
/**
* mono_string_handle_length:
* \param s \c MonoString
* \returns the length in characters of the string
*/
#ifdef ENABLE_CHECKED_BUILD_GC
int
mono_string_handle_length (MonoStringHandle s);
#else
#define mono_string_handle_length(s) (MONO_HANDLE_GETVAL ((s), length))
#endif
char *
mono_string_handle_to_utf8 (MonoStringHandle s, MonoError *error);
char *
mono_string_to_utf8_image (MonoImage *image, MonoStringHandle s, MonoError *error);
MonoArrayHandle
mono_array_clone_in_domain (MonoArrayHandle array, MonoError *error);
MonoArray*
mono_array_clone_checked (MonoArray *array, MonoError *error);
void
mono_array_full_copy (MonoArray *src, MonoArray *dest);
void
mono_array_full_copy_unchecked_size (MonoArray *src, MonoArray *dest, MonoClass *klass, uintptr_t size);
gboolean
mono_array_calc_byte_len (MonoClass *klass, uintptr_t len, uintptr_t *res);
MonoArray*
mono_array_new_checked (MonoClass *eclass, uintptr_t n, MonoError *error);
MONO_COMPONENT_API MonoArray*
mono_array_new_full_checked (MonoClass *array_class, uintptr_t *lengths, intptr_t *lower_bounds, MonoError *error);
MonoArray*
mono_array_new_jagged_checked (MonoClass *klass, int n, uintptr_t *lengths, MonoError *error);
ICALL_EXPORT
MonoArray*
ves_icall_array_new_specific (MonoVTable *vtable, uintptr_t n);
gpointer
mono_create_ftnptr (gpointer addr);
gpointer
mono_get_addr_from_ftnptr (gpointer descr);
MONO_COMPONENT_API void
mono_nullable_init (guint8 *buf, MonoObject *value, MonoClass *klass);
void
mono_nullable_init_from_handle (guint8 *buf, MonoObjectHandle value, MonoClass *klass);
void
mono_nullable_init_unboxed (guint8 *buf, gpointer value, MonoClass *klass);
MONO_COMPONENT_API MonoObject *
mono_value_box_checked (MonoClass *klass, void* val, MonoError *error);
MonoObjectHandle
mono_value_box_handle (MonoClass *klass, gpointer val, MonoError *error);
MONO_COMPONENT_API MonoObject*
mono_nullable_box (gpointer buf, MonoClass *klass, MonoError *error);
MonoObjectHandle
mono_nullable_box_handle (gpointer buf, MonoClass *klass, MonoError *error);
// A code size optimization (source and object) equivalent to MONO_HANDLE_NEW (MonoObject, NULL);
MonoObjectHandle
mono_new_null (void);
#ifdef MONO_SMALL_CONFIG
#define MONO_IMT_SIZE 9
#else
#define MONO_IMT_SIZE 19
#endif
typedef union {
int vtable_slot;
gpointer target_code;
} MonoImtItemValue;
typedef struct _MonoImtBuilderEntry {
gpointer key;
struct _MonoImtBuilderEntry *next;
MonoImtItemValue value;
int children;
guint8 has_target_code : 1;
} MonoImtBuilderEntry;
typedef struct _MonoIMTCheckItem MonoIMTCheckItem;
struct _MonoIMTCheckItem {
gpointer key;
int check_target_idx;
MonoImtItemValue value;
guint8 *jmp_code;
guint8 *code_target;
guint8 is_equals;
guint8 compare_done;
guint8 chunk_size;
guint8 short_branch;
guint8 has_target_code;
};
typedef gpointer (*MonoImtTrampolineBuilder) (MonoVTable *vtable, MonoIMTCheckItem **imt_entries, int count, gpointer fail_trunk);
void
mono_install_imt_trampoline_builder (MonoImtTrampolineBuilder func);
void
mono_set_always_build_imt_trampolines (gboolean value);
void
mono_vtable_build_imt_slot (MonoVTable* vtable, int imt_slot);
guint32
mono_method_get_imt_slot (MonoMethod *method);
void
mono_method_add_generic_virtual_invocation (MonoVTable *vtable,
gpointer *vtable_slot,
MonoMethod *method, gpointer code);
void
mono_unhandled_exception_checked (MonoObjectHandle exc, MonoError *error);
void
mono_first_chance_exception_checked (MonoObjectHandle exc, MonoError *error);
void
mono_first_chance_exception_internal (MonoObject *exc_raw);
MonoVTable *
mono_class_try_get_vtable (MonoClass *klass);
gboolean
mono_runtime_run_module_cctor (MonoImage *image, MonoError *error);
MONO_COMPONENT_API gboolean
mono_runtime_class_init_full (MonoVTable *vtable, MonoError *error);
void
mono_method_clear_object (MonoMethod *method);
gsize*
mono_class_compute_bitmap (MonoClass *klass, gsize *bitmap, int size, int offset, int *max_set, gboolean static_fields);
gboolean
mono_class_is_reflection_method_or_constructor (MonoClass *klass);
MonoObjectHandle
mono_get_object_from_blob (MonoType *type, const char *blob, MonoStringHandleOut string_handle, MonoError *error);
gboolean
mono_class_has_ref_info (MonoClass *klass);
MonoReflectionTypeBuilder*
mono_class_get_ref_info_raw (MonoClass *klass);
void
mono_class_set_ref_info (MonoClass *klass, MonoObjectHandle obj);
void
mono_class_free_ref_info (MonoClass *klass);
MONO_COMPONENT_API MonoObject *
mono_object_new_pinned (MonoClass *klass, MonoError *error);
MonoObjectHandle
mono_object_new_pinned_handle (MonoClass *klass, MonoError *error);
MonoObject *
mono_object_new_specific_checked (MonoVTable *vtable, MonoError *error);
ICALL_EXPORT
MonoObject *
ves_icall_object_new (MonoClass *klass);
ICALL_EXPORT
MonoObject *
ves_icall_object_new_specific (MonoVTable *vtable);
MonoObject *
mono_object_new_alloc_specific_checked (MonoVTable *vtable, MonoError *error);
void
mono_field_get_value_internal (MonoObject *obj, MonoClassField *field, void *value);
MONO_COMPONENT_API void
mono_field_static_get_value_checked (MonoVTable *vt, MonoClassField *field, void *value, MonoStringHandleOut string_handle, MonoError *error);
MONO_COMPONENT_API void
mono_field_static_get_value_for_thread (MonoInternalThread *thread, MonoVTable *vt, MonoClassField *field, void *value, MonoStringHandleOut string_handle, MonoError *error);
guint8*
mono_static_field_get_addr (MonoVTable *vt, MonoClassField *field);
MonoMethod*
mono_object_handle_get_virtual_method (MonoObjectHandle obj, MonoMethod *method, MonoError *error);
/* exported, used by the debugger */
MONO_API void *
mono_vtable_get_static_field_data (MonoVTable *vt);
MonoObject *
mono_field_get_value_object_checked (MonoClassField *field, MonoObject *obj, MonoError *error);
MonoObjectHandle
mono_static_field_get_value_handle (MonoClassField *field, MonoError *error);
MONO_COMPONENT_API gpointer
mono_special_static_field_get_offset (MonoClassField *field, MonoError *error);
gboolean
mono_property_set_value_handle (MonoProperty *prop, MonoObjectHandle obj, void **params, MonoError *error);
MonoObject*
mono_property_get_value_checked (MonoProperty *prop, void *obj, void **params, MonoError *error);
MonoString*
mono_object_try_to_string (MonoObject *obj, MonoObject **exc, MonoError *error);
char *
mono_string_to_utf8_ignore (MonoString *s);
gboolean
mono_monitor_is_il_fastpath_wrapper (MonoMethod *method);
MonoStringHandle
mono_string_is_interned_lookup (MonoStringHandle str, gboolean insert, MonoError *error);
/**
* mono_string_intern_checked:
* \param str String to intern
* \param error set on error.
* Interns the string passed.
* \returns The interned string. On failure returns NULL and sets \p error
*/
#define mono_string_intern_checked(str, error) (mono_string_is_interned_lookup ((str), TRUE, (error)))
/**
* mono_string_is_interned_internal:
 * \param str String to probe
 * \param error set on error.
* \returns Whether the string has been interned.
*/
#define mono_string_is_interned_internal(str, error) (mono_string_is_interned_lookup ((str), FALSE, (error)))
char *
mono_exception_handle_get_native_backtrace (MonoExceptionHandle exc);
char *
mono_exception_get_managed_backtrace (MonoException *exc);
gboolean
mono_exception_try_get_managed_backtrace (MonoException *exc, const char *prefix, char **result);
void
mono_copy_value (MonoType *type, void *dest, void *value, int deref_pointer);
void
mono_error_raise_exception_deprecated (MonoError *target_error);
gboolean
mono_error_set_pending_exception_slow (MonoError *error);
static inline gboolean
mono_error_set_pending_exception (MonoError *error)
{
return is_ok (error) ? FALSE : mono_error_set_pending_exception_slow (error);
}
MonoArray *
mono_glist_to_array (GList *list, MonoClass *eclass, MonoError *error);
MONO_COMPONENT_API MonoObject *
mono_object_new_checked (MonoClass *klass, MonoError *error);
MonoObjectHandle
mono_object_new_handle (MonoClass *klass, MonoError *error);
// This function skips handling of remoting and COM.
// "alloc" means "less".
MonoObjectHandle
mono_object_new_alloc_by_vtable (MonoVTable *vtable, MonoError *error);
MonoObject*
mono_object_new_mature (MonoVTable *vtable, MonoError *error);
MonoObjectHandle
mono_object_new_handle_mature (MonoVTable *vtable, MonoError *error);
MonoObject *
mono_object_clone_checked (MonoObject *obj, MonoError *error);
MonoObjectHandle
mono_object_clone_handle (MonoObjectHandle obj, MonoError *error);
MONO_COMPONENT_API MonoObject *
mono_object_isinst_checked (MonoObject *obj, MonoClass *klass, MonoError *error);
MonoObjectHandle
mono_object_handle_isinst (MonoObjectHandle obj, MonoClass *klass, MonoError *error);
MonoObjectHandle
mono_object_handle_isinst_mbyref (MonoObjectHandle obj, MonoClass *klass, MonoError *error);
gboolean
mono_object_handle_isinst_mbyref_raw (MonoObjectHandle obj, MonoClass *klass, MonoError *error);
MonoStringHandle
mono_string_new_size_handle (gint32 len, MonoError *error);
MonoString*
mono_string_new_len_checked (const char *text, guint length, MonoError *error);
MonoString *
mono_string_new_size_checked (gint32 len, MonoError *error);
MONO_COMPONENT_API MonoString*
mono_ldstr_checked (MonoImage *image, uint32_t str_index, MonoError *error);
MonoStringHandle
mono_ldstr_handle (MonoImage *image, uint32_t str_index, MonoError *error);
MONO_PROFILER_API MonoString*
mono_string_new_checked (const char *text, MonoError *merror);
MonoString*
mono_string_new_wtf8_len_checked (const char *text, guint length, MonoError *error);
MonoString *
mono_string_new_utf16_checked (const gunichar2 *text, gint32 len, MonoError *error);
MonoStringHandle
mono_string_new_utf16_handle (const gunichar2 *text, gint32 len, MonoError *error);
MonoStringHandle
mono_string_new_utf8_len (const char *text, guint length, MonoError *error);
MonoString *
mono_string_from_utf16_checked (const mono_unichar2 *data, MonoError *error);
MonoString *
mono_string_from_utf32_checked (const mono_unichar4 *data, MonoError *error);
char*
mono_ldstr_utf8 (MonoImage *image, guint32 idx, MonoError *error);
MONO_COMPONENT_API
char*
mono_utf16_to_utf8 (const mono_unichar2 *s, gsize slength, MonoError *error);
char*
mono_utf16_to_utf8len (const mono_unichar2 *s, gsize slength, gsize *utf8_length, MonoError *error);
gboolean
mono_runtime_object_init_checked (MonoObject *this_obj, MonoError *error);
MONO_PROFILER_API MonoObject*
mono_runtime_try_invoke (MonoMethod *method, void *obj, void **params, MonoObject **exc, MonoError *error);
// The exc parameter is deliberately missing; so far this has proven to reduce code duplication.
// In particular, if an exception is returned from an otherwise successful underlying call,
// it is set into the MonoError with mono_error_set_exception_instance.
// The result is that the caller need only check the MonoError.
MONO_COMPONENT_API MonoObjectHandle
mono_runtime_try_invoke_handle (MonoMethod *method, MonoObjectHandle obj, void **params, MonoError* error);
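/* A minimal usage sketch of the convention above (illustrative only, hence
 * guarded out; "example_invoke" is a hypothetical caller): the caller passes a
 * MonoError and checks only that; any managed exception is recorded in it. */
#if 0
static void
example_invoke (MonoMethod *method, MonoObjectHandle obj, void **params)
{
	ERROR_DECL (error);
	MonoObjectHandle result = mono_runtime_try_invoke_handle (method, obj, params, error);
	if (!is_ok (error)) {
		/* a thrown managed exception or other failure is stored in error */
		mono_error_cleanup (error);
	}
	(void)result;
}
#endif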
MONO_COMPONENT_API MonoObject*
mono_runtime_invoke_checked (MonoMethod *method, void *obj, void **params, MonoError *error);
MonoObjectHandle
mono_runtime_invoke_handle (MonoMethod *method, MonoObjectHandle obj, void **params, MonoError* error);
void
mono_runtime_invoke_handle_void (MonoMethod *method, MonoObjectHandle obj, void **params, MonoError* error);
MonoObject*
mono_runtime_try_invoke_array (MonoMethod *method, void *obj, MonoArray *params,
MonoObject **exc, MonoError *error);
MonoObject*
mono_runtime_invoke_span_checked (MonoMethod *method, void *obj, MonoSpanOfObjects *params,
MonoError *error);
void*
mono_compile_method_checked (MonoMethod *method, MonoError *error);
MonoObject*
mono_runtime_delegate_try_invoke (MonoObject *delegate, void **params,
MonoObject **exc, MonoError *error);
MonoObject*
mono_runtime_delegate_invoke_checked (MonoObject *delegate, void **params,
MonoError *error);
MonoArrayHandle
mono_runtime_get_main_args_handle (MonoError *error);
int
mono_runtime_run_main_checked (MonoMethod *method, int argc, char* argv[],
MonoError *error);
int
mono_runtime_try_run_main (MonoMethod *method, int argc, char* argv[],
MonoObject **exc);
int
mono_runtime_exec_main_checked (MonoMethod *method, MonoArray *args, MonoError *error);
int
mono_runtime_try_exec_main (MonoMethod *method, MonoArray *args, MonoObject **exc);
MonoAssembly*
mono_try_assembly_resolve_handle (MonoAssemblyLoadContext *alc, MonoStringHandle fname, MonoAssembly *requesting, MonoError *error);
gboolean
mono_runtime_object_init_handle (MonoObjectHandle this_obj, MonoError *error);
/* GC write barriers support */
void
mono_gc_wbarrier_object_copy_handle (MonoObjectHandle obj, MonoObjectHandle src);
MonoMethod*
mono_class_get_virtual_method (MonoClass *klass, MonoMethod *method, MonoError *error);
MonoStringHandle
mono_string_empty_handle (void);
/*
* mono_object_get_data:
*
* Return a pointer to the beginning of data inside a MonoObject.
*/
static inline gpointer
mono_object_get_data (MonoObject *o)
{
return (guint8*)o + MONO_ABI_SIZEOF (MonoObject);
}
#define mono_handle_get_data_unsafe(handle) ((gpointer)((guint8*)MONO_HANDLE_RAW (handle) + MONO_ABI_SIZEOF (MonoObject)))
MONO_COMPONENT_API gpointer
mono_vtype_get_field_addr (gpointer vtype, MonoClassField *field);
#define MONO_OBJECT_SETREF_INTERNAL(obj,fieldname,value) do { \
mono_gc_wbarrier_set_field_internal ((MonoObject*)(obj), &((obj)->fieldname), (MonoObject*)value); \
/*(obj)->fieldname = (value);*/ \
} while (0)
/* This should be used if 's' can reside on the heap */
#define MONO_STRUCT_SETREF_INTERNAL(s,field,value) do { \
mono_gc_wbarrier_generic_store_internal (&((s)->field), (MonoObject*)(value)); \
} while (0)
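/* A minimal sketch of how the setref macros are meant to be used (illustrative
 * only, hence guarded out; MonoExampleObject and its "child" field are
 * hypothetical): stores of managed references into heap objects go through
 * these macros so the GC write barrier runs. */
#if 0
typedef struct {
	MonoObject object;
	MonoObject *child;
} MonoExampleObject;

static void
example_store_child (MonoExampleObject *obj, MonoObject *value)
{
	MONO_OBJECT_SETREF_INTERNAL (obj, child, value);
}
#endif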
static inline gunichar2*
mono_string_chars_internal (MonoString *s)
{
MONO_REQ_GC_UNSAFE_MODE;
return s->chars;
}
static inline int
mono_string_length_internal (MonoString *s)
{
MONO_REQ_GC_UNSAFE_MODE;
return s->length;
}
MonoString*
mono_string_empty_internal (MonoDomain *domain);
char *
mono_string_to_utf8len (MonoStringHandle s, gsize *utf8len, MonoError *error);
MONO_COMPONENT_API char*
mono_string_to_utf8_checked_internal (MonoString *string_obj, MonoError *error);
mono_bool
mono_string_equal_internal (MonoString *s1, MonoString *s2);
unsigned
mono_string_hash_internal (MonoString *s);
MONO_COMPONENT_API int
mono_object_hash_internal (MonoObject* obj);
ICALL_EXPORT
void
mono_value_copy_internal (void* dest, const void* src, MonoClass *klass);
void
mono_value_copy_array_internal (MonoArray *dest, int dest_idx, const void* src, int count);
MONO_PROFILER_API MonoVTable* mono_object_get_vtable_internal (MonoObject *obj);
MonoDomain*
mono_object_get_domain_internal (MonoObject *obj);
static inline gpointer
mono_object_unbox_internal (MonoObject *obj)
{
/* add assert for valuetypes? */
g_assert (m_class_is_valuetype (mono_object_class (obj)));
return mono_object_get_data (obj);
}
ICALL_EXPORT
void
mono_monitor_exit_internal (MonoObject *obj);
MONO_PROFILER_API unsigned mono_object_get_size_internal (MonoObject *o);
MONO_PROFILER_API MonoDomain* mono_vtable_domain_internal (MonoVTable *vtable);
MONO_PROFILER_API MonoClass* mono_vtable_class_internal (MonoVTable *vtable);
MONO_COMPONENT_API MonoMethod*
mono_object_get_virtual_method_internal (MonoObject *obj, MonoMethod *method);
MonoMethod*
mono_get_delegate_invoke_internal (MonoClass *klass);
MonoMethod*
mono_get_delegate_begin_invoke_internal (MonoClass *klass);
MonoMethod*
mono_get_delegate_end_invoke_internal (MonoClass *klass);
void
mono_unhandled_exception_internal (MonoObject *exc);
void
mono_print_unhandled_exception_internal (MonoObject *exc);
void
mono_raise_exception_internal (MonoException *ex);
void
mono_field_set_value_internal (MonoObject *obj, MonoClassField *field, void *value);
MONO_COMPONENT_API void
mono_field_static_set_value_internal (MonoVTable *vt, MonoClassField *field, void *value);
void
mono_field_get_value_internal (MonoObject *obj, MonoClassField *field, void *value);
MonoMethod* mono_get_context_capture_method (void);
guint8*
mono_runtime_get_aotid_arr (void);
/* GC handles support
*
* A handle can be created to refer to a managed object and either prevent it
* from being garbage collected or moved or to be able to know if it has been
* collected or not (weak references).
* mono_gchandle_new () is used to prevent an object from being garbage collected
* until mono_gchandle_free() is called. Use a TRUE value for the pinned argument to
* prevent the object from being moved (this should be avoided as much as possible
 * and this should be used only for short periods of time or performance will suffer).
* To create a weakref use mono_gchandle_new_weakref (): track_resurrection should
* usually be false (see the GC docs for more details).
* mono_gchandle_get_target () can be used to get the object referenced by both kinds
* of handle: for a weakref handle, if an object has been collected, it will return NULL.
*/
MonoGCHandle
mono_gchandle_new_internal (MonoObject *obj, mono_bool pinned);
MONO_COMPONENT_API MonoGCHandle
mono_gchandle_new_weakref_internal (MonoObject *obj, mono_bool track_resurrection);
MONO_COMPONENT_API
MonoObject*
mono_gchandle_get_target_internal (MonoGCHandle gchandle);
MONO_COMPONENT_API void mono_gchandle_free_internal (MonoGCHandle gchandle);
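/* A minimal sketch of the handle API described above (illustrative only, hence
 * guarded out; "example_gchandles" is a hypothetical caller). */
#if 0
static void
example_gchandles (MonoObject *obj)
{
	/* Pin the object so a raw pointer stays valid; keep such handles short-lived. */
	MonoGCHandle strong = mono_gchandle_new_internal (obj, TRUE);
	/* ... use the raw pointer while the handle is alive ... */
	mono_gchandle_free_internal (strong);

	/* A weak reference: the target becomes NULL once the object is collected. */
	MonoGCHandle weak = mono_gchandle_new_weakref_internal (obj, FALSE);
	if (!mono_gchandle_get_target_internal (weak)) {
		/* the object has been collected */
	}
	mono_gchandle_free_internal (weak);
}
#endif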
/* Reference queue support
*
* A reference queue is used to get notifications of when objects are collected.
* Call mono_gc_reference_queue_new to create a new queue and pass the callback that
* will be invoked when registered objects are collected.
* Call mono_gc_reference_queue_add to register a pair of objects and data within a queue.
* The callback will be triggered once an object is both unreachable and finalized.
*/
MonoReferenceQueue*
mono_gc_reference_queue_new_internal (mono_reference_queue_callback callback);
void
mono_gc_reference_queue_free_internal (MonoReferenceQueue *queue);
mono_bool
mono_gc_reference_queue_add_internal (MonoReferenceQueue *queue, MonoObject *obj, void *user_data);
#define mono_gc_reference_queue_add_handle(queue, obj, user_data) \
(mono_gc_reference_queue_add_internal ((queue), MONO_HANDLE_RAW (MONO_HANDLE_CAST (MonoObject, obj)), (user_data)))
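/* A minimal sketch of the reference queue API described above (illustrative
 * only, hence guarded out; the callback is assumed to take only the registered
 * user_data, as in the public GC API, and the "example_*" names are
 * hypothetical). */
#if 0
static void
example_queue_callback (void *user_data)
{
	/* called once the registered object is unreachable and finalized */
	g_free (user_data);
}

static void
example_queue_register (MonoObject *obj, void *native_state)
{
	MonoReferenceQueue *queue = mono_gc_reference_queue_new_internal (example_queue_callback);
	mono_gc_reference_queue_add_internal (queue, obj, native_state);
	/* ... */
	mono_gc_reference_queue_free_internal (queue);
}
#endif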
/* GC write barriers support */
void
mono_gc_wbarrier_set_field_internal (MonoObject *obj, void* field_ptr, MonoObject* value);
void
mono_gc_wbarrier_set_arrayref_internal (MonoArray *arr, void* slot_ptr, MonoObject* value);
void
mono_gc_wbarrier_arrayref_copy_internal (void* dest_ptr, const void* src_ptr, int count);
MONO_COMPONENT_API void
mono_gc_wbarrier_generic_store_internal (void volatile* ptr, MonoObject* value);
void
mono_gc_wbarrier_generic_store_atomic_internal (void *ptr, MonoObject *value);
ICALL_EXPORT
void
mono_gc_wbarrier_generic_nostore_internal (void* ptr);
void
mono_gc_wbarrier_value_copy_internal (void* dest, const void* src, int count, MonoClass *klass);
void
mono_gc_wbarrier_object_copy_internal (MonoObject* obj, MonoObject *src);
MONO_COMPONENT_API char *
mono_runtime_get_managed_cmd_line (void);
#ifdef HOST_WASM
int
mono_string_instance_is_interned (MonoString *str);
#endif
gpointer
mono_method_get_unmanaged_wrapper_ftnptr_internal (MonoMethod *method, gboolean only_unmanaged_callers_only, MonoError *error);
#endif /* __MONO_OBJECT_INTERNALS_H__ */
| /**
* \file
*/
#ifndef __MONO_OBJECT_INTERNALS_H__
#define __MONO_OBJECT_INTERNALS_H__
#include <mono/utils/mono-forward-internal.h>
#include <mono/metadata/object-forward.h>
#include <mono/metadata/handle-decl.h>
#include <mono/metadata/object.h>
#include <mono/metadata/threads.h>
#include <mono/metadata/reflection.h>
#include <mono/metadata/mempool.h>
#include <mono/metadata/class-internals.h>
#include <mono/metadata/threads-types.h>
#include <mono/metadata/handle.h>
#include <mono/metadata/abi-details.h>
#include "mono/utils/mono-compiler.h"
#include "mono/utils/mono-error.h"
#include "mono/utils/mono-error-internals.h"
#include "mono/utils/mono-machine.h"
#include "mono/utils/mono-stack-unwinding.h"
#include "mono/utils/mono-tls.h"
#include "mono/utils/mono-coop-mutex.h"
#include <mono/metadata/icalls.h>
/* Use this as MONO_CHECK_ARG (arg,expr,) in functions returning void */
#define MONO_CHECK_ARG(arg, expr, retval) do { \
if (G_UNLIKELY (!(expr))) \
{ \
if (0) { (void)(arg); } /* check if the name exists */ \
ERROR_DECL (error); \
mono_error_set_argument_format (error, #arg, "assertion `%s' failed", #expr); \
mono_error_set_pending_exception (error); \
return retval; \
} \
} while (0)
#define MONO_CHECK_ARG_NULL_NAMED(arg, argname, retval) do { \
if (G_UNLIKELY (!(arg))) \
{ \
ERROR_DECL (error); \
mono_error_set_argument_null (error, (argname), ""); \
mono_error_set_pending_exception (error); \
return retval; \
} \
} while (0)
/* Use this as MONO_CHECK_ARG_NULL (arg,) in functions returning void */
#define MONO_CHECK_ARG_NULL(arg, retval) do { \
if (G_UNLIKELY (!(arg))) \
{ \
mono_error_set_argument_null (error, #arg, ""); \
return retval; \
} \
} while (0)
/* Use this as MONO_CHECK_ARG_NULL_HANDLE (arg,) in functions returning void */
#define MONO_CHECK_ARG_NULL_HANDLE(arg, retval) do { \
if (G_UNLIKELY (MONO_HANDLE_IS_NULL (arg))) \
{ \
mono_error_set_argument_null (error, #arg, ""); \
return retval; \
} \
} while (0)
#define MONO_CHECK_ARG_NULL_HANDLE_NAMED(arg, argname, retval) do { \
if (G_UNLIKELY (MONO_HANDLE_IS_NULL (arg))) \
{ \
mono_error_set_argument_null (error, (argname), ""); \
return retval; \
} \
} while (0)
/* Use this as MONO_CHECK_NULL (arg,) in functions returning void */
#define MONO_CHECK_NULL(arg, retval) do { \
if (G_UNLIKELY (!(arg))) \
{ \
ERROR_DECL (error); \
mono_error_set_null_reference (error); \
mono_error_set_pending_exception (error); \
return retval; \
} \
} while (0)
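/* A minimal sketch of the check macros above (illustrative only, hence guarded
 * out; "ves_icall_example" is a hypothetical icall). MONO_CHECK_NULL sets a
 * pending NullReferenceException and returns the given value. */
#if 0
ICALL_EXPORT void
ves_icall_example (MonoObject *this_obj)
{
	MONO_CHECK_NULL (this_obj, );
	/* ... this_obj is known to be non-NULL here ... */
}
#endif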
MONO_COMPONENT_API MonoClass *
mono_class_create_array (MonoClass *element_class, uint32_t rank);
MonoArrayHandle
mono_array_new_specific_handle (MonoVTable *vtable, uintptr_t n, MonoError *error);
MonoArray*
mono_array_new_specific_checked (MonoVTable *vtable, uintptr_t n, MonoError *error);
/*
* Macros which cache.
* These should be used instead of the original versions.
*/
static inline MonoClass*
mono_array_class_get_cached_function (MonoClass *eclass, MonoClass **aclass)
{
MonoClass *a = *aclass;
if (a)
return a;
a = mono_class_create_array (eclass, 1);
g_assert (a);
if (a)
*aclass = a;
return *aclass;
}
// eclass should be a run-time constant
// If you get an error using this macro, you need to manually instantiate the MonoClass *foo ## _array cache.
// See for example object_class_array.
#define mono_array_class_get_cached(eclass) (mono_array_class_get_cached_function ((eclass), &(eclass ## _array)))
static inline MonoArray*
mono_array_new_cached_function (MonoClass *aclass, int size, MonoError *error)
{
MonoVTable *vtable = mono_class_vtable_checked (aclass, error);
MonoArray *arr = NULL;
if (is_ok (error))
arr = mono_array_new_specific_checked (vtable, size, error);
return arr;
}
// eclass should be a run-time constant
// If you get an error using this macro, you need to manually instantiate the MonoClass *foo ## _array cache.
// See for example object_class_array.
#define mono_array_new_cached(eclass, size, error) \
mono_array_new_cached_function (mono_array_class_get_cached (eclass), (size), (error))
static inline MonoArrayHandle
mono_array_new_cached_handle_function (MonoClass *aclass, int size, MonoError *error)
{
MonoVTable *vtable = mono_class_vtable_checked (aclass, error);
MonoArrayHandle arr = NULL_HANDLE_ARRAY;
if (is_ok (error))
arr = mono_array_new_specific_handle (vtable, size, error);
return arr;
}
// eclass should be a run-time constant
// If you get an error using this macro, you need to manually instantiate the MonoClass *foo ## _array cache.
// See for example object_class_array.
#define mono_array_new_cached_handle(eclass, size, error) \
mono_array_new_cached_handle_function (mono_array_class_get_cached (eclass), (size), (error))
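/* A minimal sketch of the caching macros above (illustrative only, hence
 * guarded out; "example_class" is hypothetical). The macros token-paste
 * "_array" onto the element-class argument, so the caller provides a matching
 * cache variable next to it. */
#if 0
static MonoClass *example_class;        /* element class, initialized elsewhere */
static MonoClass *example_class_array;  /* cache the macros expand to */

static MonoArray*
example_new_array (int size, MonoError *error)
{
	return mono_array_new_cached (example_class, size, error);
}
#endif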
typedef uint32_t mono_array_size_t;
typedef int32_t mono_array_lower_bound_t;
#define MONO_ARRAY_MAX_INDEX ((int32_t) 0x7fffffff)
#define MONO_ARRAY_MAX_SIZE ((uint32_t) 0xffffffff)
typedef struct {
mono_array_size_t length;
mono_array_lower_bound_t lower_bound;
} MonoArrayBounds;
struct _MonoArray {
MonoObject obj;
/* bounds is NULL for szarrays */
MonoArrayBounds *bounds;
/* total number of elements of the array */
mono_array_size_t max_length;
/* we use mono_64bitaligned_t to ensure proper alignment on platforms that need it */
mono_64bitaligned_t vector [MONO_ZERO_LEN_ARRAY];
};
/* match the layout of the managed definition of Span<T> */
#define MONO_DEFINE_SPAN_OF_T(name, type) \
typedef struct { \
type* _pointer; \
uint32_t _length; \
} name;
MONO_DEFINE_SPAN_OF_T (MonoSpanOfObjects, MonoObject*)
#define MONO_SIZEOF_MONO_ARRAY (MONO_STRUCT_OFFSET_CONSTANT (MonoArray, vector))
struct _MonoString {
MonoObject object;
int32_t length;
mono_unichar2 chars [MONO_ZERO_LEN_ARRAY];
};
#define MONO_SIZEOF_MONO_STRING (MONO_STRUCT_OFFSET (MonoString, chars))
#define mono_object_class(obj) (((MonoObject*)(obj))->vtable->klass)
#define mono_object_domain(obj) (((MonoObject*)(obj))->vtable->domain)
#define mono_string_chars_fast(s) ((mono_unichar2*)(s)->chars)
#define mono_string_length_fast(s) ((s)->length)
/**
* mono_array_length_internal:
* \param array a \c MonoArray*
* \returns the total number of elements in the array. This works for
* both vectors and multidimensional arrays.
*/
#define mono_array_length_internal(array) ((array)->max_length)
static inline
uintptr_t
mono_array_handle_length (MonoArrayHandle arr)
{
MONO_REQ_GC_UNSAFE_MODE;
return mono_array_length_internal (MONO_HANDLE_RAW (arr));
}
// Equivalent to mono_array_addr_with_size, except:
// 1. A macro instead of a function -- the types of size and index are open.
// 2. mono_array_addr_with_size could, but does not, do GC mode transitions.
#define mono_array_addr_with_size_fast(array,size,index) ( ((char*)(array)->vector) + (size) * (index) )
#define mono_array_addr_fast(array,type,index) ((type*)(void*) mono_array_addr_with_size_fast (array, sizeof (type), index))
#define mono_array_get_fast(array,type,index) ( *(type*)mono_array_addr_fast ((array), type, (index)) )
#define mono_array_set_fast(array,type,index,value) \
do { \
type *__p = (type *) mono_array_addr_fast ((array), type, (index)); \
*__p = (value); \
} while (0)
#define mono_array_setref_fast(array,index,value) \
do { \
void **__p = (void **) mono_array_addr_fast ((array), void*, (index)); \
mono_gc_wbarrier_set_arrayref_internal ((array), __p, (MonoObject*)(value)); \
/* *__p = (value);*/ \
} while (0)
#define mono_array_memcpy_refs_fast(dest,destidx,src,srcidx,count) \
do { \
void **__p = (void **) mono_array_addr_fast ((dest), void*, (destidx)); \
void **__s = mono_array_addr_fast ((src), void*, (srcidx)); \
mono_gc_wbarrier_arrayref_copy_internal (__p, __s, (count)); \
} while (0)
// _internal is like _fast, but keeps the preexisting closed parameter types:
// int size
// uintptr_t idx
// in order to mimic the non-_internal versions without GC mode transitions, or at least
// to keep the runtime off the embedding API, whether or not that API does GC mode transitions.
static inline char*
mono_array_addr_with_size_internal (MonoArray *array, int size, uintptr_t idx)
{
return mono_array_addr_with_size_fast (array, size, idx);
}
#define mono_array_addr_internal(array,type,index) ((type*)(void*) mono_array_addr_with_size_internal (array, sizeof (type), index))
#define mono_array_get_internal(array,type,index) ( *(type*)mono_array_addr_internal ((array), type, (index)) )
#define mono_array_set_internal(array,type,index,value) \
do { \
type *__p = (type *) mono_array_addr_internal ((array), type, (index)); \
*__p = (value); \
} while (0)
#define mono_array_setref_internal(array,index,value) \
do { \
void **__p = (void **) mono_array_addr_internal ((array), void*, (index)); \
mono_gc_wbarrier_set_arrayref_internal ((array), __p, (MonoObject*)(value)); \
/* *__p = (value);*/ \
} while (0)
#define mono_array_memcpy_refs_internal(dest,destidx,src,srcidx,count) \
do { \
void **__p = (void **) mono_array_addr_internal ((dest), void*, (destidx)); \
void **__s = mono_array_addr_internal ((src), void*, (srcidx)); \
mono_gc_wbarrier_arrayref_copy_internal (__p, __s, (count)); \
} while (0)
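/* A minimal sketch of the accessor macros above (illustrative only, hence
 * guarded out; "example_array_access" is hypothetical). Value-type elements
 * use get/set; reference elements must go through setref so the write barrier
 * runs. */
#if 0
static void
example_array_access (MonoArray *ints, MonoArray *objs, MonoObject *value)
{
	gint32 first = mono_array_get_internal (ints, gint32, 0);
	mono_array_set_internal (ints, gint32, 1, first);
	mono_array_setref_internal (objs, 0, value);
}
#endif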
static inline gboolean
mono_handle_array_has_bounds (MonoArrayHandle arr)
{
return MONO_HANDLE_GETVAL (arr, bounds) != NULL;
}
static inline void
mono_handle_array_get_bounds_dim (MonoArrayHandle arr, gint32 dim, MonoArrayBounds *bounds)
{
*bounds = MONO_HANDLE_GETVAL (arr, bounds [dim]);
}
#define mono_span_length(span) (span->_length)
#define mono_span_get(span,type,idx) (type)(!span->_pointer ? (type)0 : span->_pointer[idx])
#define mono_span_addr(span,type,idx) (type*)(span->_pointer + idx)
#define mono_span_setref(span,index,value) \
do { \
void **__p = (void **) mono_span_addr ((span), void*, (index)); \
mono_gc_wbarrier_generic_store_internal (__p, (MonoObject*)(value)); \
/* *__p = (value);*/ \
} while (0)
static inline MonoSpanOfObjects
mono_span_create_from_object_array (MonoArray *arr) {
MonoSpanOfObjects span;
if (arr != NULL) {
span._length = (int32_t)mono_array_length_internal (arr);
span._pointer = mono_array_addr_fast (arr, MonoObject*, 0);
} else {
span._length = 0;
span._pointer = NULL;
}
return span;
}
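/* A minimal sketch of the span helpers above (illustrative only, hence guarded
 * out; "example_span_first" is hypothetical). The span macros take a pointer
 * to a MonoSpanOfObjects. */
#if 0
static MonoObject*
example_span_first (MonoArray *arr)
{
	MonoSpanOfObjects span = mono_span_create_from_object_array (arr);
	MonoSpanOfObjects *sp = &span;
	if (mono_span_length (sp) == 0)
		return NULL;
	return mono_span_get (sp, MonoObject*, 0);
}
#endif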
typedef struct {
MonoObject obj;
} MonoMarshalByRefObject;
TYPED_HANDLE_DECL (MonoMarshalByRefObject);
/* This is a copy of System.AppDomain */
struct _MonoAppDomain {
MonoMarshalByRefObject mbr;
};
/* Safely access System.AppDomain from native code */
TYPED_HANDLE_DECL (MonoAppDomain);
typedef struct _MonoStringBuilder MonoStringBuilder;
TYPED_HANDLE_DECL (MonoStringBuilder);
struct _MonoStringBuilder {
MonoObject object;
MonoArray *chunkChars;
MonoStringBuilder* chunkPrevious; // Link to the block logically before this block
int chunkLength; // The index in ChunkChars that represents the end of the block
int chunkOffset; // The logical offset (sum of all characters in previous blocks)
int maxCapacity;
};
static inline int
mono_string_builder_capacity (MonoStringBuilderHandle sbh)
{
MonoStringBuilder *sb = MONO_HANDLE_RAW (sbh);
return sb->chunkOffset + sb->chunkChars->max_length;
}
static inline int
mono_string_builder_string_length (MonoStringBuilderHandle sbh)
{
MonoStringBuilder *sb = MONO_HANDLE_RAW (sbh);
return sb->chunkOffset + sb->chunkLength;
}
typedef struct {
MonoType *type;
gpointer value;
MonoClass *klass;
} MonoTypedRef;
typedef struct {
gpointer args;
} MonoArgumentHandle;
typedef struct {
MonoMethodSignature *sig;
gpointer args;
gint32 next_arg;
gint32 num_args;
} MonoArgIterator;
struct _MonoException {
MonoObject object;
MonoString *class_name;
MonoString *message;
MonoObject *_data;
MonoObject *inner_ex;
MonoString *help_link;
/* Stores the IPs and the generic sharing infos
(vtable/MRGCTX) of the frames. */
MonoArray *trace_ips;
MonoString *stack_trace;
MonoString *remote_stack_trace;
gint32 remote_stack_index;
/* Dynamic methods referenced by the stack trace */
MonoArray *dynamic_methods;
gint32 hresult;
MonoString *source;
MonoObject *serialization_manager;
MonoObject *captured_traces;
MonoArray *native_trace_ips;
gint32 caught_in_unmanaged;
};
typedef struct {
MonoException base;
} MonoSystemException;
TYPED_HANDLE_DECL (MonoSystemException);
typedef struct {
MonoObject object;
MonoObject *async_state;
MonoObject *handle;
MonoObject *async_delegate;
gpointer *data;
MonoObject *object_data;
MonoBoolean sync_completed;
MonoBoolean completed;
MonoBoolean endinvoke_called;
MonoObject *async_callback;
MonoObject *execution_context;
MonoObject *original_context;
gint64 add_time;
} MonoAsyncResult;
TYPED_HANDLE_DECL (MonoAsyncResult);
typedef struct {
MonoMarshalByRefObject object;
gpointer handle;
} MonoWaitHandle;
TYPED_HANDLE_DECL (MonoWaitHandle);
/* System.Threading.StackCrawlMark */
/*
* This type is used to identify the method where execution has entered
* the BCL during stack walks. The outermost public method should
* define it like this:
* StackCrawlMark stackMark = StackCrawlMark.LookForMyCaller;
* and pass the stackMark as a byref argument down the call chain
* until it reaches an icall.
*/
typedef enum {
STACK_CRAWL_ME = 0,
STACK_CRAWL_CALLER = 1,
STACK_CRAWL_CALLERS_CALLER = 2,
STACK_CRAWL_THREAD = 3
} MonoStackCrawlMark;
/* MonoSafeHandle is in class-internals.h. */
/* Safely access System.Net.Sockets.SafeSocketHandle from native code */
TYPED_HANDLE_DECL (MonoSafeHandle);
/* This corresponds to System.Type */
struct _MonoReflectionType {
MonoObject object;
MonoType *type;
};
/* Safely access System.Type from native code */
TYPED_HANDLE_DECL (MonoReflectionType);
/* This corresponds to System.Runtime.CompilerServices.QCallTypeHandle */
struct _MonoQCallTypeHandle {
gpointer _ptr;
MonoType *type;
};
typedef struct _MonoQCallTypeHandle MonoQCallTypeHandle;
/* This corresponds to System.Runtime.CompilerServices.QCallAssembly */
struct _MonoQCallAssemblyHandle {
gpointer _ptr;
MonoAssembly *assembly;
};
typedef struct _MonoQCallAssemblyHandle MonoQCallAssemblyHandle;
typedef struct {
MonoObject object;
MonoReflectionType *class_to_proxy;
MonoObject *context;
MonoObject *unwrapped_server;
gint32 target_domain_id;
MonoString *target_uri;
MonoObject *object_identity;
MonoObject *obj_TP;
MonoObject *stub_data;
} MonoRealProxy;
/* Safely access System.Runtime.Remoting.Proxies.RealProxy from native code */
TYPED_HANDLE_DECL (MonoRealProxy);
typedef struct _MonoIUnknown MonoIUnknown;
typedef struct _MonoIUnknownVTable MonoIUnknownVTable;
/* STDCALL on Windows, CDECL everywhere else to work with XPCOM and MainWin COM */
#ifdef HOST_WIN32
#define STDCALL __stdcall
#else
#define STDCALL
#endif
struct _MonoIUnknownVTable
{
int (STDCALL *QueryInterface)(MonoIUnknown *pUnk, gconstpointer riid, gpointer* ppv);
int (STDCALL *AddRef)(MonoIUnknown *pUnk);
int (STDCALL *Release)(MonoIUnknown *pUnk);
};
struct _MonoIUnknown
{
const MonoIUnknownVTable *vtable;
};
typedef struct {
MonoMarshalByRefObject object;
MonoIUnknown *iunknown;
GHashTable* itf_hash;
MonoObject *synchronization_context;
} MonoComObject;
TYPED_HANDLE_DECL (MonoComObject);
typedef struct {
MonoRealProxy real_proxy;
MonoComObject *com_object;
gint32 ref_count;
} MonoComInteropProxy;
TYPED_HANDLE_DECL (MonoComInteropProxy);
typedef struct {
MonoObject object;
MonoRealProxy *rp;
MonoRemoteClass *remote_class;
MonoBoolean custom_type_info;
} MonoTransparentProxy;
/* Safely access System.Runtime.Remoting.Proxies.TransparentProxy from native code */
TYPED_HANDLE_DECL (MonoTransparentProxy);
typedef struct {
MonoObject obj;
MonoReflectionMethod *method;
MonoArray *args;
MonoArray *names;
MonoArray *arg_types;
MonoObject *ctx;
MonoObject *rval;
MonoObject *exc;
MonoAsyncResult *async_result;
guint32 call_type;
} MonoMethodMessage;
TYPED_HANDLE_DECL (MonoMethodMessage);
/* Keep in sync with System.MonoAsyncCall */
typedef struct {
MonoObject object;
MonoMethodMessage *msg;
MonoMethod *cb_method;
MonoDelegate *cb_target;
MonoObject *state;
MonoObject *res;
MonoArray *out_args;
} MonoAsyncCall;
TYPED_HANDLE_DECL (MonoAsyncCall);
typedef struct {
MonoObject obj;
MonoArray *frames;
MonoArray *captured_traces;
MonoBoolean debug_info;
} MonoStackTrace;
TYPED_HANDLE_DECL (MonoStackTrace);
typedef struct {
MonoObject obj;
gint32 il_offset;
gint32 native_offset;
gint64 method_address;
gint32 method_index;
MonoReflectionMethod *method;
MonoString *filename;
gint32 line;
gint32 column;
MonoString *internal_method_name;
} MonoStackFrame;
TYPED_HANDLE_DECL (MonoStackFrame);
typedef enum {
MONO_THREAD_FLAG_DONT_MANAGE = 1, // Don't wait for or abort this thread
MONO_THREAD_FLAG_NAME_SET = 2, // Thread name set from managed code
MONO_THREAD_FLAG_CLEANUP_FROM_NATIVE = 4, // Thread initialized in native so clean up in native
} MonoThreadFlags;
struct _MonoThreadInfo;
typedef struct MonoThreadName {
char* volatile chars; // null check outside of lock
gint32 free; // bool
gint32 length;
} MonoThreadName;
void
mono_gstring_append_thread_name (GString*, MonoInternalThread*);
struct _MonoInternalThread {
MonoObject obj;
volatile int lock_thread_id; /* to be used as the pre-shifted thread id in thin locks */
MonoThreadHandle *handle;
gpointer native_handle;
MonoThreadName name;
guint32 state; /* must be accessed while longlived->synch_cs is locked */
MonoException *abort_exc;
MonoGCHandle abort_state_handle;
guint64 tid; /* This is accessed as a gsize in the code (so it can hold a 64bit pointer on systems that need it), but needs to reserve 64 bits of space on all machines as it corresponds to a field in managed code */
gsize debugger_thread; // FIXME switch to bool as soon as CI testing with corlib version bump works
gpointer *static_data;
struct _MonoThreadInfo *thread_info;
/* This is modified using atomic ops, so keep it a gint32 */
gint32 __interruption_requested;
/* data that must live as long as this managed object is not finalized
* or as long as the underlying thread is attached, whichever is
* longer */
MonoLongLivedThreadData *longlived;
MonoBoolean threadpool_thread;
guint8 apartment_state;
gint32 managed_id;
guint32 small_id;
MonoThreadManageCallback manage_callback;
gsize flags;
gpointer thread_pinning_ref;
gint32 priority;
GPtrArray *owned_mutexes;
MonoOSEvent *suspended;
gint32 self_suspended; // TRUE | FALSE
gsize thread_state;
/* Points to self, set when starting up/attaching */
struct _MonoInternalThread *internal_thread;
MonoException *pending_exception;
/* This is used only to check that we are in sync between the representation
* of MonoInternalThread in native and InternalThread in managed
*
* DO NOT RENAME! DO NOT ADD FIELDS AFTER! */
gpointer last;
};
typedef struct {
guint32 state;
MonoObject *additional;
} MonoStreamingContext;
typedef struct {
MonoObject object;
guint32 intType;
} MonoInterfaceTypeAttribute;
typedef struct {
MonoObject object;
guint32 intType;
} MonoClassInterfaceAttribute;
/* Safely access System.Delegate from native code */
TYPED_HANDLE_DECL (MonoDelegate);
typedef void (*InterpJitInfoFunc) (MonoJitInfo *ji, gpointer user_data);
/*
* Callbacks supplied by the runtime and called by the modules in metadata/
* This interface is easier to extend than adding a new function type +
* a new 'install' function for every callback.
*/
typedef struct {
gpointer (*create_ftnptr) (gpointer addr);
gpointer (*get_addr_from_ftnptr) (gpointer descr);
char* (*get_runtime_build_info) (void);
const char* (*get_runtime_build_version) (void);
gpointer (*get_vtable_trampoline) (MonoVTable *vtable, int slot_index);
gpointer (*get_imt_trampoline) (MonoVTable *vtable, int imt_slot_index);
gboolean (*imt_entry_inited) (MonoVTable *vtable, int imt_slot_index);
void (*set_cast_details) (MonoClass *from, MonoClass *to);
void (*debug_log) (int level, MonoString *category, MonoString *message);
gboolean (*debug_log_is_enabled) (void);
void (*init_delegate) (MonoDelegateHandle delegate, MonoObjectHandle target, gpointer addr, MonoMethod *method, MonoError *error);
MonoObject* (*runtime_invoke) (MonoMethod *method, void *obj, void **params, MonoObject **exc, MonoError *error);
void* (*compile_method) (MonoMethod *method, MonoError *error);
gpointer (*create_jit_trampoline) (MonoMethod *method, MonoError *error);
/* used to free a dynamic method */
void (*free_method) (MonoMethod *method);
gpointer (*create_delegate_trampoline) (MonoClass *klass);
GHashTable *(*get_weak_field_indexes) (MonoImage *image);
gboolean (*is_interpreter_enabled) (void);
void (*init_mem_manager)(MonoMemoryManager*);
void (*free_mem_manager)(MonoMemoryManager*);
void (*metadata_update_published) (MonoAssemblyLoadContext *alc, uint32_t generation);
void (*get_jit_stats)(gint64 *methods_compiled, gint64 *cil_code_size_bytes, gint64 *native_code_size_bytes, gint64 *jit_time);
void (*get_exception_stats)(guint32 *exception_count);
// Same as compile_method, but returns a MonoFtnDesc in llvmonly mode
gpointer (*get_ftnptr)(MonoMethod *method, MonoError *error);
void (*interp_jit_info_foreach)(InterpJitInfoFunc func, gpointer user_data);
gboolean (*interp_sufficient_stack)(gsize size);
} MonoRuntimeCallbacks;
typedef gboolean (*MonoInternalStackWalk) (MonoStackFrameInfo *frame, MonoContext *ctx, gpointer data);
typedef gboolean (*MonoInternalExceptionFrameWalk) (MonoMethod *method, gpointer ip, size_t native_offset, gboolean managed, gpointer user_data);
typedef struct {
void (*mono_walk_stack_with_ctx) (MonoInternalStackWalk func, MonoContext *ctx, MonoUnwindOptions options, void *user_data);
void (*mono_walk_stack_with_state) (MonoInternalStackWalk func, MonoThreadUnwindState *state, MonoUnwindOptions options, void *user_data);
void (*mono_raise_exception) (MonoException *ex);
void (*mono_raise_exception_with_ctx) (MonoException *ex, MonoContext *ctx);
gboolean (*mono_exception_walk_trace) (MonoException *ex, MonoInternalExceptionFrameWalk func, gpointer user_data);
gboolean (*mono_install_handler_block_guard) (MonoThreadUnwindState *unwind_state);
void (*mono_uninstall_current_handler_block_guard) (void);
gboolean (*mono_current_thread_has_handle_block_guard) (void);
gboolean (*mono_above_abort_threshold) (void);
void (*mono_clear_abort_threshold) (void);
void (*mono_reraise_exception) (MonoException *ex);
} MonoRuntimeExceptionHandlingCallbacks;
MONO_COLD void mono_set_pending_exception (MonoException *exc);
void
mono_delegate_ctor (MonoObjectHandle this_obj, MonoObjectHandle target, gpointer addr, MonoMethod *method, MonoError *error);
MonoMethod *
mono_get_delegate_invoke_checked (MonoClass *klass, MonoError *error);
MonoMethod *
mono_get_delegate_begin_invoke_checked (MonoClass *klass, MonoError *error);
MonoMethod *
mono_get_delegate_end_invoke_checked (MonoClass *klass, MonoError *error);
void
mono_runtime_free_method (MonoMethod *method);
void
mono_install_callbacks (MonoRuntimeCallbacks *cbs);
MONO_COMPONENT_API
MonoRuntimeCallbacks*
mono_get_runtime_callbacks (void);
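/* A minimal sketch of how an execution engine registers its callbacks
 * (illustrative only, hence guarded out; the "example_*" names are
 * hypothetical). The engine fills in the entries it implements and installs
 * the struct once at startup; metadata/ code later retrieves the same pointer
 * via mono_get_runtime_callbacks. */
#if 0
static MonoRuntimeCallbacks example_callbacks;

static void
example_install_callbacks (void)
{
	example_callbacks.compile_method = example_compile_method;
	example_callbacks.runtime_invoke = example_runtime_invoke;
	mono_install_callbacks (&example_callbacks);
}
#endif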
void
mono_install_eh_callbacks (MonoRuntimeExceptionHandlingCallbacks *cbs);
MONO_COMPONENT_API
MonoRuntimeExceptionHandlingCallbacks *
mono_get_eh_callbacks (void);
void
mono_raise_exception_deprecated (MonoException *ex);
void
mono_reraise_exception_deprecated (MonoException *ex);
void
mono_raise_exception_with_context (MonoException *ex, MonoContext *ctx);
void
mono_type_initialization_init (void);
int
mono_thread_kill (MonoInternalThread *thread, int signal);
MonoNativeTlsKey
mono_thread_get_tls_key (void);
gint32
mono_thread_get_tls_offset (void);
MonoNativeTlsKey
mono_domain_get_tls_key (void);
gint32
mono_domain_get_tls_offset (void);
/* Reflection and Reflection.Emit support */
/*
* Handling System.Type objects:
*
* Fields defined as System.Type in managed code should be defined as MonoObject*
* in unmanaged structures, and the monotype_cast () function should be used for
* casting them to MonoReflectionType* to avoid crashes/security issues when
* encountering instances of user defined subclasses of System.Type.
*/
#define IS_MONOTYPE(obj) (!(obj) || (m_class_get_image (mono_object_class ((obj))) == mono_defaults.corlib && ((MonoReflectionType*)(obj))->type != NULL))
#define IS_MONOTYPE_HANDLE(obj) IS_MONOTYPE (MONO_HANDLE_RAW (obj))
/* This should be used for accessing members of Type[] arrays */
#define mono_type_array_get(arr,index) monotype_cast (mono_array_get_internal ((arr), gpointer, (index)))
/*
* Cast an object to MonoReflectionType, making sure it is a System.MonoType or
* a subclass of it.
*/
static inline MonoReflectionType*
monotype_cast (MonoObject *obj)
{
g_assert (IS_MONOTYPE (obj));
return (MonoReflectionType*)obj;
}
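/* A minimal sketch of the convention above (illustrative only, hence guarded
 * out; "example_first_type" is hypothetical): elements of a managed Type[]
 * are fetched through mono_type_array_get, which applies monotype_cast. */
#if 0
static MonoReflectionType*
example_first_type (MonoArray *types)
{
	return mono_type_array_get (types, 0);
}
#endif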
/*
* The following structure must match the C# implementation in our corlib.
*/
struct _MonoReflectionMethod {
MonoObject object;
MonoMethod *method;
MonoString *name;
MonoReflectionType *reftype;
};
/* Safely access System.Reflection.MonoMethod from native code */
TYPED_HANDLE_DECL (MonoReflectionMethod);
struct _MonoDelegate {
MonoObject object;
/* The compiled code of the target method */
gpointer method_ptr;
/* The invoke code */
gpointer invoke_impl;
MonoObject *target;
MonoMethod *method;
gpointer delegate_trampoline;
/* Extra argument passed to the target method in llvmonly mode */
gpointer extra_arg;
/*
* If non-NULL, this points to a memory location which stores the address of
* the compiled code of the method, or NULL if it is not yet compiled.
*/
guint8 **method_code;
gpointer interp_method;
/* Interp method that is executed when invoking the delegate */
gpointer interp_invoke_impl;
MonoReflectionMethod *method_info;
MonoReflectionMethod *original_method_info;
MonoObject *data;
MonoBoolean method_is_virtual;
};
typedef struct _MonoMulticastDelegate MonoMulticastDelegate;
struct _MonoMulticastDelegate {
MonoDelegate delegate;
MonoArray *delegates;
};
/* Safely access System.MulticastDelegate from native code */
TYPED_HANDLE_DECL (MonoMulticastDelegate);
struct _MonoReflectionField {
MonoObject object;
MonoClass *klass;
MonoClassField *field;
MonoString *name;
MonoReflectionType *type;
guint32 attrs;
};
/* Safely access System.Reflection.MonoField from native code */
TYPED_HANDLE_DECL (MonoReflectionField);
struct _MonoReflectionProperty {
MonoObject object;
MonoClass *klass;
MonoProperty *property;
};
/* Safely access System.Reflection.MonoProperty from native code */
TYPED_HANDLE_DECL (MonoReflectionProperty);
/*This is System.EventInfo*/
struct _MonoReflectionEvent {
MonoObject object;
};
/* Safely access System.Reflection.EventInfo from native code */
TYPED_HANDLE_DECL (MonoReflectionEvent);
typedef struct {
MonoReflectionEvent object;
MonoClass *klass;
MonoEvent *event;
} MonoReflectionMonoEvent;
/* Safely access System.Reflection.MonoEvent from native code */
TYPED_HANDLE_DECL (MonoReflectionMonoEvent);
typedef struct {
MonoObject object;
} MonoReflectionParameter;
/* Safely access System.Reflection.ParameterInfo from native code */
TYPED_HANDLE_DECL (MonoReflectionParameter);
struct _MonoReflectionMethodBody {
MonoObject object;
};
/* Safely access System.Reflection.MethodBody from native code */
TYPED_HANDLE_DECL (MonoReflectionMethodBody);
/* System.RuntimeAssembly */
struct _MonoReflectionAssembly {
MonoObject object;
MonoAssembly *assembly;
};
typedef struct {
MonoReflectionType *utype;
MonoArray *values;
MonoArray *names;
} MonoEnumInfo;
typedef struct {
MonoReflectionType *parent;
MonoReflectionType *ret;
guint32 attrs;
guint32 implattrs;
guint32 callconv;
} MonoMethodInfo;
typedef struct {
MonoReflectionType *parent;
MonoReflectionType *declaring_type;
MonoString *name;
MonoReflectionMethod *get;
MonoReflectionMethod *set;
guint32 attrs;
} MonoPropertyInfo;
typedef struct {
MonoReflectionType *declaring_type;
MonoReflectionType *reflected_type;
MonoString *name;
MonoReflectionMethod *add_method;
MonoReflectionMethod *remove_method;
MonoReflectionMethod *raise_method;
guint32 attrs;
MonoArray *other_methods;
} MonoEventInfo;
typedef struct {
MonoObject *member;
gint32 code_pos;
} MonoReflectionILTokenInfo;
typedef struct {
MonoObject object;
MonoArray *code;
gint32 code_len;
gint32 max_stack;
gint32 cur_stack;
MonoArray *locals;
MonoArray *ex_handlers;
gint32 num_token_fixups;
MonoArray *token_fixups;
} MonoReflectionILGen;
typedef struct {
MonoArray *handlers;
gint32 start;
gint32 len;
gint32 label;
} MonoILExceptionInfo;
typedef struct {
MonoObject *extype;
gint32 type;
gint32 start;
gint32 len;
gint32 filter_offset;
} MonoILExceptionBlock;
typedef struct {
MonoObject object;
MonoObject *catch_type;
gint32 filter_offset;
gint32 flags;
gint32 try_offset;
gint32 try_length;
gint32 handler_offset;
gint32 handler_length;
} MonoReflectionExceptionHandlingClause;
/* Safely access System.Reflection.ExceptionHandlingClause from native code */
TYPED_HANDLE_DECL (MonoReflectionExceptionHandlingClause);
typedef struct {
MonoObject object;
MonoReflectionType *local_type;
MonoBoolean is_pinned;
guint16 local_index;
} MonoReflectionLocalVariableInfo;
/* Safely access System.Reflection.LocalVariableInfo from native code */
TYPED_HANDLE_DECL (MonoReflectionLocalVariableInfo);
typedef struct {
/*
* Must have the same layout as MonoReflectionLocalVariableInfo, since
 * LocalBuilder inherits from it under .NET 2.0.
*/
MonoObject object;
MonoObject *type;
MonoBoolean is_pinned;
guint16 local_index;
MonoString *name;
} MonoReflectionLocalBuilder;
typedef struct {
MonoObject object;
gint32 count;
gint32 type;
gint32 eltype;
MonoString *guid;
MonoString *mcookie;
MonoString *marshaltype;
MonoObject *marshaltyperef;
gint32 param_num;
MonoBoolean has_size;
} MonoReflectionMarshal;
typedef struct {
MonoObject object;
MonoObject* methodb;
MonoString *name;
MonoArray *cattrs;
MonoReflectionMarshal *marshal_info;
guint32 attrs;
int position;
guint32 table_idx;
MonoObject *def_value;
} MonoReflectionParamBuilder;
typedef struct {
MonoObject object;
MonoMethod *mhandle;
MonoReflectionILGen *ilgen;
MonoArray *parameters;
guint32 attrs;
guint32 iattrs;
guint32 table_idx;
guint32 call_conv;
MonoObject *type;
MonoArray *pinfo;
MonoArray *cattrs;
MonoBoolean init_locals;
MonoArray *param_modreq;
MonoArray *param_modopt;
} MonoReflectionCtorBuilder;
/* Safely access System.Reflection.Emit.ConstructorBuilder from native code */
TYPED_HANDLE_DECL (MonoReflectionCtorBuilder);
typedef struct {
MonoObject object;
MonoMethod *mhandle;
MonoObject *rtype;
MonoArray *parameters;
guint32 attrs;
guint32 iattrs;
MonoString *name;
guint32 table_idx;
MonoArray *code;
MonoReflectionILGen *ilgen;
MonoObject *type;
MonoArray *pinfo;
MonoArray *cattrs;
MonoArray *override_methods;
MonoString *dll;
MonoString *dllentry;
guint32 charset;
guint32 extra_flags;
guint32 native_cc;
guint32 call_conv;
MonoBoolean init_locals;
MonoGenericContainer *generic_container;
MonoArray *generic_params;
MonoArray *return_modreq;
MonoArray *return_modopt;
MonoArray *param_modreq;
MonoArray *param_modopt;
} MonoReflectionMethodBuilder;
/* Safely access System.Reflection.Emit.MethodBuilder from native code */
TYPED_HANDLE_DECL (MonoReflectionMethodBuilder);
typedef struct {
MonoObject object;
MonoMethod *mhandle;
MonoReflectionType *parent;
MonoReflectionType *ret;
MonoArray *parameters;
MonoString *name;
guint32 table_idx;
guint32 call_conv;
} MonoReflectionArrayMethod;
/* Safely access System.Reflection.Emit.MonoArrayMethod from native code */
TYPED_HANDLE_DECL (MonoReflectionArrayMethod);
typedef struct {
MonoReflectionAssembly assembly;
MonoDynamicAssembly *dynamic_assembly;
MonoArray *modules;
MonoString *name;
MonoArray *cattrs;
MonoString *version;
MonoString *culture;
MonoArray *public_key_token;
MonoArray *loaded_modules;
guint32 access;
} MonoReflectionAssemblyBuilder;
/* Safely access System.Reflection.Emit.AssemblyBuilder from native code */
TYPED_HANDLE_DECL (MonoReflectionAssemblyBuilder);
typedef struct {
MonoObject object;
guint32 attrs;
MonoObject *type;
MonoString *name;
MonoObject *def_value;
gint32 offset;
MonoReflectionType *typeb;
MonoArray *rva_data;
MonoArray *cattrs;
MonoReflectionMarshal *marshal_info;
MonoClassField *handle;
MonoArray *modreq;
MonoArray *modopt;
} MonoReflectionFieldBuilder;
/* Safely access System.Reflection.Emit.FieldBuilder from native code */
TYPED_HANDLE_DECL (MonoReflectionFieldBuilder);
typedef struct {
MonoObject object;
guint32 attrs;
MonoString *name;
MonoObject *type;
MonoArray *parameters;
MonoArray *cattrs;
MonoObject *def_value;
MonoReflectionMethodBuilder *set_method;
MonoReflectionMethodBuilder *get_method;
gint32 table_idx;
MonoObject *type_builder;
MonoArray *returnModReq;
MonoArray *returnModOpt;
MonoArray *paramModReq;
MonoArray *paramModOpt;
guint32 call_conv;
} MonoReflectionPropertyBuilder;
/* System.RuntimeModule */
struct _MonoReflectionModule {
MonoObject obj;
MonoImage *image;
MonoReflectionAssembly *assembly;
MonoString *fqname;
MonoString *name;
MonoString *scopename;
MonoBoolean is_resource;
guint32 token;
};
/* Safely access System.Reflection.Module from native code */
TYPED_HANDLE_DECL (MonoReflectionModule);
typedef struct {
MonoReflectionModule module;
MonoDynamicImage *dynamic_image;
gint32 num_types;
MonoArray *types;
MonoArray *cattrs;
guint32 table_idx;
MonoReflectionAssemblyBuilder *assemblyb;
gboolean is_main;
MonoArray *resources;
GHashTable *unparented_classes;
MonoArray *table_indexes;
} MonoReflectionModuleBuilder;
/* Safely access System.Reflection.Emit.ModuleBuilder from native code */
TYPED_HANDLE_DECL (MonoReflectionModuleBuilder);
typedef enum {
MonoTypeBuilderNew = 0,
MonoTypeBuilderEntered = 1,
MonoTypeBuilderFinished = 2
} MonoTypeBuilderState;
struct _MonoReflectionTypeBuilder {
MonoReflectionType type;
MonoString *name;
MonoString *nspace;
MonoObject *parent;
MonoReflectionType *nesting_type;
MonoArray *interfaces;
gint32 num_methods;
MonoArray *methods;
MonoArray *ctors;
MonoArray *properties;
gint32 num_fields;
MonoArray *fields;
MonoArray *events;
MonoArray *cattrs;
MonoArray *subtypes;
guint32 attrs;
guint32 table_idx;
MonoReflectionModuleBuilder *module;
gint32 class_size;
gint32 packing_size;
MonoGenericContainer *generic_container;
MonoArray *generic_params;
MonoReflectionType *created;
gint32 is_byreflike_set;
gint32 state;
};
typedef struct {
MonoReflectionType type;
MonoReflectionType *element_type;
gint32 rank;
} MonoReflectionArrayType;
/* Safely access System.Reflection.Emit.ArrayType (in DerivedTypes.cs) from native code */
TYPED_HANDLE_DECL (MonoReflectionArrayType);
typedef struct {
MonoReflectionType type;
MonoReflectionType *element_type;
} MonoReflectionDerivedType;
/* Safely access System.Reflection.Emit.SymbolType and subclasses (in DerivedTypes.cs) from native code */
TYPED_HANDLE_DECL (MonoReflectionDerivedType);
typedef struct {
MonoReflectionType type;
MonoReflectionTypeBuilder *tbuilder;
MonoReflectionMethodBuilder *mbuilder;
MonoString *name;
guint32 index;
MonoReflectionType *base_type;
MonoArray *iface_constraints;
MonoArray *cattrs;
guint32 attrs;
} MonoReflectionGenericParam;
/* Safely access System.Reflection.Emit.GenericTypeParameterBuilder from native code */
TYPED_HANDLE_DECL (MonoReflectionGenericParam);
typedef struct {
MonoReflectionType type;
MonoReflectionTypeBuilder *tb;
} MonoReflectionEnumBuilder;
/* Safely access System.Reflection.Emit.EnumBuilder from native code */
TYPED_HANDLE_DECL (MonoReflectionEnumBuilder);
typedef struct _MonoReflectionGenericClass MonoReflectionGenericClass;
struct _MonoReflectionGenericClass {
MonoReflectionType type;
MonoReflectionType *generic_type; /*Can be either a MonoType or a TypeBuilder*/
MonoArray *type_arguments;
};
/* Safely access System.Reflection.Emit.TypeBuilderInstantiation from native code */
TYPED_HANDLE_DECL (MonoReflectionGenericClass);
typedef struct {
MonoObject obj;
MonoString *name;
MonoReflectionType *type;
MonoReflectionTypeBuilder *typeb;
MonoArray *cattrs;
MonoReflectionMethodBuilder *add_method;
MonoReflectionMethodBuilder *remove_method;
MonoReflectionMethodBuilder *raise_method;
MonoArray *other_methods;
guint32 attrs;
guint32 table_idx;
} MonoReflectionEventBuilder;
typedef struct {
MonoObject obj;
MonoReflectionMethod *ctor;
MonoArray *data;
} MonoReflectionCustomAttr;
TYPED_HANDLE_DECL (MonoReflectionCustomAttr);
typedef struct {
MonoObject object;
guint32 utype;
gint32 safe_array_subtype;
MonoReflectionType *marshal_safe_array_user_defined_subtype;
gint32 IidParameterIndex;
guint32 array_subtype;
gint16 size_param_index;
gint32 size_const;
MonoString *marshal_type;
MonoReflectionType *marshal_type_ref;
MonoString *marshal_cookie;
} MonoReflectionMarshalAsAttribute;
/* Safely access System.Runtime.InteropServices.MarshalAsAttribute */
TYPED_HANDLE_DECL (MonoReflectionMarshalAsAttribute);
typedef struct {
MonoObject object;
gint32 call_conv;
gint32 charset;
MonoBoolean best_fit_mapping;
MonoBoolean throw_on_unmappable;
MonoBoolean set_last_error;
} MonoReflectionUnmanagedFunctionPointerAttribute;
typedef struct {
MonoObject object;
MonoString *guid;
} MonoReflectionGuidAttribute;
typedef struct {
MonoObject object;
MonoMethod *mhandle;
MonoString *name;
MonoReflectionType *rtype;
MonoArray *parameters;
guint32 attrs;
guint32 call_conv;
MonoReflectionModule *module;
MonoBoolean skip_visibility;
MonoBoolean init_locals;
MonoReflectionILGen *ilgen;
gint32 nrefs;
MonoArray *refs;
GSList *referenced_by;
MonoReflectionType *owner;
} MonoReflectionDynamicMethod;
/* Safely access System.Reflection.Emit.DynamicMethod from native code */
TYPED_HANDLE_DECL (MonoReflectionDynamicMethod);
typedef struct {
MonoObject object;
MonoReflectionModuleBuilder *module;
MonoArray *arguments;
guint32 type;
MonoReflectionType *return_type;
guint32 call_conv;
guint32 unmanaged_call_conv;
MonoArray *modreqs;
MonoArray *modopts;
} MonoReflectionSigHelper;
/* Safely access System.Reflection.Emit.SignatureHelper from native code */
TYPED_HANDLE_DECL (MonoReflectionSigHelper);
typedef struct {
MonoObject object;
MonoBoolean visible;
} MonoReflectionComVisibleAttribute;
typedef struct {
MonoObject object;
MonoReflectionType *type;
} MonoReflectionComDefaultInterfaceAttribute;
enum {
RESOURCE_LOCATION_EMBEDDED = 1,
RESOURCE_LOCATION_ANOTHER_ASSEMBLY = 2,
RESOURCE_LOCATION_IN_MANIFEST = 4
};
typedef struct {
MonoObject object;
MonoReflectionAssembly *assembly;
MonoString *filename;
guint32 location;
} MonoManifestResourceInfo;
/* Safely access System.Reflection.ManifestResourceInfo from native code */
TYPED_HANDLE_DECL (MonoManifestResourceInfo);
/* A boxed IntPtr */
typedef struct {
MonoObject object;
gpointer m_value;
} MonoIntPtr;
/* Keep in sync with System.GenericParameterAttributes */
typedef enum {
GENERIC_PARAMETER_ATTRIBUTE_NON_VARIANT = 0,
GENERIC_PARAMETER_ATTRIBUTE_COVARIANT = 1,
GENERIC_PARAMETER_ATTRIBUTE_CONTRAVARIANT = 2,
GENERIC_PARAMETER_ATTRIBUTE_VARIANCE_MASK = 3,
GENERIC_PARAMETER_ATTRIBUTE_NO_SPECIAL_CONSTRAINT = 0,
GENERIC_PARAMETER_ATTRIBUTE_REFERENCE_TYPE_CONSTRAINT = 4,
GENERIC_PARAMETER_ATTRIBUTE_VALUE_TYPE_CONSTRAINT = 8,
GENERIC_PARAMETER_ATTRIBUTE_CONSTRUCTOR_CONSTRAINT = 16,
GENERIC_PARAMETER_ATTRIBUTE_SPECIAL_CONSTRAINTS_MASK = 28
} GenericParameterAttributes;
typedef struct {
MonoType *type;
MonoClassField *field;
MonoProperty *prop;
} CattrNamedArg;
// Keep in sync with System.Runtime.Loader.AssemblyLoadContext.InternalState
typedef enum {
ALIVE = 0,
UNLOADING = 1
} MonoManagedAssemblyLoadContextInternalState;
/* All MonoInternalThread instances should be pinned, so it's safe to use the raw ptr. However
* for uniformity, icall wrapping will make handles anyway. So this is the method for getting the payload.
*/
static inline MonoInternalThread*
mono_internal_thread_handle_ptr (MonoInternalThreadHandle h)
{
/* The SUPPRESS here prevents a Centrinel warning due to merely seeing this
* function definition. Callees will still get a warning unless we
* attach a suppress attribute to the declaration.
*/
return MONO_HANDLE_SUPPRESS (MONO_HANDLE_RAW (h));
}
guint32 mono_image_insert_string (MonoReflectionModuleBuilderHandle module, MonoStringHandle str, MonoError *error);
guint32 mono_image_create_token (MonoDynamicImage *assembly, MonoObjectHandle obj, gboolean create_methodspec, gboolean register_token, MonoError *error);
void mono_dynamic_image_free (MonoDynamicImage *image);
void mono_dynamic_image_free_image (MonoDynamicImage *image);
void mono_dynamic_image_release_gc_roots (MonoDynamicImage *image);
void mono_reflection_setup_internal_class (MonoReflectionTypeBuilder *tb);
void mono_reflection_get_dynamic_overrides (MonoClass *klass, MonoMethod ***overrides, int *num_overrides, MonoError *error);
void mono_reflection_destroy_dynamic_method (MonoReflectionDynamicMethod *mb);
ICALL_EXPORT
void
ves_icall_SymbolType_create_unmanaged_type (MonoReflectionType *type);
void mono_reflection_register_with_runtime (MonoReflectionType *type);
MonoMethodSignature * mono_reflection_lookup_signature (MonoImage *image, MonoMethod *method, guint32 token, MonoError *error);
MonoArrayHandle mono_param_get_objects_internal (MonoMethod *method, MonoClass *refclass, MonoError *error);
MonoClass*
mono_class_bind_generic_parameters (MonoClass *klass, int type_argc, MonoType **types, gboolean is_dynamic);
MonoType*
mono_reflection_bind_generic_parameters (MonoReflectionTypeHandle type, int type_argc, MonoType **types, MonoError *error);
void
mono_reflection_generic_class_initialize (MonoReflectionGenericClass *type, MonoArray *fields);
ICALL_EXPORT
MonoReflectionEvent *
ves_icall_TypeBuilder_get_event_info (MonoReflectionTypeBuilder *tb, MonoReflectionEventBuilder *eb);
MonoReflectionMarshalAsAttributeHandle
mono_reflection_marshal_as_attribute_from_marshal_spec (MonoClass *klass, MonoMarshalSpec *spec, MonoError *error);
gpointer
mono_reflection_lookup_dynamic_token (MonoImage *image, guint32 token, gboolean valid_token, MonoClass **handle_class, MonoGenericContext *context, MonoError *error);
gboolean
mono_reflection_call_is_assignable_to (MonoClass *klass, MonoClass *oklass, MonoError *error);
gboolean
mono_get_constant_value_from_blob (MonoTypeEnum type, const char *blob, void *value, MonoStringHandleOut string_handle, MonoError *error);
gboolean
mono_metadata_read_constant_value (const char *blob, MonoTypeEnum type, void *value, MonoError *error);
char*
mono_string_from_blob (const char *str, MonoError *error);
void
mono_release_type_locks (MonoInternalThread *thread);
/**
* mono_string_handle_length:
* \param s \c MonoString
* \returns the length in characters of the string
*/
#ifdef ENABLE_CHECKED_BUILD_GC
int
mono_string_handle_length (MonoStringHandle s);
#else
#define mono_string_handle_length(s) (MONO_HANDLE_GETVAL ((s), length))
#endif
char *
mono_string_handle_to_utf8 (MonoStringHandle s, MonoError *error);
char *
mono_string_to_utf8_image (MonoImage *image, MonoStringHandle s, MonoError *error);
MonoArrayHandle
mono_array_clone_in_domain (MonoArrayHandle array, MonoError *error);
MonoArray*
mono_array_clone_checked (MonoArray *array, MonoError *error);
void
mono_array_full_copy (MonoArray *src, MonoArray *dest);
void
mono_array_full_copy_unchecked_size (MonoArray *src, MonoArray *dest, MonoClass *klass, uintptr_t size);
gboolean
mono_array_calc_byte_len (MonoClass *klass, uintptr_t len, uintptr_t *res);
MonoArray*
mono_array_new_checked (MonoClass *eclass, uintptr_t n, MonoError *error);
MONO_COMPONENT_API MonoArray*
mono_array_new_full_checked (MonoClass *array_class, uintptr_t *lengths, intptr_t *lower_bounds, MonoError *error);
MonoArray*
mono_array_new_jagged_checked (MonoClass *klass, int n, uintptr_t *lengths, MonoError *error);
ICALL_EXPORT
MonoArray*
ves_icall_array_new_specific (MonoVTable *vtable, uintptr_t n);
gpointer
mono_create_ftnptr (gpointer addr);
gpointer
mono_get_addr_from_ftnptr (gpointer descr);
MONO_COMPONENT_API void
mono_nullable_init (guint8 *buf, MonoObject *value, MonoClass *klass);
void
mono_nullable_init_from_handle (guint8 *buf, MonoObjectHandle value, MonoClass *klass);
void
mono_nullable_init_unboxed (guint8 *buf, gpointer value, MonoClass *klass);
MONO_COMPONENT_API MonoObject *
mono_value_box_checked (MonoClass *klass, void* val, MonoError *error);
MonoObjectHandle
mono_value_box_handle (MonoClass *klass, gpointer val, MonoError *error);
MONO_COMPONENT_API MonoObject*
mono_nullable_box (gpointer buf, MonoClass *klass, MonoError *error);
MonoObjectHandle
mono_nullable_box_handle (gpointer buf, MonoClass *klass, MonoError *error);
// A code size optimization (source and object) equivalent to MONO_HANDLE_NEW (MonoObject, NULL);
MonoObjectHandle
mono_new_null (void);
#ifdef MONO_SMALL_CONFIG
#define MONO_IMT_SIZE 9
#else
#define MONO_IMT_SIZE 19
#endif
typedef union {
int vtable_slot;
gpointer target_code;
} MonoImtItemValue;
typedef struct _MonoImtBuilderEntry {
gpointer key;
struct _MonoImtBuilderEntry *next;
MonoImtItemValue value;
int children;
guint8 has_target_code : 1;
} MonoImtBuilderEntry;
typedef struct _MonoIMTCheckItem MonoIMTCheckItem;
struct _MonoIMTCheckItem {
gpointer key;
int check_target_idx;
MonoImtItemValue value;
guint8 *jmp_code;
guint8 *code_target;
guint8 is_equals;
guint8 compare_done;
guint8 chunk_size;
guint8 short_branch;
guint8 has_target_code;
};
typedef gpointer (*MonoImtTrampolineBuilder) (MonoVTable *vtable, MonoIMTCheckItem **imt_entries, int count, gpointer fail_trunk);
void
mono_install_imt_trampoline_builder (MonoImtTrampolineBuilder func);
void
mono_set_always_build_imt_trampolines (gboolean value);
void
mono_vtable_build_imt_slot (MonoVTable* vtable, int imt_slot);
guint32
mono_method_get_imt_slot (MonoMethod *method);
void
mono_method_add_generic_virtual_invocation (MonoVTable *vtable,
gpointer *vtable_slot,
MonoMethod *method, gpointer code);
void
mono_unhandled_exception_checked (MonoObjectHandle exc, MonoError *error);
void
mono_first_chance_exception_checked (MonoObjectHandle exc, MonoError *error);
void
mono_first_chance_exception_internal (MonoObject *exc_raw);
MonoVTable *
mono_class_try_get_vtable (MonoClass *klass);
gboolean
mono_runtime_run_module_cctor (MonoImage *image, MonoError *error);
MONO_COMPONENT_API gboolean
mono_runtime_class_init_full (MonoVTable *vtable, MonoError *error);
void
mono_method_clear_object (MonoMethod *method);
gsize*
mono_class_compute_bitmap (MonoClass *klass, gsize *bitmap, int size, int offset, int *max_set, gboolean static_fields);
gboolean
mono_class_is_reflection_method_or_constructor (MonoClass *klass);
MonoObjectHandle
mono_get_object_from_blob (MonoType *type, const char *blob, MonoStringHandleOut string_handle, MonoError *error);
gboolean
mono_class_has_ref_info (MonoClass *klass);
MonoReflectionTypeBuilder*
mono_class_get_ref_info_raw (MonoClass *klass);
void
mono_class_set_ref_info (MonoClass *klass, MonoObjectHandle obj);
void
mono_class_free_ref_info (MonoClass *klass);
MONO_COMPONENT_API MonoObject *
mono_object_new_pinned (MonoClass *klass, MonoError *error);
MonoObjectHandle
mono_object_new_pinned_handle (MonoClass *klass, MonoError *error);
MonoObject *
mono_object_new_specific_checked (MonoVTable *vtable, MonoError *error);
ICALL_EXPORT
MonoObject *
ves_icall_object_new (MonoClass *klass);
ICALL_EXPORT
MonoObject *
ves_icall_object_new_specific (MonoVTable *vtable);
MonoObject *
mono_object_new_alloc_specific_checked (MonoVTable *vtable, MonoError *error);
void
mono_field_get_value_internal (MonoObject *obj, MonoClassField *field, void *value);
MONO_COMPONENT_API void
mono_field_static_get_value_checked (MonoVTable *vt, MonoClassField *field, void *value, MonoStringHandleOut string_handle, MonoError *error);
MONO_COMPONENT_API void
mono_field_static_get_value_for_thread (MonoInternalThread *thread, MonoVTable *vt, MonoClassField *field, void *value, MonoStringHandleOut string_handle, MonoError *error);
guint8*
mono_static_field_get_addr (MonoVTable *vt, MonoClassField *field);
MonoMethod*
mono_object_handle_get_virtual_method (MonoObjectHandle obj, MonoMethod *method, MonoError *error);
/* exported, used by the debugger */
MONO_API void *
mono_vtable_get_static_field_data (MonoVTable *vt);
MonoObject *
mono_field_get_value_object_checked (MonoClassField *field, MonoObject *obj, MonoError *error);
MonoObjectHandle
mono_static_field_get_value_handle (MonoClassField *field, MonoError *error);
MONO_COMPONENT_API gpointer
mono_special_static_field_get_offset (MonoClassField *field, MonoError *error);
gboolean
mono_property_set_value_handle (MonoProperty *prop, MonoObjectHandle obj, void **params, MonoError *error);
MonoObject*
mono_property_get_value_checked (MonoProperty *prop, void *obj, void **params, MonoError *error);
MonoString*
mono_object_try_to_string (MonoObject *obj, MonoObject **exc, MonoError *error);
char *
mono_string_to_utf8_ignore (MonoString *s);
gboolean
mono_monitor_is_il_fastpath_wrapper (MonoMethod *method);
MonoStringHandle
mono_string_is_interned_lookup (MonoStringHandle str, gboolean insert, MonoError *error);
/**
* mono_string_intern_checked:
* \param str String to intern
* \param error set on error.
* Interns the string passed.
* \returns The interned string. On failure returns NULL and sets \p error
*/
#define mono_string_intern_checked(str, error) (mono_string_is_interned_lookup ((str), TRUE, (error)))
/**
* mono_string_is_interned_internal:
 * \param str String to probe
* \returns Whether the string has been interned.
*/
#define mono_string_is_interned_internal(str, error) (mono_string_is_interned_lookup ((str), FALSE, (error)))
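/* Illustrative sketch -- not part of the original header; `sample_intern_or_keep` is a
 * hypothetical helper. It interns a string via the macro above and falls back to the
 * original handle when interning fails (the failure is reported through `error`). */
static inline MonoStringHandle
sample_intern_or_keep (MonoStringHandle str, MonoError *error)
{
	MonoStringHandle interned = mono_string_intern_checked (str, error);
	return is_ok (error) ? interned : str;
}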
char *
mono_exception_handle_get_native_backtrace (MonoExceptionHandle exc);
char *
mono_exception_get_managed_backtrace (MonoException *exc);
gboolean
mono_exception_try_get_managed_backtrace (MonoException *exc, const char *prefix, char **result);
void
mono_copy_value (MonoType *type, void *dest, void *value, int deref_pointer);
void
mono_error_raise_exception_deprecated (MonoError *target_error);
gboolean
mono_error_set_pending_exception_slow (MonoError *error);
static inline gboolean
mono_error_set_pending_exception (MonoError *error)
{
return is_ok (error) ? FALSE : mono_error_set_pending_exception_slow (error);
}
MonoArray *
mono_glist_to_array (GList *list, MonoClass *eclass, MonoError *error);
MONO_COMPONENT_API MonoObject *
mono_object_new_checked (MonoClass *klass, MonoError *error);
MonoObjectHandle
mono_object_new_handle (MonoClass *klass, MonoError *error);
// This function skips handling of remoting and COM.
// "alloc" in the name means "less": this variant only allocates, without the extra handling.
MonoObjectHandle
mono_object_new_alloc_by_vtable (MonoVTable *vtable, MonoError *error);
MonoObject*
mono_object_new_mature (MonoVTable *vtable, MonoError *error);
MonoObjectHandle
mono_object_new_handle_mature (MonoVTable *vtable, MonoError *error);
MonoObject *
mono_object_clone_checked (MonoObject *obj, MonoError *error);
MonoObjectHandle
mono_object_clone_handle (MonoObjectHandle obj, MonoError *error);
MONO_COMPONENT_API MonoObject *
mono_object_isinst_checked (MonoObject *obj, MonoClass *klass, MonoError *error);
MonoObjectHandle
mono_object_handle_isinst (MonoObjectHandle obj, MonoClass *klass, MonoError *error);
MonoObjectHandle
mono_object_handle_isinst_mbyref (MonoObjectHandle obj, MonoClass *klass, MonoError *error);
gboolean
mono_object_handle_isinst_mbyref_raw (MonoObjectHandle obj, MonoClass *klass, MonoError *error);
MonoStringHandle
mono_string_new_size_handle (gint32 len, MonoError *error);
MonoString*
mono_string_new_len_checked (const char *text, guint length, MonoError *error);
MonoString *
mono_string_new_size_checked (gint32 len, MonoError *error);
MONO_COMPONENT_API MonoString*
mono_ldstr_checked (MonoImage *image, uint32_t str_index, MonoError *error);
MonoStringHandle
mono_ldstr_handle (MonoImage *image, uint32_t str_index, MonoError *error);
MONO_PROFILER_API MonoString*
mono_string_new_checked (const char *text, MonoError *merror);
MonoString*
mono_string_new_wtf8_len_checked (const char *text, guint length, MonoError *error);
MonoString *
mono_string_new_utf16_checked (const gunichar2 *text, gint32 len, MonoError *error);
MonoStringHandle
mono_string_new_utf16_handle (const gunichar2 *text, gint32 len, MonoError *error);
MonoStringHandle
mono_string_new_utf8_len (const char *text, guint length, MonoError *error);
MonoString *
mono_string_from_utf16_checked (const mono_unichar2 *data, MonoError *error);
MonoString *
mono_string_from_utf32_checked (const mono_unichar4 *data, MonoError *error);
char*
mono_ldstr_utf8 (MonoImage *image, guint32 idx, MonoError *error);
MONO_COMPONENT_API
char*
mono_utf16_to_utf8 (const mono_unichar2 *s, gsize slength, MonoError *error);
char*
mono_utf16_to_utf8len (const mono_unichar2 *s, gsize slength, gsize *utf8_length, MonoError *error);
gboolean
mono_runtime_object_init_checked (MonoObject *this_obj, MonoError *error);
MONO_PROFILER_API MonoObject*
mono_runtime_try_invoke (MonoMethod *method, void *obj, void **params, MonoObject **exc, MonoError *error);
// The exc parameter is deliberately missing and so far this has proven to reduce code duplication.
// In particular, if an exception is raised by an otherwise successful underlying call,
// it is set into the MonoError with mono_error_set_exception_instance.
// The result is that the caller need only check the MonoError.
MONO_COMPONENT_API MonoObjectHandle
mono_runtime_try_invoke_handle (MonoMethod *method, MonoObjectHandle obj, void **params, MonoError* error);
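/* Illustrative caller sketch -- not part of the original header; `sample_invoke_succeeded`
 * is a hypothetical helper. Per the convention described above, the caller inspects only
 * the MonoError: any managed exception raised by the call is already attached to it. */
static inline gboolean
sample_invoke_succeeded (MonoMethod *method, MonoObjectHandle obj, void **params, MonoError *error)
{
	MonoObjectHandle result = mono_runtime_try_invoke_handle (method, obj, params, error);
	(void) result; /* the invocation result is unused in this sketch */
	return is_ok (error);
}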
MONO_COMPONENT_API MonoObject*
mono_runtime_invoke_checked (MonoMethod *method, void *obj, void **params, MonoError *error);
MonoObjectHandle
mono_runtime_invoke_handle (MonoMethod *method, MonoObjectHandle obj, void **params, MonoError* error);
void
mono_runtime_invoke_handle_void (MonoMethod *method, MonoObjectHandle obj, void **params, MonoError* error);
MonoObject*
mono_runtime_try_invoke_array (MonoMethod *method, void *obj, MonoArray *params,
MonoObject **exc, MonoError *error);
MonoObject*
mono_runtime_invoke_span_checked (MonoMethod *method, void *obj, MonoSpanOfObjects *params,
MonoError *error);
void*
mono_compile_method_checked (MonoMethod *method, MonoError *error);
MonoObject*
mono_runtime_delegate_try_invoke (MonoObject *delegate, void **params,
MonoObject **exc, MonoError *error);
MonoObject*
mono_runtime_delegate_invoke_checked (MonoObject *delegate, void **params,
MonoError *error);
MonoArrayHandle
mono_runtime_get_main_args_handle (MonoError *error);
int
mono_runtime_run_main_checked (MonoMethod *method, int argc, char* argv[],
MonoError *error);
int
mono_runtime_try_run_main (MonoMethod *method, int argc, char* argv[],
MonoObject **exc);
int
mono_runtime_exec_main_checked (MonoMethod *method, MonoArray *args, MonoError *error);
int
mono_runtime_try_exec_main (MonoMethod *method, MonoArray *args, MonoObject **exc);
MonoAssembly*
mono_try_assembly_resolve_handle (MonoAssemblyLoadContext *alc, MonoStringHandle fname, MonoAssembly *requesting, MonoError *error);
gboolean
mono_runtime_object_init_handle (MonoObjectHandle this_obj, MonoError *error);
/* GC write barriers support */
void
mono_gc_wbarrier_object_copy_handle (MonoObjectHandle obj, MonoObjectHandle src);
MonoMethod*
mono_class_get_virtual_method (MonoClass *klass, MonoMethod *method, MonoError *error);
MonoStringHandle
mono_string_empty_handle (void);
/*
* mono_object_get_data:
*
* Return a pointer to the beginning of data inside a MonoObject.
*/
static inline gpointer
mono_object_get_data (MonoObject *o)
{
return (guint8*)o + MONO_ABI_SIZEOF (MonoObject);
}
#define mono_handle_get_data_unsafe(handle) ((gpointer)((guint8*)MONO_HANDLE_RAW (handle) + MONO_ABI_SIZEOF (MonoObject)))
MONO_COMPONENT_API gpointer
mono_vtype_get_field_addr (gpointer vtype, MonoClassField *field);
#define MONO_OBJECT_SETREF_INTERNAL(obj,fieldname,value) do { \
mono_gc_wbarrier_set_field_internal ((MonoObject*)(obj), &((obj)->fieldname), (MonoObject*)value); \
/*(obj)->fieldname = (value);*/ \
} while (0)
/* This should be used if 's' can reside on the heap */
#define MONO_STRUCT_SETREF_INTERNAL(s,field,value) do { \
mono_gc_wbarrier_generic_store_internal (&((s)->field), (MonoObject*)(value)); \
} while (0)
static inline gunichar2*
mono_string_chars_internal (MonoString *s)
{
MONO_REQ_GC_UNSAFE_MODE;
return s->chars;
}
static inline int
mono_string_length_internal (MonoString *s)
{
MONO_REQ_GC_UNSAFE_MODE;
return s->length;
}
MonoString*
mono_string_empty_internal (MonoDomain *domain);
char *
mono_string_to_utf8len (MonoStringHandle s, gsize *utf8len, MonoError *error);
MONO_COMPONENT_API char*
mono_string_to_utf8_checked_internal (MonoString *string_obj, MonoError *error);
mono_bool
mono_string_equal_internal (MonoString *s1, MonoString *s2);
unsigned
mono_string_hash_internal (MonoString *s);
MONO_COMPONENT_API int
mono_object_hash_internal (MonoObject* obj);
ICALL_EXPORT
void
mono_value_copy_internal (void* dest, const void* src, MonoClass *klass);
void
mono_value_copy_array_internal (MonoArray *dest, int dest_idx, const void* src, int count);
MONO_PROFILER_API MonoVTable* mono_object_get_vtable_internal (MonoObject *obj);
MonoDomain*
mono_object_get_domain_internal (MonoObject *obj);
static inline gpointer
mono_object_unbox_internal (MonoObject *obj)
{
/* add assert for valuetypes? */
g_assert (m_class_is_valuetype (mono_object_class (obj)));
return mono_object_get_data (obj);
}
ICALL_EXPORT
void
mono_monitor_exit_internal (MonoObject *obj);
MONO_PROFILER_API unsigned mono_object_get_size_internal (MonoObject *o);
MONO_PROFILER_API MonoDomain* mono_vtable_domain_internal (MonoVTable *vtable);
MONO_PROFILER_API MonoClass* mono_vtable_class_internal (MonoVTable *vtable);
MONO_COMPONENT_API MonoMethod*
mono_object_get_virtual_method_internal (MonoObject *obj, MonoMethod *method);
MonoMethod*
mono_get_delegate_invoke_internal (MonoClass *klass);
MonoMethod*
mono_get_delegate_begin_invoke_internal (MonoClass *klass);
MonoMethod*
mono_get_delegate_end_invoke_internal (MonoClass *klass);
void
mono_unhandled_exception_internal (MonoObject *exc);
void
mono_print_unhandled_exception_internal (MonoObject *exc);
void
mono_raise_exception_internal (MonoException *ex);
void
mono_field_set_value_internal (MonoObject *obj, MonoClassField *field, void *value);
MONO_COMPONENT_API void
mono_field_static_set_value_internal (MonoVTable *vt, MonoClassField *field, void *value);
void
mono_field_get_value_internal (MonoObject *obj, MonoClassField *field, void *value);
MonoMethod* mono_get_context_capture_method (void);
guint8*
mono_runtime_get_aotid_arr (void);
/* GC handles support
*
* A handle can be created to refer to a managed object and either prevent it
* from being garbage collected or moved or to be able to know if it has been
* collected or not (weak references).
* mono_gchandle_new () is used to prevent an object from being garbage collected
* until mono_gchandle_free() is called. Use a TRUE value for the pinned argument to
* prevent the object from being moved (this should be avoided as much as possible
 * and this should be used only for short periods of time or performance will suffer).
* To create a weakref use mono_gchandle_new_weakref (): track_resurrection should
* usually be false (see the GC docs for more details).
* mono_gchandle_get_target () can be used to get the object referenced by both kinds
* of handle: for a weakref handle, if an object has been collected, it will return NULL.
*/
MonoGCHandle
mono_gchandle_new_internal (MonoObject *obj, mono_bool pinned);
MONO_COMPONENT_API MonoGCHandle
mono_gchandle_new_weakref_internal (MonoObject *obj, mono_bool track_resurrection);
MONO_COMPONENT_API
MonoObject*
mono_gchandle_get_target_internal (MonoGCHandle gchandle);
MONO_COMPONENT_API void mono_gchandle_free_internal (MonoGCHandle gchandle);
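/* Illustrative usage sketch -- not part of the original header; `sample_pin_for_native_use`
 * is a hypothetical helper. It pins an object while native code holds a raw pointer to it,
 * then frees the handle so the GC may once again move or collect the object. */
static inline void
sample_pin_for_native_use (MonoObject *obj)
{
	MonoGCHandle handle = mono_gchandle_new_internal (obj, /* pinned */ TRUE);
	g_assert (mono_gchandle_get_target_internal (handle) == obj);
	mono_gchandle_free_internal (handle);
}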
/* Reference queue support
*
* A reference queue is used to get notifications of when objects are collected.
* Call mono_gc_reference_queue_new to create a new queue and pass the callback that
* will be invoked when registered objects are collected.
* Call mono_gc_reference_queue_add to register a pair of objects and data within a queue.
* The callback will be triggered once an object is both unreachable and finalized.
*/
MonoReferenceQueue*
mono_gc_reference_queue_new_internal (mono_reference_queue_callback callback);
void
mono_gc_reference_queue_free_internal (MonoReferenceQueue *queue);
mono_bool
mono_gc_reference_queue_add_internal (MonoReferenceQueue *queue, MonoObject *obj, void *user_data);
#define mono_gc_reference_queue_add_handle(queue, obj, user_data) \
(mono_gc_reference_queue_add_internal ((queue), MONO_HANDLE_RAW (MONO_HANDLE_CAST (MonoObject, obj)), (user_data)))
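/* Illustrative sketch -- not part of the original header; the `sample_*` names are
 * hypothetical. A native resource is paired with a managed object; the callback runs once
 * the object is both unreachable and finalized, as described above. */
static void
sample_reference_queue_callback (void *user_data)
{
	g_free (user_data); /* release the native resource registered with the object */
}
static inline void
sample_track_native_resource (MonoObject *obj, void *native_resource)
{
	MonoReferenceQueue *queue = mono_gc_reference_queue_new_internal (sample_reference_queue_callback);
	mono_gc_reference_queue_add_internal (queue, obj, native_resource);
}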
/* GC write barriers support */
void
mono_gc_wbarrier_set_field_internal (MonoObject *obj, void* field_ptr, MonoObject* value);
void
mono_gc_wbarrier_set_arrayref_internal (MonoArray *arr, void* slot_ptr, MonoObject* value);
void
mono_gc_wbarrier_arrayref_copy_internal (void* dest_ptr, const void* src_ptr, int count);
MONO_COMPONENT_API void
mono_gc_wbarrier_generic_store_internal (void volatile* ptr, MonoObject* value);
void
mono_gc_wbarrier_generic_store_atomic_internal (void *ptr, MonoObject *value);
ICALL_EXPORT
void
mono_gc_wbarrier_generic_nostore_internal (void* ptr);
void
mono_gc_wbarrier_value_copy_internal (void* dest, const void* src, int count, MonoClass *klass);
void
mono_gc_wbarrier_object_copy_internal (MonoObject* obj, MonoObject *src);
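/* Illustrative sketch -- not part of the original header; `SampleNode` and
 * `sample_store_reference` are hypothetical. Reference stores into heap objects go through
 * MONO_OBJECT_SETREF_INTERNAL so the GC write barriers declared above are honored. */
typedef struct {
	MonoObject object;
	MonoObject *next;
} SampleNode;
static inline void
sample_store_reference (SampleNode *node, MonoObject *value)
{
	MONO_OBJECT_SETREF_INTERNAL (node, next, value);
}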
MONO_COMPONENT_API char *
mono_runtime_get_managed_cmd_line (void);
#ifdef HOST_WASM
int
mono_string_instance_is_interned (MonoString *str);
#endif
gpointer
mono_method_get_unmanaged_wrapper_ftnptr_internal (MonoMethod *method, gboolean only_unmanaged_callers_only, MonoError *error);
#endif /* __MONO_OBJECT_INTERNALS_H__ */
| -1 |
dotnet/runtime | 65,922 | Rely on PGO for isinst/castclass | Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | EgorBo | "2022-02-26T22:13:46Z" | "2022-04-01T21:25:56Z" | 063d08018602291f8dbd9d9b6055b8e1af874cb8 | f249a3d58b0b2bb55adca1a56c5a39cf446437b7 | Rely on PGO for isinst/castclass. Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | ./src/mono/mono/utils/linux_magic.h | /**
* \file
*/
#ifndef __LINUX_MAGIC_H
#define __LINUX_MAGIC_H
#if __linux__
#if HAVE_LINUX_MAGIC_H
#include <linux/magic.h>
#endif
#ifndef ADFS_SUPER_MAGIC
#define ADFS_SUPER_MAGIC 0xadf5
#endif
#ifndef AFFS_SUPER_MAGIC
#define AFFS_SUPER_MAGIC 0xadff
#endif
#ifndef AFS_SUPER_MAGIC
#define AFS_SUPER_MAGIC 0x5346414F
#endif
#ifndef AUTOFS_SUPER_MAGIC
#define AUTOFS_SUPER_MAGIC 0x0187
#endif
#ifndef AUTOFS_SBI_MAGIC
#define AUTOFS_SBI_MAGIC 0x6d4a556d
#endif
#ifndef CODA_SUPER_MAGIC
#define CODA_SUPER_MAGIC 0x73757245
#endif
#ifndef CRAMFS_MAGIC
#define CRAMFS_MAGIC 0x28cd3d45
#endif
#ifndef CRAMFS_MAGIC_WEND
#define CRAMFS_MAGIC_WEND 0x453dcd28
#endif
#ifndef DEBUGFS_MAGIC
#define DEBUGFS_MAGIC 0x64626720
#endif
#ifndef SYSFS_MAGIC
#define SYSFS_MAGIC 0x62656572
#endif
#ifndef SECURITYFS_MAGIC
#define SECURITYFS_MAGIC 0x73636673
#endif
#ifndef SELINUX_MAGIC
#define SELINUX_MAGIC 0xf97cff8c
#endif
#ifndef RAMFS_MAGIC
#define RAMFS_MAGIC 0x858458f6
#endif
#ifndef TMPFS_MAGIC
#define TMPFS_MAGIC 0x01021994
#endif
#ifndef HUGETLBFS_MAGIC
#define HUGETLBFS_MAGIC 0x958458f6
#endif
#ifndef SQUASHFS_MAGIC
#define SQUASHFS_MAGIC 0x73717368
#endif
#ifndef EFS_SUPER_MAGIC
#define EFS_SUPER_MAGIC 0x414A53
#endif
#ifndef EXT2_SUPER_MAGIC
#define EXT2_SUPER_MAGIC 0xEF53
#endif
#ifndef EXT3_SUPER_MAGIC
#define EXT3_SUPER_MAGIC 0xEF53
#endif
#ifndef XENFS_SUPER_MAGIC
#define XENFS_SUPER_MAGIC 0xabba1974
#endif
#ifndef EXT4_SUPER_MAGIC
#define EXT4_SUPER_MAGIC 0xEF53
#endif
#ifndef BTRFS_SUPER_MAGIC
#define BTRFS_SUPER_MAGIC 0x9123683E
#endif
#ifndef HPFS_SUPER_MAGIC
#define HPFS_SUPER_MAGIC 0xf995e849
#endif
#ifndef ISOFS_SUPER_MAGIC
#define ISOFS_SUPER_MAGIC 0x9660
#endif
#ifndef JFFS2_SUPER_MAGIC
#define JFFS2_SUPER_MAGIC 0x72b6
#endif
#ifndef JFS_SUPER_MAGIC
#define JFS_SUPER_MAGIC 0x3153464a
#endif
#ifndef ANON_INODE_FS_MAGIC
#define ANON_INODE_FS_MAGIC 0x09041934
#endif
#ifndef MINIX_SUPER_MAGIC
#define MINIX_SUPER_MAGIC 0x137F
#endif
#ifndef MINIX_SUPER_MAGIC2
#define MINIX_SUPER_MAGIC2 0x138F
#endif
#ifndef MINIX2_SUPER_MAGIC
#define MINIX2_SUPER_MAGIC 0x2468
#endif
#ifndef MINIX2_SUPER_MAGIC2
#define MINIX2_SUPER_MAGIC2 0x2478
#endif
#ifndef MINIX3_SUPER_MAGIC
#define MINIX3_SUPER_MAGIC 0x4d5a
#endif
#ifndef MSDOS_SUPER_MAGIC
#define MSDOS_SUPER_MAGIC 0x4d44
#endif
#ifndef NCP_SUPER_MAGIC
#define NCP_SUPER_MAGIC 0x564c
#endif
#ifndef NFS_SUPER_MAGIC
#define NFS_SUPER_MAGIC 0x6969
#endif
#ifndef OPENPROM_SUPER_MAGIC
#define OPENPROM_SUPER_MAGIC 0x9fa1
#endif
#ifndef PROC_SUPER_MAGIC
#define PROC_SUPER_MAGIC 0x9fa0
#endif
#ifndef QNX4_SUPER_MAGIC
#define QNX4_SUPER_MAGIC 0x002f
#endif
#ifndef REISERFS_SUPER_MAGIC
#define REISERFS_SUPER_MAGIC 0x52654973
#endif
#ifndef SMB_SUPER_MAGIC
#define SMB_SUPER_MAGIC 0x517B
#endif
#ifndef USBDEVICE_SUPER_MAGIC
#define USBDEVICE_SUPER_MAGIC 0x9fa2
#endif
#ifndef CGROUP_SUPER_MAGIC
#define CGROUP_SUPER_MAGIC 0x27e0eb
#endif
#ifndef FUTEXFS_SUPER_MAGIC
#define FUTEXFS_SUPER_MAGIC 0xBAD1DEA
#endif
#ifndef DEVPTS_SUPER_MAGIC
#define DEVPTS_SUPER_MAGIC 0x1cd1
#endif
#ifndef CIFS_MAGIC_NUMBER
#define CIFS_MAGIC_NUMBER 0xFF534D42
#endif
#ifndef BEFS_SUPER_MAGIC1
#define BEFS_SUPER_MAGIC1 0x42465331
#endif
#ifndef BEFS_SUPER_MAGIC2
#define BEFS_SUPER_MAGIC2 0xdd121031
#endif
#ifndef BEFS_SUPER_MAGIC3
#define BEFS_SUPER_MAGIC3 0x15b6830e
#endif
#ifndef BFS_MAGIC
#define BFS_MAGIC 0x1BADFACE
#endif
#ifndef NTFS_SB_MAGIC
#define NTFS_SB_MAGIC 0x5346544e
#endif
enum {
MONO_SYSV_FSTYPE_NONE = 0,
MONO_SYSV_FSTYPE_XENIX,
MONO_SYSV_FSTYPE_SYSV4,
MONO_SYSV_FSTYPE_SYSV2,
MONO_SYSV_FSTYPE_COH,
};
#ifndef SYSV_MAGIC_BASE
#define SYSV_MAGIC_BASE 0x012FF7B3
#endif
#ifndef XENIX_SUPER_MAGIC
#define XENIX_SUPER_MAGIC (SYSV_MAGIC_BASE+MONO_SYSV_FSTYPE_XENIX)
#endif
#ifndef SYSV4_SUPER_MAGIC
#define SYSV4_SUPER_MAGIC (SYSV_MAGIC_BASE+MONO_SYSV_FSTYPE_SYSV4)
#endif
#ifndef SYSV2_SUPER_MAGIC
#define SYSV2_SUPER_MAGIC (SYSV_MAGIC_BASE+MONO_SYSV_FSTYPE_SYSV2)
#endif
#ifndef COH_SUPER_MAGIC
#define COH_SUPER_MAGIC (SYSV_MAGIC_BASE+MONO_SYSV_FSTYPE_COH)
#endif
#ifndef UFS_MAGIC
#define UFS_MAGIC 0x00011954
#endif
#ifndef UFS_MAGIC_BW
#define UFS_MAGIC_BW 0x0f242697
#endif
#ifndef UFS2_MAGIC
#define UFS2_MAGIC 0x19540119
#endif
#ifndef UFS_CIGAM
#define UFS_CIGAM 0x54190100
#endif
#ifndef UDF_SUPER_MAGIC
#define UDF_SUPER_MAGIC 0x15013346
#endif
#ifndef XFS_SB_MAGIC
#define XFS_SB_MAGIC 0x58465342
#endif
#ifndef FUSE_SUPER_MAGIC
#define FUSE_SUPER_MAGIC 0x65735546
#endif
#ifndef V9FS_MAGIC
#define V9FS_MAGIC 0x01021997
#endif
#ifndef CEPH_SUPER_MAGIC
#define CEPH_SUPER_MAGIC 0x00c36400
#endif
#ifndef CONFIGFS_MAGIC
#define CONFIGFS_MAGIC 0x62656570
#endif
#ifndef ECRYPTFS_SUPER_MAGIC
#define ECRYPTFS_SUPER_MAGIC 0xf15f
#endif
#ifndef EXOFS_SUPER_MAGIC
#define EXOFS_SUPER_MAGIC 0x5df5
#endif
#ifndef VXFS_SUPER_MAGIC
#define VXFS_SUPER_MAGIC 0xa501fcf5
#endif
#ifndef VXFS_OLT_MAGIC
#define VXFS_OLT_MAGIC 0xa504fcf5
#endif
#ifndef GFS2_MAGIC
#define GFS2_MAGIC 0x01161970
#endif
#ifndef HFS_SUPER_MAGIC
#define HFS_SUPER_MAGIC 0x4244
#endif
#ifndef HFSPLUS_SUPER_MAGIC
#define HFSPLUS_SUPER_MAGIC 0x482b
#endif
#ifndef LOGFS_MAGIC_U32
#define LOGFS_MAGIC_U32 0xc97e8168
#endif
#ifndef OCFS2_SUPER_MAGIC
#define OCFS2_SUPER_MAGIC 0x7461636f
#endif
#ifndef OMFS_MAGIC
#define OMFS_MAGIC 0xc2993d87
#endif
#ifndef UBIFS_SUPER_MAGIC
#define UBIFS_SUPER_MAGIC 0x24051905
#endif
#ifndef ROMFS_MAGIC
#define ROMFS_MAGIC 0x7275
#endif
#endif
#endif
| /**
* \file
*/
#ifndef __LINUX_MAGIC_H
#define __LINUX_MAGIC_H
#if __linux__
#if HAVE_LINUX_MAGIC_H
#include <linux/magic.h>
#endif
#ifndef ADFS_SUPER_MAGIC
#define ADFS_SUPER_MAGIC 0xadf5
#endif
#ifndef AFFS_SUPER_MAGIC
#define AFFS_SUPER_MAGIC 0xadff
#endif
#ifndef AFS_SUPER_MAGIC
#define AFS_SUPER_MAGIC 0x5346414F
#endif
#ifndef AUTOFS_SUPER_MAGIC
#define AUTOFS_SUPER_MAGIC 0x0187
#endif
#ifndef AUTOFS_SBI_MAGIC
#define AUTOFS_SBI_MAGIC 0x6d4a556d
#endif
#ifndef CODA_SUPER_MAGIC
#define CODA_SUPER_MAGIC 0x73757245
#endif
#ifndef CRAMFS_MAGIC
#define CRAMFS_MAGIC 0x28cd3d45
#endif
#ifndef CRAMFS_MAGIC_WEND
#define CRAMFS_MAGIC_WEND 0x453dcd28
#endif
#ifndef DEBUGFS_MAGIC
#define DEBUGFS_MAGIC 0x64626720
#endif
#ifndef SYSFS_MAGIC
#define SYSFS_MAGIC 0x62656572
#endif
#ifndef SECURITYFS_MAGIC
#define SECURITYFS_MAGIC 0x73636673
#endif
#ifndef SELINUX_MAGIC
#define SELINUX_MAGIC 0xf97cff8c
#endif
#ifndef RAMFS_MAGIC
#define RAMFS_MAGIC 0x858458f6
#endif
#ifndef TMPFS_MAGIC
#define TMPFS_MAGIC 0x01021994
#endif
#ifndef HUGETLBFS_MAGIC
#define HUGETLBFS_MAGIC 0x958458f6
#endif
#ifndef SQUASHFS_MAGIC
#define SQUASHFS_MAGIC 0x73717368
#endif
#ifndef EFS_SUPER_MAGIC
#define EFS_SUPER_MAGIC 0x414A53
#endif
#ifndef EXT2_SUPER_MAGIC
#define EXT2_SUPER_MAGIC 0xEF53
#endif
#ifndef EXT3_SUPER_MAGIC
#define EXT3_SUPER_MAGIC 0xEF53
#endif
#ifndef XENFS_SUPER_MAGIC
#define XENFS_SUPER_MAGIC 0xabba1974
#endif
#ifndef EXT4_SUPER_MAGIC
#define EXT4_SUPER_MAGIC 0xEF53
#endif
#ifndef BTRFS_SUPER_MAGIC
#define BTRFS_SUPER_MAGIC 0x9123683E
#endif
#ifndef HPFS_SUPER_MAGIC
#define HPFS_SUPER_MAGIC 0xf995e849
#endif
#ifndef ISOFS_SUPER_MAGIC
#define ISOFS_SUPER_MAGIC 0x9660
#endif
#ifndef JFFS2_SUPER_MAGIC
#define JFFS2_SUPER_MAGIC 0x72b6
#endif
#ifndef JFS_SUPER_MAGIC
#define JFS_SUPER_MAGIC 0x3153464a
#endif
#ifndef ANON_INODE_FS_MAGIC
#define ANON_INODE_FS_MAGIC 0x09041934
#endif
#ifndef MINIX_SUPER_MAGIC
#define MINIX_SUPER_MAGIC 0x137F
#endif
#ifndef MINIX_SUPER_MAGIC2
#define MINIX_SUPER_MAGIC2 0x138F
#endif
#ifndef MINIX2_SUPER_MAGIC
#define MINIX2_SUPER_MAGIC 0x2468
#endif
#ifndef MINIX2_SUPER_MAGIC2
#define MINIX2_SUPER_MAGIC2 0x2478
#endif
#ifndef MINIX3_SUPER_MAGIC
#define MINIX3_SUPER_MAGIC 0x4d5a
#endif
#ifndef MSDOS_SUPER_MAGIC
#define MSDOS_SUPER_MAGIC 0x4d44
#endif
#ifndef NCP_SUPER_MAGIC
#define NCP_SUPER_MAGIC 0x564c
#endif
#ifndef NFS_SUPER_MAGIC
#define NFS_SUPER_MAGIC 0x6969
#endif
#ifndef OPENPROM_SUPER_MAGIC
#define OPENPROM_SUPER_MAGIC 0x9fa1
#endif
#ifndef PROC_SUPER_MAGIC
#define PROC_SUPER_MAGIC 0x9fa0
#endif
#ifndef QNX4_SUPER_MAGIC
#define QNX4_SUPER_MAGIC 0x002f
#endif
#ifndef REISERFS_SUPER_MAGIC
#define REISERFS_SUPER_MAGIC 0x52654973
#endif
#ifndef SMB_SUPER_MAGIC
#define SMB_SUPER_MAGIC 0x517B
#endif
#ifndef USBDEVICE_SUPER_MAGIC
#define USBDEVICE_SUPER_MAGIC 0x9fa2
#endif
#ifndef CGROUP_SUPER_MAGIC
#define CGROUP_SUPER_MAGIC 0x27e0eb
#endif
#ifndef FUTEXFS_SUPER_MAGIC
#define FUTEXFS_SUPER_MAGIC 0xBAD1DEA
#endif
#ifndef DEVPTS_SUPER_MAGIC
#define DEVPTS_SUPER_MAGIC 0x1cd1
#endif
#ifndef CIFS_MAGIC_NUMBER
#define CIFS_MAGIC_NUMBER 0xFF534D42
#endif
#ifndef BEFS_SUPER_MAGIC1
#define BEFS_SUPER_MAGIC1 0x42465331
#endif
#ifndef BEFS_SUPER_MAGIC2
#define BEFS_SUPER_MAGIC2 0xdd121031
#endif
#ifndef BEFS_SUPER_MAGIC3
#define BEFS_SUPER_MAGIC3 0x15b6830e
#endif
#ifndef BFS_MAGIC
#define BFS_MAGIC 0x1BADFACE
#endif
#ifndef NTFS_SB_MAGIC
#define NTFS_SB_MAGIC 0x5346544e
#endif
enum {
MONO_SYSV_FSTYPE_NONE = 0,
MONO_SYSV_FSTYPE_XENIX,
MONO_SYSV_FSTYPE_SYSV4,
MONO_SYSV_FSTYPE_SYSV2,
MONO_SYSV_FSTYPE_COH,
};
#ifndef SYSV_MAGIC_BASE
#define SYSV_MAGIC_BASE 0x012FF7B3
#endif
#ifndef XENIX_SUPER_MAGIC
#define XENIX_SUPER_MAGIC (SYSV_MAGIC_BASE+MONO_SYSV_FSTYPE_XENIX)
#endif
#ifndef SYSV4_SUPER_MAGIC
#define SYSV4_SUPER_MAGIC (SYSV_MAGIC_BASE+MONO_SYSV_FSTYPE_SYSV4)
#endif
#ifndef SYSV2_SUPER_MAGIC
#define SYSV2_SUPER_MAGIC (SYSV_MAGIC_BASE+MONO_SYSV_FSTYPE_SYSV2)
#endif
#ifndef COH_SUPER_MAGIC
#define COH_SUPER_MAGIC (SYSV_MAGIC_BASE+MONO_SYSV_FSTYPE_COH)
#endif
#ifndef UFS_MAGIC
#define UFS_MAGIC 0x00011954
#endif
#ifndef UFS_MAGIC_BW
#define UFS_MAGIC_BW 0x0f242697
#endif
#ifndef UFS2_MAGIC
#define UFS2_MAGIC 0x19540119
#endif
#ifndef UFS_CIGAM
#define UFS_CIGAM 0x54190100
#endif
#ifndef UDF_SUPER_MAGIC
#define UDF_SUPER_MAGIC 0x15013346
#endif
#ifndef XFS_SB_MAGIC
#define XFS_SB_MAGIC 0x58465342
#endif
#ifndef FUSE_SUPER_MAGIC
#define FUSE_SUPER_MAGIC 0x65735546
#endif
#ifndef V9FS_MAGIC
#define V9FS_MAGIC 0x01021997
#endif
#ifndef CEPH_SUPER_MAGIC
#define CEPH_SUPER_MAGIC 0x00c36400
#endif
#ifndef CONFIGFS_MAGIC
#define CONFIGFS_MAGIC 0x62656570
#endif
#ifndef ECRYPTFS_SUPER_MAGIC
#define ECRYPTFS_SUPER_MAGIC 0xf15f
#endif
#ifndef EXOFS_SUPER_MAGIC
#define EXOFS_SUPER_MAGIC 0x5df5
#endif
#ifndef VXFS_SUPER_MAGIC
#define VXFS_SUPER_MAGIC 0xa501fcf5
#endif
#ifndef VXFS_OLT_MAGIC
#define VXFS_OLT_MAGIC 0xa504fcf5
#endif
#ifndef GFS2_MAGIC
#define GFS2_MAGIC 0x01161970
#endif
#ifndef HFS_SUPER_MAGIC
#define HFS_SUPER_MAGIC 0x4244
#endif
#ifndef HFSPLUS_SUPER_MAGIC
#define HFSPLUS_SUPER_MAGIC 0x482b
#endif
#ifndef LOGFS_MAGIC_U32
#define LOGFS_MAGIC_U32 0xc97e8168
#endif
#ifndef OCFS2_SUPER_MAGIC
#define OCFS2_SUPER_MAGIC 0x7461636f
#endif
#ifndef OMFS_MAGIC
#define OMFS_MAGIC 0xc2993d87
#endif
#ifndef UBIFS_SUPER_MAGIC
#define UBIFS_SUPER_MAGIC 0x24051905
#endif
#ifndef ROMFS_MAGIC
#define ROMFS_MAGIC 0x7275
#endif
#endif
#endif
| -1 |
dotnet/runtime | 65,922 | Rely on PGO for isinst/castclass | Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | EgorBo | "2022-02-26T22:13:46Z" | "2022-04-01T21:25:56Z" | 063d08018602291f8dbd9d9b6055b8e1af874cb8 | f249a3d58b0b2bb55adca1a56c5a39cf446437b7 | Rely on PGO for isinst/castclass. Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | ./src/coreclr/pal/tests/palsuite/c_runtime/swscanf/test9/test9.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================================
**
** Source: test9.c
**
** Purpose: Tests swscanf with characters
**
**
**==========================================================================*/
#include <palsuite.h>
#include "../swscanf.h"
PALTEST(c_runtime_swscanf_test9_paltest_swscanf_test9, "c_runtime/swscanf/test9/paltest_swscanf_test9")
{
if (PAL_Initialize(argc, argv))
{
return FAIL;
}
DoWCharTest(convert("1234"), convert("%c"), convert("1"), 1);
DoWCharTest(convert("1234"), convert("%c"), convert("1"), 1);
DoWCharTest(convert("abc"), convert("%2c"), convert("ab"), 2);
DoWCharTest(convert(" ab"), convert("%c"), convert(" "), 1);
DoCharTest(convert("ab"), convert("%hc"), "a", 1);
DoWCharTest(convert("ab"), convert("%lc"), convert("a"), 1);
DoWCharTest(convert("ab"), convert("%Lc"), convert("a"), 1);
DoWCharTest(convert("ab"), convert("%I64c"), convert("a"), 1);
PAL_Terminate();
return PASS;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================================
**
** Source: test9.c
**
** Purpose: Tests swscanf with characters
**
**
**==========================================================================*/
#include <palsuite.h>
#include "../swscanf.h"
PALTEST(c_runtime_swscanf_test9_paltest_swscanf_test9, "c_runtime/swscanf/test9/paltest_swscanf_test9")
{
if (PAL_Initialize(argc, argv))
{
return FAIL;
}
DoWCharTest(convert("1234"), convert("%c"), convert("1"), 1);
DoWCharTest(convert("1234"), convert("%c"), convert("1"), 1);
DoWCharTest(convert("abc"), convert("%2c"), convert("ab"), 2);
DoWCharTest(convert(" ab"), convert("%c"), convert(" "), 1);
DoCharTest(convert("ab"), convert("%hc"), "a", 1);
DoWCharTest(convert("ab"), convert("%lc"), convert("a"), 1);
DoWCharTest(convert("ab"), convert("%Lc"), convert("a"), 1);
DoWCharTest(convert("ab"), convert("%I64c"), convert("a"), 1);
PAL_Terminate();
return PASS;
}
| -1 |
dotnet/runtime | 65,922 | Rely on PGO for isinst/castclass | Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | EgorBo | "2022-02-26T22:13:46Z" | "2022-04-01T21:25:56Z" | 063d08018602291f8dbd9d9b6055b8e1af874cb8 | f249a3d58b0b2bb55adca1a56c5a39cf446437b7 | Rely on PGO for isinst/castclass. Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | ./src/coreclr/pal/src/exception/machmessage.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*++
Module Name:
machmessage.h
Abstract:
Abstraction over Mach messages used during exception handling.
--*/
#include <mach/mach.h>
#include <mach/mach_error.h>
#include <mach/thread_status.h>
using namespace CorUnix;
#if HAVE_MACH_EXCEPTIONS
// The vast majority of Mach calls we make in this module are critical: we cannot recover from failures of
// these methods (principally because we're handling hardware exceptions in the context of a single dedicated
// handler thread). The following macro encapsulates checking the return code from Mach methods and emitting
// some useful data and aborting the process on failure.
#define CHECK_MACH(_msg, machret) do { \
if (machret != KERN_SUCCESS) \
{ \
char _szError[1024]; \
snprintf(_szError, ARRAY_SIZE(_szError), "%s: %u: %s", __FUNCTION__, __LINE__, _msg); \
mach_error(_szError, machret); \
abort(); \
} \
} while (false)
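// Illustrative usage sketch -- not part of the original header; SampleSuspendThread is a
// hypothetical helper. A typical call site funnels the Mach return code through CHECK_MACH,
// which aborts the process with diagnostics on any failure.
inline void SampleSuspendThread(thread_act_t thread)
{
    kern_return_t machret = thread_suspend(thread);
    CHECK_MACH("thread_suspend", machret);
}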
// This macro terminates the process with some useful debug info as above, but for the general failure points
// that have nothing to do with Mach.
#define NONPAL_RETAIL_ASSERT(_msg, ...) do { \
fprintf(stdout, "%s: %u: " _msg "\n", __FUNCTION__, __LINE__, ## __VA_ARGS__); \
fflush(stdout); \
abort(); \
} while (false)
#define NONPAL_RETAIL_ASSERTE(_expr) do { \
if (!(_expr)) \
NONPAL_RETAIL_ASSERT("ASSERT: %s\n", #_expr); \
} while (false)
#ifdef _DEBUG
#define NONPAL_TRACE_ENABLED EnvironGetenv("NONPAL_TRACING", /* copyValue */ false)
#define NONPAL_ASSERT(_msg, ...) NONPAL_RETAIL_ASSERT(_msg, __VA_ARGS__)
// Assert macro that doesn't rely on the PAL.
#define NONPAL_ASSERTE(_expr) do { \
if (!(_expr)) \
NONPAL_RETAIL_ASSERT("ASSERT: %s\n", #_expr); \
} while (false)
// Debug-only output with printf-style formatting.
#define NONPAL_TRACE(_format, ...) do { \
if (NONPAL_TRACE_ENABLED) { fprintf(stdout, "NONPAL_TRACE: " _format, ## __VA_ARGS__); fflush(stdout); } \
} while (false)
#else // _DEBUG
#define NONPAL_TRACE_ENABLED false
#define NONPAL_ASSERT(_msg, ...)
#define NONPAL_ASSERTE(_expr)
#define NONPAL_TRACE(_format, ...)
#endif // _DEBUG
class MachMessage;
// Contains all the exception and thread state information needed to forward the exception.
struct MachExceptionInfo
{
exception_type_t ExceptionType;
mach_msg_type_number_t SubcodeCount;
mach_exception_data_type_t Subcodes[2];
#if defined(HOST_AMD64)
x86_thread_state_t ThreadState;
x86_float_state_t FloatState;
x86_debug_state_t DebugState;
#elif defined(HOST_ARM64)
arm_thread_state64_t ThreadState;
arm_neon_state64_t FloatState;
arm_debug_state64_t DebugState;
#else
#error Unexpected architecture
#endif
MachExceptionInfo(mach_port_t thread, MachMessage& message);
void RestoreState(mach_port_t thread);
};
// Abstraction of a subset of Mach message types. Provides accessors that hide the subtle differences in the
// message layout of similar message types.
class MachMessage
{
public:
// The message types handled by this class. The values are the actual type codes set in the Mach message
// header.
enum MessageType
{
SET_THREAD_MESSAGE_ID = 1,
FORWARD_EXCEPTION_MESSAGE_ID = 2,
NOTIFY_SEND_ONCE_MESSAGE_ID = 71,
EXCEPTION_RAISE_64_MESSAGE_ID = 2405,
EXCEPTION_RAISE_STATE_64_MESSAGE_ID = 2406,
EXCEPTION_RAISE_STATE_IDENTITY_64_MESSAGE_ID = 2407,
EXCEPTION_RAISE_REPLY_64_MESSAGE_ID = 2505,
EXCEPTION_RAISE_STATE_REPLY_64_MESSAGE_ID = 2506,
EXCEPTION_RAISE_STATE_IDENTITY_REPLY_64_MESSAGE_ID = 2507
};
// Construct an empty message. Use Receive() to form a message that can be inspected or SendSetThread(),
// ForwardNotification() or ReplyToNotification() to construct a message and sent it.
MachMessage();
// Listen for the next message on the given port and initialize this class with the contents. The message
// type must match one of the MessageTypes indicated above (or the process will be aborted).
void Receive(mach_port_t hPort);
// Indicate whether a received message belongs to a particular semantic class.
bool IsSetThreadRequest(); // Message is a request to set the context of a particular thread
bool IsForwardExceptionRequest(); // Message is a request to forward the exception
bool IsSendOnceDestroyedNotify(); // Message is a notification that a send-once message was destroyed by the receiver
bool IsExceptionNotification(); // Message is a notification of an exception
bool IsExceptionReply(); // Message is a reply to the notification of an exception
// Get properties of a received message header.
MessageType GetMessageType(); // The message type
const char *GetMessageTypeName(); // An ASCII representation of the message type for logging purposes
mach_port_t GetLocalPort(); // The destination port the message was sent to
mach_port_t GetRemotePort(); // The source port the message came from (if a reply is expected)
// Get the properties of a set thread request. Fills in the provided context structure with the context
// from the message and returns the target thread to which the context should be applied.
thread_act_t GetThreadContext(CONTEXT *pContext);
// Returns the pal thread instance for the forward exception message
CPalThread *GetPalThread();
// Returns the exception info from the forward exception message
MachExceptionInfo *GetExceptionInfo();
// Get properties of the type-specific portion of the message. The following properties are supported by
// exception notification messages only.
thread_act_t GetThread(); // Get the faulting thread
exception_type_t GetException(); // Get the exception type (e.g. EXC_BAD_ACCESS)
int GetExceptionCodeCount(); // Get the number of exception sub-codes
mach_exception_data_type_t GetExceptionCode(int iIndex); // Get the exception sub-code at the given index
// Fetch the thread state flavor from a notification or reply message (return THREAD_STATE_NONE for the
// messages that don't contain a thread state).
thread_state_flavor_t GetThreadStateFlavor();
// Get the thread state with the given flavor from the exception or exception reply message. If the
// message doesn't contain a thread state or the flavor of the state in the message doesn't match, the
// state will be fetched directly from the target thread instead (which can be computed implicitly for
// exception messages or passed explicitly for reply messages).
mach_msg_type_number_t GetThreadState(thread_state_flavor_t eFlavor, thread_state_t pState, thread_act_t thread = NULL);
// Fetch the return code from a reply type message.
kern_return_t GetReturnCode();
// Initialize and send a request to set the register context of a particular thread.
void SendSetThread(mach_port_t hServerPort, CONTEXT *pContext);
// Initialize and send a request to forward the exception message to the notification thread
void SendForwardException(mach_port_t hServerPort, MachExceptionInfo *pExceptionInfo, CPalThread *ppalThread);
// Initialize the message (overwriting any previous content) to represent a forwarded version of the given
// exception notification message and send that message to the chain-back handler previously registered
// for the exception type being notified. The new message takes account of the fact that the target
// handler may not have requested the same notification behavior or flavor as our handler.
void ForwardNotification(MachExceptionHandler *pHandler, MachMessage& message);
// Initialize the message (overwriting any previous content) to represent a reply to the given exception
// notification and send that reply back to the original sender of the notification. This is used when our
// handler handles the exception rather than forwarding it to a chain-back handler.
void ReplyToNotification(MachMessage& message, kern_return_t eResult);
private:
// The maximum size in bytes of any Mach message we can send or receive. Calculating an exact size for
// this is non trivial (basically because of the security trailers that Mach appends) but the current
// value has proven to be more than enough so far.
static const size_t kcbMaxMessageSize = 1500;
// The following are structures describing the formats of the Mach messages we understand.
// Request to set the register context on a particular thread.
// SET_THREAD_MESSAGE_ID
struct set_thread_request_t
{
thread_act_t thread;
CONTEXT new_context;
};
// Request to forward the exception notification
// FORWARD_EXCEPTION_MESSAGE_ID
struct forward_exception_request_t
{
thread_act_t thread;
CPalThread *ppalThread;
MachExceptionInfo exception_info;
};
#pragma pack(4)
// EXCEPTION_RAISE_64_MESSAGE_ID
struct exception_raise_notification_64_t
{
mach_msg_body_t msgh_body;
mach_msg_port_descriptor_t thread_port;
mach_msg_port_descriptor_t task_port;
NDR_record_t ndr;
exception_type_t exception;
mach_msg_type_number_t code_count;
mach_exception_data_type_t code[2];
};
// EXCEPTION_RAISE_REPLY_64_MESSAGE_ID
struct exception_raise_reply_64_t
{
NDR_record_t ndr;
kern_return_t ret;
};
// EXCEPTION_RAISE_STATE_64_MESSAGE_ID
struct exception_raise_state_notification_64_t
{
NDR_record_t ndr;
exception_type_t exception;
mach_msg_type_number_t code_count;
mach_exception_data_type_t code[2];
thread_state_flavor_t flavor;
mach_msg_type_number_t old_state_count;
natural_t old_state[THREAD_STATE_MAX];
};
// EXCEPTION_RAISE_STATE_REPLY_64_MESSAGE_ID
struct exception_raise_state_reply_64_t
{
NDR_record_t ndr;
kern_return_t ret;
thread_state_flavor_t flavor;
mach_msg_type_number_t new_state_count;
natural_t new_state[THREAD_STATE_MAX];
};
// EXCEPTION_RAISE_STATE_IDENTITY_64_MESSAGE_ID
struct exception_raise_state_identity_notification_64_t
{
mach_msg_body_t msgh_body;
mach_msg_port_descriptor_t thread_port;
mach_msg_port_descriptor_t task_port;
NDR_record_t ndr;
exception_type_t exception;
mach_msg_type_number_t code_count;
mach_exception_data_type_t code[2];
thread_state_flavor_t flavor;
mach_msg_type_number_t old_state_count;
natural_t old_state[THREAD_STATE_MAX];
};
// EXCEPTION_RAISE_STATE_IDENTITY_REPLY_64_MESSAGE_ID
struct exception_raise_state_identity_reply_64_t
{
NDR_record_t ndr;
kern_return_t ret;
thread_state_flavor_t flavor;
mach_msg_type_number_t new_state_count;
natural_t new_state[THREAD_STATE_MAX];
};
#pragma pack()
// All the above messages are sent with a standard Mach header prepended. This structure unifies the
// message formats.
struct mach_message_t
{
mach_msg_header_t header;
union
{
set_thread_request_t set_thread;
forward_exception_request_t forward_exception;
exception_raise_notification_64_t raise_64;
exception_raise_state_notification_64_t raise_state_64;
exception_raise_state_identity_notification_64_t raise_state_identity_64;
exception_raise_reply_64_t raise_reply_64;
exception_raise_state_reply_64_t raise_state_reply_64;
exception_raise_state_identity_reply_64_t raise_state_identity_reply_64;
} data;
} __attribute__((packed));
// Re-initializes this data structure (to the same state as default construction, containing no message).
void ResetMessage();
// Initialize those fields of a message that are invariant. This method expects that the msgh_id field has
// been filled in prior to the call so it can determine which non-header fields to initialize.
void InitFixedFields();
// Initialize the size field of the message header (msgh_size) based on the message type and other fields.
// This should be called after all other fields have been initialized.
void InitMessageSize();
// Do the work of getting ports from the message.
// * fCalculate -- calculate the thread port if the message did not contain it.
// * fValidThread -- failfast if the message was not one expected to have a (calculable) thread port.
void GetPorts(bool fCalculate, bool fValidThread);
// Given a thread's register context, locate and return the Mach port representing that thread. Only the
// x86_THREAD_STATE and x86_THREAD_STATE32 state flavors are supported.
thread_act_t GetThreadFromState(thread_state_flavor_t eFlavor, thread_state_t pState);
// Transform an exception handler behavior type into the corresponding Mach message ID for the
// notification.
mach_msg_id_t MapBehaviorToNotificationType(exception_behavior_t eBehavior);
// Transform a Mach message ID for an exception notification into the corresponding ID for the reply.
mach_msg_id_t MapNotificationToReplyType(mach_msg_id_t eNotificationType);
// The following methods initialize fields on the message prior to transmission. Each is valid for either
// notification, replies or both. If a particular setter is defined for replies, say, then it will be a
// no-op for any replies which don't contain that field. This makes transforming between notifications and
// replies of different types simpler (we can copy a super-set of all fields between the two, but only
// those operations that make sense will do any work).
// Defined for notifications:
void SetThread(thread_act_t thread);
void SetException(exception_type_t eException);
void SetExceptionCodeCount(int cCodes);
void SetExceptionCode(int iIndex, mach_exception_data_type_t iCode);
// Defined for replies:
void SetReturnCode(kern_return_t eReturnCode);
// Defined for both notifications and replies.
void SetThreadState(thread_state_flavor_t eFlavor, thread_state_t pState, mach_msg_type_number_t count);
// Maximally sized buffer for the message to be received into or transmitted out of this class.
unsigned char m_rgMessageBuffer[kcbMaxMessageSize];
// Initialized by ResetMessage() to point to the buffer above. Gives a typed view of the encapsulated Mach
// message.
mach_message_t *m_pMessage;
// Cached value of GetThread() or MACH_PORT_NULL if that has not been computed yet.
thread_act_t m_hThread;
// Cached value of the task port or MACH_PORT_NULL if the message doesn't have one.
mach_port_t m_hTask;
// Indicates whether we are responsible for the deallocation of the ports in
// this message. It is true for messages we receive, and false for messages we send.
bool m_fPortsOwned;
};
#endif // HAVE_MACH_EXCEPTIONS
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*++
Module Name:
machmessage.h
Abstract:
Abstraction over Mach messages used during exception handling.
--*/
#include <mach/mach.h>
#include <mach/mach_error.h>
#include <mach/thread_status.h>
using namespace CorUnix;
#if HAVE_MACH_EXCEPTIONS
// The vast majority of Mach calls we make in this module are critical: we cannot recover from failures of
// these methods (principally because we're handling hardware exceptions in the context of a single dedicated
// handler thread). The following macro encapsulates checking the return code from Mach methods and emitting
// some useful data and aborting the process on failure.
#define CHECK_MACH(_msg, machret) do { \
if (machret != KERN_SUCCESS) \
{ \
char _szError[1024]; \
snprintf(_szError, ARRAY_SIZE(_szError), "%s: %u: %s", __FUNCTION__, __LINE__, _msg); \
mach_error(_szError, machret); \
abort(); \
} \
} while (false)
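// Illustrative usage sketch (editorial example, not part of the original header; hThread is a
// hypothetical thread port): a typical call site pairs a Mach call with CHECK_MACH so that any
// failure aborts with context, e.g.
//   kern_return_t machret = thread_suspend(hThread);
//   CHECK_MACH("thread_suspend", machret);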
// This macro terminates the process with some useful debug info as above, but for the general failure points
// that have nothing to do with Mach.
#define NONPAL_RETAIL_ASSERT(_msg, ...) do { \
fprintf(stdout, "%s: %u: " _msg "\n", __FUNCTION__, __LINE__, ## __VA_ARGS__); \
fflush(stdout); \
abort(); \
} while (false)
#define NONPAL_RETAIL_ASSERTE(_expr) do { \
if (!(_expr)) \
NONPAL_RETAIL_ASSERT("ASSERT: %s\n", #_expr); \
} while (false)
#ifdef _DEBUG
#define NONPAL_TRACE_ENABLED EnvironGetenv("NONPAL_TRACING", /* copyValue */ false)
#define NONPAL_ASSERT(_msg, ...) NONPAL_RETAIL_ASSERT(_msg, __VA_ARGS__)
// Assert macro that doesn't rely on the PAL.
#define NONPAL_ASSERTE(_expr) do { \
if (!(_expr)) \
NONPAL_RETAIL_ASSERT("ASSERT: %s\n", #_expr); \
} while (false)
// Debug-only output with printf-style formatting.
#define NONPAL_TRACE(_format, ...) do { \
if (NONPAL_TRACE_ENABLED) { fprintf(stdout, "NONPAL_TRACE: " _format, ## __VA_ARGS__); fflush(stdout); } \
} while (false)
#else // _DEBUG
#define NONPAL_TRACE_ENABLED false
#define NONPAL_ASSERT(_msg, ...)
#define NONPAL_ASSERTE(_expr)
#define NONPAL_TRACE(_format, ...)
#endif // _DEBUG
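// Illustrative usage sketch (editorial example, not part of the original header; the variable
// names are hypothetical): with NONPAL_TRACING set in the environment, a debug build can log
// progress without relying on the PAL, e.g.
//   NONPAL_TRACE("Handling %s message on port %08x\n", sMessage.GetMessageTypeName(), (unsigned)hPort);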
class MachMessage;
// Contains all the exception and thread state information needed to forward the exception.
struct MachExceptionInfo
{
exception_type_t ExceptionType;
mach_msg_type_number_t SubcodeCount;
mach_exception_data_type_t Subcodes[2];
#if defined(HOST_AMD64)
x86_thread_state_t ThreadState;
x86_float_state_t FloatState;
x86_debug_state_t DebugState;
#elif defined(HOST_ARM64)
arm_thread_state64_t ThreadState;
arm_neon_state64_t FloatState;
arm_debug_state64_t DebugState;
#else
#error Unexpected architecture
#endif
MachExceptionInfo(mach_port_t thread, MachMessage& message);
void RestoreState(mach_port_t thread);
};
// Abstraction of a subset of Mach message types. Provides accessors that hide the subtle differences in the
// message layout of similar message types.
class MachMessage
{
public:
// The message types handled by this class. The values are the actual type codes set in the Mach message
// header.
enum MessageType
{
SET_THREAD_MESSAGE_ID = 1,
FORWARD_EXCEPTION_MESSAGE_ID = 2,
NOTIFY_SEND_ONCE_MESSAGE_ID = 71,
EXCEPTION_RAISE_64_MESSAGE_ID = 2405,
EXCEPTION_RAISE_STATE_64_MESSAGE_ID = 2406,
EXCEPTION_RAISE_STATE_IDENTITY_64_MESSAGE_ID = 2407,
EXCEPTION_RAISE_REPLY_64_MESSAGE_ID = 2505,
EXCEPTION_RAISE_STATE_REPLY_64_MESSAGE_ID = 2506,
EXCEPTION_RAISE_STATE_IDENTITY_REPLY_64_MESSAGE_ID = 2507
};
// Construct an empty message. Use Receive() to form a message that can be inspected or SendSetThread(),
// ForwardNotification() or ReplyToNotification() to construct a message and send it.
MachMessage();
// Listen for the next message on the given port and initialize this class with the contents. The message
// type must match one of the MessageTypes indicated above (or the process will be aborted).
void Receive(mach_port_t hPort);
// Indicate whether a received message belongs to a particular semantic class.
bool IsSetThreadRequest(); // Message is a request to set the context of a particular thread
bool IsForwardExceptionRequest(); // Message is a request to forward the exception
bool IsSendOnceDestroyedNotify(); // Message is a notification that a send-once message was destroyed by the receiver
bool IsExceptionNotification(); // Message is a notification of an exception
bool IsExceptionReply(); // Message is a reply to the notification of an exception
// Get properties of a received message header.
MessageType GetMessageType(); // The message type
const char *GetMessageTypeName(); // An ASCII representation of the message type for logging purposes
mach_port_t GetLocalPort(); // The destination port the message was sent to
mach_port_t GetRemotePort(); // The source port the message came from (if a reply is expected)
// Get the properties of a set thread request. Fills in the provided context structure with the context
// from the message and returns the target thread to which the context should be applied.
thread_act_t GetThreadContext(CONTEXT *pContext);
// Returns the pal thread instance for the forward exception message
CPalThread *GetPalThread();
// Returns the exception info from the forward exception message
MachExceptionInfo *GetExceptionInfo();
// Get properties of the type-specific portion of the message. The following properties are supported by
// exception notification messages only.
thread_act_t GetThread(); // Get the faulting thread
exception_type_t GetException(); // Get the exception type (e.g. EXC_BAD_ACCESS)
int GetExceptionCodeCount(); // Get the number of exception sub-codes
mach_exception_data_type_t GetExceptionCode(int iIndex); // Get the exception sub-code at the given index
// Fetch the thread state flavor from a notification or reply message (return THREAD_STATE_NONE for the
// messages that don't contain a thread state).
thread_state_flavor_t GetThreadStateFlavor();
// Get the thread state with the given flavor from the exception or exception reply message. If the
// message doesn't contain a thread state or the flavor of the state in the message doesn't match, the
// state will be fetched directly from the target thread instead (which can be computed implicitly for
// exception messages or passed explicitly for reply messages).
mach_msg_type_number_t GetThreadState(thread_state_flavor_t eFlavor, thread_state_t pState, thread_act_t thread = NULL);
// Fetch the return code from a reply type message.
kern_return_t GetReturnCode();
// Initialize and send a request to set the register context of a particular thread.
void SendSetThread(mach_port_t hServerPort, CONTEXT *pContext);
// Initialize and send a request to forward the exception message to the notification thread
void SendForwardException(mach_port_t hServerPort, MachExceptionInfo *pExceptionInfo, CPalThread *ppalThread);
// Initialize the message (overwriting any previous content) to represent a forwarded version of the given
// exception notification message and send that message to the chain-back handler previously registered
// for the exception type being notified. The new message takes account of the fact that the target
// handler may not have requested the same notification behavior or flavor as our handler.
void ForwardNotification(MachExceptionHandler *pHandler, MachMessage& message);
// Initialize the message (overwriting any previous content) to represent a reply to the given exception
// notification and send that reply back to the original sender of the notification. This is used when our
// handler handles the exception rather than forwarding it to a chain-back handler.
void ReplyToNotification(MachMessage& message, kern_return_t eResult);
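// Illustrative flow (editorial sketch, not part of the original header; variable names are
// hypothetical): an exception handler thread would typically loop along the lines of
//   sMessage.Receive(hExceptionPort);
//   if (sMessage.IsExceptionNotification())
//       sReplyMessage.ReplyToNotification(sMessage, KERN_SUCCESS);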
private:
// The maximum size in bytes of any Mach message we can send or receive. Calculating an exact size for
// this is non-trivial (basically because of the security trailers that Mach appends) but the current
// value has proven to be more than enough so far.
static const size_t kcbMaxMessageSize = 1500;
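// Illustrative invariant (editorial sketch, not part of the original header): conceptually the
// buffer must be large enough for the largest message layout defined below, i.e.
//   static_assert(kcbMaxMessageSize >= sizeof(mach_message_t), "message buffer too small");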
// The following are structures describing the formats of the Mach messages we understand.
// Request to set the register context on a particular thread.
// SET_THREAD_MESSAGE_ID
struct set_thread_request_t
{
thread_act_t thread;
CONTEXT new_context;
};
// Request to forward the exception notification
// FORWARD_EXCEPTION_MESSAGE_ID
struct forward_exception_request_t
{
thread_act_t thread;
CPalThread *ppalThread;
MachExceptionInfo exception_info;
};
#pragma pack(4)
// EXCEPTION_RAISE_64_MESSAGE_ID
struct exception_raise_notification_64_t
{
mach_msg_body_t msgh_body;
mach_msg_port_descriptor_t thread_port;
mach_msg_port_descriptor_t task_port;
NDR_record_t ndr;
exception_type_t exception;
mach_msg_type_number_t code_count;
mach_exception_data_type_t code[2];
};
// EXCEPTION_RAISE_REPLY_64_MESSAGE_ID
struct exception_raise_reply_64_t
{
NDR_record_t ndr;
kern_return_t ret;
};
// EXCEPTION_RAISE_STATE_64_MESSAGE_ID
struct exception_raise_state_notification_64_t
{
NDR_record_t ndr;
exception_type_t exception;
mach_msg_type_number_t code_count;
mach_exception_data_type_t code[2];
thread_state_flavor_t flavor;
mach_msg_type_number_t old_state_count;
natural_t old_state[THREAD_STATE_MAX];
};
// EXCEPTION_RAISE_STATE_REPLY_64_MESSAGE_ID
struct exception_raise_state_reply_64_t
{
NDR_record_t ndr;
kern_return_t ret;
thread_state_flavor_t flavor;
mach_msg_type_number_t new_state_count;
natural_t new_state[THREAD_STATE_MAX];
};
// EXCEPTION_RAISE_STATE_IDENTITY_64_MESSAGE_ID
struct exception_raise_state_identity_notification_64_t
{
mach_msg_body_t msgh_body;
mach_msg_port_descriptor_t thread_port;
mach_msg_port_descriptor_t task_port;
NDR_record_t ndr;
exception_type_t exception;
mach_msg_type_number_t code_count;
mach_exception_data_type_t code[2];
thread_state_flavor_t flavor;
mach_msg_type_number_t old_state_count;
natural_t old_state[THREAD_STATE_MAX];
};
// EXCEPTION_RAISE_STATE_IDENTITY_REPLY_64_MESSAGE_ID
struct exception_raise_state_identity_reply_64_t
{
NDR_record_t ndr;
kern_return_t ret;
thread_state_flavor_t flavor;
mach_msg_type_number_t new_state_count;
natural_t new_state[THREAD_STATE_MAX];
};
#pragma pack()
// All the above messages are sent with a standard Mach header prepended. This structure unifies the
// message formats.
struct mach_message_t
{
mach_msg_header_t header;
union
{
set_thread_request_t set_thread;
forward_exception_request_t forward_exception;
exception_raise_notification_64_t raise_64;
exception_raise_state_notification_64_t raise_state_64;
exception_raise_state_identity_notification_64_t raise_state_identity_64;
exception_raise_reply_64_t raise_reply_64;
exception_raise_state_reply_64_t raise_state_reply_64;
exception_raise_state_identity_reply_64_t raise_state_identity_reply_64;
} data;
} __attribute__((packed));
// Re-initializes this data structure (to the same state as default construction, containing no message).
void ResetMessage();
// Initialize those fields of a message that are invariant. This method expects that the msgh_id field has
// been filled in prior to the call so it can determine which non-header fields to initialize.
void InitFixedFields();
// Initialize the size field of the message header (msgh_size) based on the message type and other fields.
// This should be called after all other fields have been initialized.
void InitMessageSize();
// Do the work of getting ports from the message.
// * fCalculate -- calculate the thread port if the message did not contain it.
// * fValidThread -- failfast if the message was not one expected to have a (calculable) thread port.
void GetPorts(bool fCalculate, bool fValidThread);
// Given a thread's register context, locate and return the Mach port representing that thread. Only the
// x86_THREAD_STATE and x86_THREAD_STATE32 state flavors are supported.
thread_act_t GetThreadFromState(thread_state_flavor_t eFlavor, thread_state_t pState);
// Transform an exception handler behavior type into the corresponding Mach message ID for the
// notification.
mach_msg_id_t MapBehaviorToNotificationType(exception_behavior_t eBehavior);
// Transform a Mach message ID for an exception notification into the corresponding ID for the reply.
mach_msg_id_t MapNotificationToReplyType(mach_msg_id_t eNotificationType);
// The following methods initialize fields on the message prior to transmission. Each is valid for either
// notification, replies or both. If a particular setter is defined for replies, say, then it will be a
// no-op for any replies which don't contain that field. This makes transforming between notifications and
// replies of different types simpler (we can copy a super-set of all fields between the two, but only
// those operations that make sense will do any work).
// Defined for notifications:
void SetThread(thread_act_t thread);
void SetException(exception_type_t eException);
void SetExceptionCodeCount(int cCodes);
void SetExceptionCode(int iIndex, mach_exception_data_type_t iCode);
// Defined for replies:
void SetReturnCode(kern_return_t eReturnCode);
// Defined for both notifications and replies.
void SetThreadState(thread_state_flavor_t eFlavor, thread_state_t pState, mach_msg_type_number_t count);
// Maximally sized buffer for the message to be received into or transmitted out of this class.
unsigned char m_rgMessageBuffer[kcbMaxMessageSize];
// Initialized by ResetMessage() to point to the buffer above. Gives a typed view of the encapsulated Mach
// message.
mach_message_t *m_pMessage;
// Cached value of GetThread() or MACH_PORT_NULL if that has not been computed yet.
thread_act_t m_hThread;
// Cached value of the task port or MACH_PORT_NULL if the message doesn't have one.
mach_port_t m_hTask;
// Indicates whether we are responsible for the deallocation of the ports in
// this message. It is true for messages we receive, and false for messages we send.
bool m_fPortsOwned;
};
#endif // HAVE_MACH_EXCEPTIONS
| -1 |
dotnet/runtime | 65,922 | Rely on PGO for isinst/castclass | Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | EgorBo | "2022-02-26T22:13:46Z" | "2022-04-01T21:25:56Z" | 063d08018602291f8dbd9d9b6055b8e1af874cb8 | f249a3d58b0b2bb55adca1a56c5a39cf446437b7 | Rely on PGO for isinst/castclass. Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | ./src/coreclr/pal/tests/palsuite/miscellaneous/GetEnvironmentVariableA/test3/test.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================
**
** Source : test.c
**
** Purpose: Test for GetEnvironmentVariable() function
** Pass a nonexistent environment variable and a null to
** the function to check return values.
**
**
**=========================================================*/
#include <palsuite.h>
#define BUFFER_SIZE 5000
#define SMALL_BUFFER_SIZE 5
PALTEST(miscellaneous_GetEnvironmentVariableA_test3_paltest_getenvironmentvariablea_test3, "miscellaneous/GetEnvironmentVariableA/test3/paltest_getenvironmentvariablea_test3")
{
int ReturnValueForNonExisting = 0;
int ReturnValueForNull = 0;
char pResultBuffer[BUFFER_SIZE];
char pSmallBuffer[SMALL_BUFFER_SIZE];
if(0 != (PAL_Initialize(argc, argv)))
{
return FAIL;
}
/* This variable doesn't exist, it should return 0 */
ReturnValueForNonExisting = GetEnvironmentVariable("NonExistingVariable",
pSmallBuffer,
SMALL_BUFFER_SIZE);
if(ReturnValueForNonExisting != 0)
{
Fail("ERROR: The return should have been 0, but it was %d. "
"The function attempted to get an Environment Variable that "
"doesn't exist and should return 0 as a result.\n",
ReturnValueForNonExisting);
}
/* Passing a NULL string should return 0 */
ReturnValueForNull = GetEnvironmentVariable(NULL,
pResultBuffer,
BUFFER_SIZE);
if(ReturnValueForNull != 0)
{
Fail("ERROR: The return should have been 0, but it was %d. "
"The function attempted to get a NULL pointer and should return "
"0 as a result.\n",ReturnValueForNull);
}
PAL_Terminate();
return PASS;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================
**
** Source : test.c
**
** Purpose: Test for GetEnvironmentVariable() function
** Pass a nonexistent environment variable and a null to
** the function to check return values.
**
**
**=========================================================*/
#include <palsuite.h>
#define BUFFER_SIZE 5000
#define SMALL_BUFFER_SIZE 5
PALTEST(miscellaneous_GetEnvironmentVariableA_test3_paltest_getenvironmentvariablea_test3, "miscellaneous/GetEnvironmentVariableA/test3/paltest_getenvironmentvariablea_test3")
{
int ReturnValueForNonExisting = 0;
int ReturnValueForNull = 0;
char pResultBuffer[BUFFER_SIZE];
char pSmallBuffer[SMALL_BUFFER_SIZE];
if(0 != (PAL_Initialize(argc, argv)))
{
return FAIL;
}
/* This variable doesn't exist, it should return 0 */
ReturnValueForNonExisting = GetEnvironmentVariable("NonExistingVariable",
pSmallBuffer,
SMALL_BUFFER_SIZE);
if(ReturnValueForNonExisting != 0)
{
Fail("ERROR: The return should have been 0, but it was %d. "
"The function attempted to get an Environment Variable that "
"doesn't exist and should return 0 as a result.\n",
ReturnValueForNonExisting);
}
/* Passing a NULL string should return 0 */
ReturnValueForNull = GetEnvironmentVariable(NULL,
pResultBuffer,
BUFFER_SIZE);
if(ReturnValueForNull != 0)
{
Fail("ERROR: The return should have been 0, but it was %d. "
"The function attempted to get a NULL pointer and should return "
"0 as a result.\n",ReturnValueForNull);
}
PAL_Terminate();
return PASS;
}
| -1 |
dotnet/runtime | 65,922 | Rely on PGO for isinst/castclass | Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | EgorBo | "2022-02-26T22:13:46Z" | "2022-04-01T21:25:56Z" | 063d08018602291f8dbd9d9b6055b8e1af874cb8 | f249a3d58b0b2bb55adca1a56c5a39cf446437b7 | Rely on PGO for isinst/castclass. Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | ./src/coreclr/pal/tests/palsuite/threading/ExitThread/test3/test3.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*=============================================================================
**
** Source: test3.c
**
** Purpose: Test to ensure ExitThread() results in any loaded dynamic
** libraries having their entry point called with a THREAD_DETACH
** notification.
**
** Dependencies: PAL_Initialize
** PAL_Terminate
** Fail
** GetCurrentDirectoryW
** CreateThread
** ResumeThread
** LoadLibrary
** FreeLibrary
** GetProcAddress
** WaitForSingleObject
** GetLastError
** strlen
** strncpy
**
**
**===========================================================================*/
#include <palsuite.h>
/* SHLEXT is defined only for Unix variants */
#if defined(SHLEXT)
#define rgchLibraryFile "dllmain"SHLEXT
#define szFunction "GetDetachCount"
#else
#define rgchLibraryFile "dllmain"
#define szFunction "_GetDetachCount@0"
#endif
/* define our test function type */
typedef int ( PALAPI *LPTESTFUNC )( void );
/**
* ThreadFunc
*
* Dummy thread function for causing DLL thread notifications.
*/
DWORD PALAPI ThreadFunc_ExitThread_test3( LPVOID param )
{
/* simulate some brief "work" */
int i;
for( i=0; i<100000; i++ )
;
ExitThread( 0 );
return (0);
}
/* main program entry point */
int __cdecl main( int argc, char **argv )
{
/* local variables */
HANDLE hLib = NULL;
LPTESTFUNC pFunc;
int detachCount1 = 0;
int detachCount2 = 0;
HANDLE hThread = NULL;
DWORD IDThread;
/* initialize the PAL */
if( PAL_Initialize(argc, argv) != 0 )
{
return( FAIL );
}
/* Load the test library */
hLib = LoadLibrary( rgchLibraryFile );
if(hLib == NULL)
{
Fail("ERROR: Unable to load library %s\n", rgchLibraryFile );
}
/* Get the address of our test function in the dll */
pFunc = (LPTESTFUNC)GetProcAddress( hLib, szFunction );
if( pFunc == NULL )
{
Trace( "ERROR:%lu: Unable to load function \"%s\" library \"%s\"\n",
GetLastError(),
szFunction,
rgchLibraryFile );
if( ! FreeLibrary( hLib ) ) {
Trace( "FreeLibrary() failed with error code %lu\n",
GetLastError() );
}
Fail( "Exiting\n" );
}
/* Execute the test function to get the detach count */
detachCount1 = pFunc();
/* run another dummy thread to cause notification of the library */
hThread = CreateThread( NULL, /* no security attributes */
0, /* use default stack size */
(LPTHREAD_START_ROUTINE) ThreadFunc_ExitThread_test3, /* thread function */
(LPVOID) NULL, /* no argument is passed */
/* to the thread function */
CREATE_SUSPENDED, /* create suspended */
&IDThread ); /* returns thread id */
/* Check the return value for success. */
if( hThread == NULL )
{
/* error creating thread */
Trace( "Unexpected CreateThread error %d\n",
GetLastError() );
if( ! FreeLibrary( hLib ) ) {
Trace( "FreeLibrary() failed with error code %lu\n",
GetLastError() );
}
Fail( "Exiting\n" );
}
/* Resume the suspended thread */
ResumeThread( hThread );
/* wait for the thread to complete */
WaitForSingleObject( hThread, INFINITE );
/* Execute the test function to get the new detach count */
detachCount2 = pFunc();
/* Unload the test library */
if( !FreeLibrary( hLib ) )
{
Fail( "ERROR:%u: Unable to free library \"%s\"\n",
GetLastError(),
rgchLibraryFile );
}
/* validate the result */
if( detachCount2 != (detachCount1 + 1) )
{
Fail( "FAIL: unexpected DLL detach count %d, expected %d\n",
detachCount2,
(detachCount1 + 1) );
}
/* terminate the PAL */
PAL_Terminate();
/* return success */
return PASS;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*=============================================================================
**
** Source: test3.c
**
** Purpose: Test to ensure ExitThread() results in any loaded dynamic
** libraries having their entry point called with a THREAD_DETACH
** notification.
**
** Dependencies: PAL_Initialize
** PAL_Terminate
** Fail
** GetCurrentDirectoryW
** CreateThread
** ResumeThread
** LoadLibrary
** FreeLibrary
** GetProcAddress
** WaitForSingleObject
** GetLastError
** strlen
** strncpy
**
**
**===========================================================================*/
#include <palsuite.h>
/* SHLEXT is defined only for Unix variants */
#if defined(SHLEXT)
#define rgchLibraryFile "dllmain"SHLEXT
#define szFunction "GetDetachCount"
#else
#define rgchLibraryFile "dllmain"
#define szFunction "_GetDetachCount@0"
#endif
/* define our test function type */
typedef int ( PALAPI *LPTESTFUNC )( void );
/**
* ThreadFunc
*
* Dummy thread function for causing DLL thread notifications.
*/
DWORD PALAPI ThreadFunc_ExitThread_test3( LPVOID param )
{
/* simulate some brief "work" */
int i;
for( i=0; i<100000; i++ )
;
ExitThread( 0 );
return (0);
}
/* main program entry point */
int __cdecl main( int argc, char **argv )
{
/* local variables */
HANDLE hLib = NULL;
LPTESTFUNC pFunc;
int detachCount1 = 0;
int detachCount2 = 0;
HANDLE hThread = NULL;
DWORD IDThread;
/* initialize the PAL */
if( PAL_Initialize(argc, argv) != 0 )
{
return( FAIL );
}
/* Load the test library */
hLib = LoadLibrary( rgchLibraryFile );
if(hLib == NULL)
{
Fail("ERROR: Unable to load library %s\n", rgchLibraryFile );
}
/* Get the address of our test function in the dll */
pFunc = (LPTESTFUNC)GetProcAddress( hLib, szFunction );
if( pFunc == NULL )
{
Trace( "ERROR:%lu: Unable to load function \"%s\" library \"%s\"\n",
GetLastError(),
szFunction,
rgchLibraryFile );
if( ! FreeLibrary( hLib ) ) {
Trace( "FreeLibrary() failed with error code %lu\n",
GetLastError() );
}
Fail( "Exiting\n" );
}
/* Execute the test function to get the detach count */
detachCount1 = pFunc();
/* run another dummy thread to cause notification of the library */
hThread = CreateThread( NULL, /* no security attributes */
0, /* use default stack size */
(LPTHREAD_START_ROUTINE) ThreadFunc_ExitThread_test3, /* thread function */
(LPVOID) NULL, /* no argument is passed */
/* to the thread function */
CREATE_SUSPENDED, /* create suspended */
&IDThread ); /* returns thread id */
/* Check the return value for success. */
if( hThread == NULL )
{
/* error creating thread */
Trace( "Unexpected CreateThread error %d\n",
GetLastError() );
if( ! FreeLibrary( hLib ) ) {
Trace( "FreeLibrary() failed with error code %lu\n",
GetLastError() );
}
Fail( "Exiting\n" );
}
/* Resume the suspended thread */
ResumeThread( hThread );
/* wait for the thread to complete */
WaitForSingleObject( hThread, INFINITE );
/* Execute the test function to get the new detach count */
detachCount2 = pFunc();
/* Unload the test library */
if( !FreeLibrary( hLib ) )
{
Fail( "ERROR:%u: Unable to free library \"%s\"\n",
GetLastError(),
rgchLibraryFile );
}
/* validate the result */
if( detachCount2 != (detachCount1 + 1) )
{
Fail( "FAIL: unexpected DLL detach count %d, expected %d\n",
detachCount2,
(detachCount1 + 1) );
}
/* terminate the PAL */
PAL_Terminate();
/* return success */
return PASS;
}
| -1 |
dotnet/runtime | 65,922 | Rely on PGO for isinst/castclass | Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | EgorBo | "2022-02-26T22:13:46Z" | "2022-04-01T21:25:56Z" | 063d08018602291f8dbd9d9b6055b8e1af874cb8 | f249a3d58b0b2bb55adca1a56c5a39cf446437b7 | Rely on PGO for isinst/castclass. Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | ./src/coreclr/inc/debugreturn.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#ifndef _DEBUGRETURN_H_
#define _DEBUGRETURN_H_
// Note that with OACR Prefast is run over checked (_DEBUG is defined) sources
// so we have to first check the _PREFAST_ define followed by the _DEBUG define
//
#ifdef _PREFAST_
// Use prefast to detect gotos out of no-return blocks. The gotos out of no-return blocks
// should be reported as memory leaks by prefast. The (nothrow) is because PREfix sees the
// throw from the new statement, and doesn't like these macros used in a destructor (and
// the NULL returned by failure works just fine in delete[])
#define DEBUG_ASSURE_NO_RETURN_BEGIN(arg) { char* __noReturnInThisBlock_##arg = ::new (nothrow) char[1];
#define DEBUG_ASSURE_NO_RETURN_END(arg) ::delete[] __noReturnInThisBlock_##arg; }
#define DEBUG_OK_TO_RETURN_BEGIN(arg) { ::delete[] __noReturnInThisBlock_##arg;
#define DEBUG_OK_TO_RETURN_END(arg) __noReturnInThisBlock_##arg = ::new (nothrow) char[1]; }
#define DEBUG_ASSURE_SAFE_TO_RETURN TRUE
#define return return
#else // !_PREFAST_
// This is disabled in build 190024315 (a pre-release build after VS 2015 Update 3) and
// earlier because those builds only support C++11 constexpr, which doesn't allow the
// use of 'if' statements within the body of a constexpr function. Later builds support
// C++14 constexpr.
#if defined(_DEBUG) && !defined(JIT_BUILD) && (!defined(_MSC_FULL_VER) || _MSC_FULL_VER > 190024315)
// Code to generate a compile-time error if return statements appear where they
// shouldn't.
//
// Here's the way it works...
//
// We create two classes with a safe_to_return() method. The method is static,
// returns void, and does nothing. One class has the method as public, the other
// as private. We introduce a global scope typedef for __ReturnOK that refers to
// the class with the public method. So, by default, the expression
//
// __ReturnOK::safe_to_return()
//
// quietly compiles and does nothing. When we enter a block in which we want to
// inhibit returns, we introduce a new typedef that defines __ReturnOK as the
// class with the private method. Inside this scope,
//
// __ReturnOK::safe_to_return()
//
// generates a compile-time error.
//
// To cause the method to be called, we have to #define the return keyword.
// The simplest working version would be
//
// #define return if (0) __ReturnOK::safe_to_return(); else return
//
// but we've used
//
// #define return for (;1;__ReturnOK::safe_to_return()) return
//
// because it happens to generate somewhat faster code in a checked build. (They
// both introduce no overhead in a fastchecked build.)
//
class __SafeToReturn {
public:
static int safe_to_return() {return 0;};
static int used() {return 0;};
};
class __YouCannotUseAReturnStatementHere {
private:
// If you got here, and you're wondering what you did wrong -- you're using
// a return statement where it's not allowed. Likely, it's inside one of:
// GCPROTECT_BEGIN ... GCPROTECT_END
// HELPER_METHOD_FRAME_BEGIN ... HELPER_METHOD_FRAME_END
//
static int safe_to_return() {return 0;};
public:
// Some compilers warn if all member functions in a class are private
// or if a typedef is unused. Rather than disable the warning, we'll work
// around it here.
static int used() {return 0;};
};
typedef __SafeToReturn __ReturnOK;
// Use this to ensure that it is safe to return from a given scope
#define DEBUG_ASSURE_SAFE_TO_RETURN __ReturnOK::safe_to_return()
// Unfortunately, the only way to make this work is to #define all return statements --
// even the ones at global scope. This actually generates better code than it appears.
// The call is dead, and does not appear in the generated code, even in a checked
// build. (And, in fastchecked, there is no penalty at all.)
//
#ifdef _MSC_VER
#define return if (0 && __ReturnOK::safe_to_return()) { } else return
#else // _MSC_VER
#define return for (;1;__ReturnOK::safe_to_return()) return
#endif // _MSC_VER
#define DEBUG_ASSURE_NO_RETURN_BEGIN(arg) { typedef __YouCannotUseAReturnStatementHere __ReturnOK; if (0 && __ReturnOK::used()) { } else {
#define DEBUG_ASSURE_NO_RETURN_END(arg) } }
#define DEBUG_OK_TO_RETURN_BEGIN(arg) { typedef __SafeToReturn __ReturnOK; if (0 && __ReturnOK::used()) { } else {
#define DEBUG_OK_TO_RETURN_END(arg) } }
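// Illustrative usage sketch (editorial example, not part of the original header; the helper name
// is hypothetical): wrapping a region with these macros turns any 'return' inside it into a
// compile-time error, e.g.
//   DEBUG_ASSURE_NO_RETURN_BEGIN(EXAMPLE)
//       DoWorkThatMustNotReturnDirectly();
//   DEBUG_ASSURE_NO_RETURN_END(EXAMPLE)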
#else // defined(_DEBUG) && !defined(JIT_BUILD) && (!defined(_MSC_FULL_VER) || _MSC_FULL_VER > 190024315)
#define DEBUG_ASSURE_SAFE_TO_RETURN TRUE
#define DEBUG_ASSURE_NO_RETURN_BEGIN(arg) {
#define DEBUG_ASSURE_NO_RETURN_END(arg) }
#define DEBUG_OK_TO_RETURN_BEGIN(arg) {
#define DEBUG_OK_TO_RETURN_END(arg) }
#endif // defined(_DEBUG) && !defined(JIT_BUILD) && (!defined(_MSC_FULL_VER) || _MSC_FULL_VER > 190024315)
#endif // !_PREFAST_
#endif // _DEBUGRETURN_H_
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#ifndef _DEBUGRETURN_H_
#define _DEBUGRETURN_H_
// Note that with OACR Prefast is run over checked (_DEBUG is defined) sources
// so we have to first check the _PREFAST_ define followed by the _DEBUG define
//
#ifdef _PREFAST_
// Use prefast to detect gotos out of no-return blocks. The gotos out of no-return blocks
// should be reported as memory leaks by prefast. The (nothrow) is because PREfix sees the
// throw from the new statement, and doesn't like these macros used in a destructor (and
// the NULL returned by failure works just fine in delete[])
#define DEBUG_ASSURE_NO_RETURN_BEGIN(arg) { char* __noReturnInThisBlock_##arg = ::new (nothrow) char[1];
#define DEBUG_ASSURE_NO_RETURN_END(arg) ::delete[] __noReturnInThisBlock_##arg; }
#define DEBUG_OK_TO_RETURN_BEGIN(arg) { ::delete[] __noReturnInThisBlock_##arg;
#define DEBUG_OK_TO_RETURN_END(arg) __noReturnInThisBlock_##arg = ::new (nothrow) char[1]; }
#define DEBUG_ASSURE_SAFE_TO_RETURN TRUE
#define return return
#else // !_PREFAST_
// This is disabled in build 190024315 (a pre-release build after VS 2015 Update 3) and
// earlier because those builds only support C++11 constexpr, which doesn't allow the
// use of 'if' statements within the body of a constexpr function. Later builds support
// C++14 constexpr.
#if defined(_DEBUG) && !defined(JIT_BUILD) && (!defined(_MSC_FULL_VER) || _MSC_FULL_VER > 190024315)
// Code to generate a compile-time error if return statements appear where they
// shouldn't.
//
// Here's the way it works...
//
// We create two classes with a safe_to_return() method. The method is static,
// returns void, and does nothing. One class has the method as public, the other
// as private. We introduce a global scope typedef for __ReturnOK that refers to
// the class with the public method. So, by default, the expression
//
// __ReturnOK::safe_to_return()
//
// quietly compiles and does nothing. When we enter a block in which we want to
// inhibit returns, we introduce a new typedef that defines __ReturnOK as the
// class with the private method. Inside this scope,
//
// __ReturnOK::safe_to_return()
//
// generates a compile-time error.
//
// To cause the method to be called, we have to #define the return keyword.
// The simplest working version would be
//
// #define return if (0) __ReturnOK::safe_to_return(); else return
//
// but we've used
//
// #define return for (;1;__ReturnOK::safe_to_return()) return
//
// because it happens to generate somewhat faster code in a checked build. (They
// both introduce no overhead in a fastchecked build.)
//
class __SafeToReturn {
public:
static int safe_to_return() {return 0;};
static int used() {return 0;};
};
class __YouCannotUseAReturnStatementHere {
private:
// If you got here, and you're wondering what you did wrong -- you're using
// a return statement where it's not allowed. Likely, it's inside one of:
// GCPROTECT_BEGIN ... GCPROTECT_END
// HELPER_METHOD_FRAME_BEGIN ... HELPER_METHOD_FRAME_END
//
static int safe_to_return() {return 0;};
public:
// Some compilers warn if all member functions in a class are private
// or if a typedef is unused. Rather than disable the warning, we'll work
// around it here.
static int used() {return 0;};
};
typedef __SafeToReturn __ReturnOK;
// Use this to ensure that it is safe to return from a given scope
#define DEBUG_ASSURE_SAFE_TO_RETURN __ReturnOK::safe_to_return()
// Unfortunately, the only way to make this work is to #define all return statements --
// even the ones at global scope. This actually generates better code than it appears.
// The call is dead, and does not appear in the generated code, even in a checked
// build. (And, in fastchecked, there is no penalty at all.)
//
#ifdef _MSC_VER
#define return if (0 && __ReturnOK::safe_to_return()) { } else return
#else // _MSC_VER
#define return for (;1;__ReturnOK::safe_to_return()) return
#endif // _MSC_VER
#define DEBUG_ASSURE_NO_RETURN_BEGIN(arg) { typedef __YouCannotUseAReturnStatementHere __ReturnOK; if (0 && __ReturnOK::used()) { } else {
#define DEBUG_ASSURE_NO_RETURN_END(arg) } }
#define DEBUG_OK_TO_RETURN_BEGIN(arg) { typedef __SafeToReturn __ReturnOK; if (0 && __ReturnOK::used()) { } else {
#define DEBUG_OK_TO_RETURN_END(arg) } }
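// Illustrative usage sketch (editorial example, not part of the original header; the helper name
// is hypothetical): wrapping a region with these macros turns any 'return' inside it into a
// compile-time error, e.g.
//   DEBUG_ASSURE_NO_RETURN_BEGIN(EXAMPLE)
//       DoWorkThatMustNotReturnDirectly();
//   DEBUG_ASSURE_NO_RETURN_END(EXAMPLE)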
#else // defined(_DEBUG) && !defined(JIT_BUILD) && (!defined(_MSC_FULL_VER) || _MSC_FULL_VER > 190024315)
#define DEBUG_ASSURE_SAFE_TO_RETURN TRUE
#define DEBUG_ASSURE_NO_RETURN_BEGIN(arg) {
#define DEBUG_ASSURE_NO_RETURN_END(arg) }
#define DEBUG_OK_TO_RETURN_BEGIN(arg) {
#define DEBUG_OK_TO_RETURN_END(arg) }
#endif // defined(_DEBUG) && !defined(JIT_BUILD) && (!defined(_MSC_FULL_VER) || _MSC_FULL_VER > 190024315)
#endif // !_PREFAST_
#endif // _DEBUGRETURN_H_
| -1 |
dotnet/runtime | 65,922 | Rely on PGO for isinst/castclass | Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | EgorBo | "2022-02-26T22:13:46Z" | "2022-04-01T21:25:56Z" | 063d08018602291f8dbd9d9b6055b8e1af874cb8 | f249a3d58b0b2bb55adca1a56c5a39cf446437b7 | Rely on PGO for isinst/castclass. Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | ./src/coreclr/classlibnative/bcltype/oavariant.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// File: OAVariant.cpp
//
//
// Purpose: Wrapper for Ole Automation compatible math ops.
// Calls through to OleAut.dll
//
//
#include <common.h>
#ifdef FEATURE_COMINTEROP
#include <oleauto.h>
#include "excep.h"
#include "oavariant.h"
#include "comdatetime.h" // DateTime <-> OleAut date conversions
#include "interoputil.h"
#include "interopconverter.h"
#include "excep.h"
#include "string.h"
#include "comutilnative.h" // for COMDate
#define INVALID_MAPPING (BYTE)(-1)
static const BYTE CVtoVTTable [] =
{
VT_EMPTY, // CV_EMPTY
VT_VOID, // CV_VOID
VT_BOOL, // CV_BOOLEAN
VT_UI2, // CV_CHAR
VT_I1, // CV_I1
VT_UI1, // CV_U1
VT_I2, // CV_I2
VT_UI2, // CV_U2
VT_I4, // CV_I4
VT_UI4, // CV_U4
VT_I8, // CV_I8
VT_UI8, // CV_U8
VT_R4, // CV_R4
VT_R8, // CV_R8
VT_BSTR, // CV_STRING
INVALID_MAPPING, // CV_PTR
VT_DATE, // CV_DATETIME
INVALID_MAPPING, // CV_TIMESPAN
VT_UNKNOWN, // CV_OBJECT
VT_DECIMAL, // CV_DECIMAL
VT_CY, // CV_CURRENCY
INVALID_MAPPING, // CV_ENUM
INVALID_MAPPING, // CV_MISSING
VT_NULL, // CV_NULL
INVALID_MAPPING // CV_LAST
};
static const BYTE VTtoCVTable[] =
{
CV_EMPTY, // VT_EMPTY
CV_NULL, // VT_NULL
CV_I2, // VT_I2
CV_I4, // VT_I4
CV_R4, // VT_R4
CV_R8, // VT_R8
CV_CURRENCY,// VT_CY
CV_DATETIME,// VT_DATE
CV_STRING, // VT_BSTR
INVALID_MAPPING, // VT_DISPATCH
INVALID_MAPPING, // VT_ERROR
CV_BOOLEAN, // VT_BOOL
CV_OBJECT, // VT_VARIANT
CV_OBJECT, // VT_UNKNOWN
CV_DECIMAL, // VT_DECIMAL
INVALID_MAPPING, // An unused enum table entry
CV_I1, // VT_I1
CV_U1, // VT_UI1
CV_U2, // VT_UI2
CV_U4, // VT_UI4
CV_I8, // VT_I8
CV_U8, // VT_UI8
CV_I4, // VT_INT
CV_U4, // VT_UINT
CV_VOID // VT_VOID
};
// Need translations from CVType to VARENUM and vice versa. CVTypes
// is defined in COMVariant.h. VARENUM is defined in OleAut's variant.h
// Assumption here is we will only deal with VARIANTs and not other OLE
// constructs such as property sets or safe arrays.
VARENUM COMOAVariant::CVtoVT(const CVTypes cv)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_ANY;
PRECONDITION(cv >= 0 && cv < CV_LAST);
}
CONTRACTL_END;
if (CVtoVTTable[cv] == INVALID_MAPPING)
COMPlusThrow(kNotSupportedException, W("NotSupported_ChangeType"));
return (VARENUM) CVtoVTTable[cv];
}
// Need translations from CVType to VARENUM and vice versa. CVTypes
// is defined in COMVariant.h. VARENUM is defined in OleAut's variant.h
CVTypes COMOAVariant::VTtoCV(const VARENUM vt)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_ANY;
PRECONDITION(vt < VT_VOID);
}
CONTRACTL_END;
if (vt <0 || vt > VT_VOID || VTtoCVTable[vt]==INVALID_MAPPING)
COMPlusThrow(kNotSupportedException, W("NotSupported_ChangeType"));
return (CVTypes) VTtoCVTable[vt];
}
// Converts a COM+ Variant to an OleAut Variant. Returns true if
// there was a native object allocated by this method that must be freed,
// else false.
bool COMOAVariant::ToOAVariant(const VariantData * const var, VARIANT * oa)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
INJECT_FAULT(COMPlusThrowOM());
PRECONDITION(CheckPointer(var));
PRECONDITION(CheckPointer(oa));
}
CONTRACTL_END;
SafeVariantInit(oa);
UINT64 * dest = (UINT64*) &V_UI1(oa);
*dest = 0;
WCHAR * chars;
int strLen;
// Set the data field of the OA Variant to be either the object reference
// or the data (i.e. int) that it needs.
switch (var->GetType())
{
case CV_STRING:
if (var->GetObjRef() == NULL)
{
V_BSTR(oa) = NULL;
V_VT(oa) = static_cast<VARTYPE>(CVtoVT(var->GetType()));
// OA perf feature: VarClear calls SysFreeString(null), which access violates.
return false;
}
((STRINGREF) (var->GetObjRef()))->RefInterpretGetStringValuesDangerousForGC(&chars, &strLen);
V_BSTR(oa) = SysAllocStringLen(chars, strLen);
if (V_BSTR(oa) == NULL)
COMPlusThrowOM();
V_VT(oa) = static_cast<VARTYPE>(CVtoVT(var->GetType()));
return true;
case CV_CHAR:
chars = (WCHAR*) var->GetData();
V_BSTR(oa) = SysAllocStringLen(chars, 1);
if (V_BSTR(oa) == NULL)
COMPlusThrowOM();
// We should override the CVtoVT default of VT_UI2 for this case.
V_VT(oa) = VT_BSTR;
return true;
case CV_DATETIME:
V_DATE(oa) = COMDateTime::TicksToDoubleDate(var->GetDataAsInt64());
V_VT(oa) = static_cast<VARTYPE>(CVtoVT(var->GetType()));
return false;
case CV_BOOLEAN:
V_BOOL(oa) = (var->GetDataAsInt64()==0 ? VARIANT_FALSE : VARIANT_TRUE);
V_VT(oa) = static_cast<VARTYPE>(CVtoVT(var->GetType()));
return false;
case CV_DECIMAL:
{
OBJECTREF obj = var->GetObjRef();
DECIMAL * d = (DECIMAL*) obj->GetData();
// DECIMALs and Variants are the same size. Variants are a union between
// all the normal Variant fields (vt, bval, etc) and a Decimal. Decimals
// also have the first 2 bytes reserved, for a VT field.
V_DECIMAL(oa) = *d;
V_VT(oa) = VT_DECIMAL;
return false;
}
case CV_OBJECT:
{
OBJECTREF obj = var->GetObjRef();
GCPROTECT_BEGIN(obj)
{
IUnknown *pUnk = NULL;
// Convert the object to an IDispatch/IUnknown pointer.
ComIpType FetchedIpType = ComIpType_None;
pUnk = GetComIPFromObjectRef(&obj, ComIpType_Both, &FetchedIpType);
V_UNKNOWN(oa) = pUnk;
V_VT(oa) = static_cast<VARTYPE>(FetchedIpType == ComIpType_Dispatch ? VT_DISPATCH : VT_UNKNOWN);
}
GCPROTECT_END();
return true;
}
default:
*dest = var->GetDataAsInt64();
V_VT(oa) = static_cast<VARTYPE>(CVtoVT(var->GetType()));
return false;
}
}
// Converts an OleAut Variant into a COM+ Variant.
// Note that we pass the VariantData byref so that if GC happens, 'var' gets updated
void COMOAVariant::FromOAVariant(const VARIANT * const oa, VariantData * const& var)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
INJECT_FAULT(COMPlusThrowOM());
PRECONDITION(CheckPointer(oa));
}
CONTRACTL_END;
// Clear the return variant value. It's allocated on
// the stack and we only want valid state data in there.
memset(var, 0, sizeof(VariantData));
CVTypes type = VTtoCV((VARENUM) V_VT(oa));
var->SetType(type);
switch (type)
{
case CV_STRING:
{
// BSTRs have an int with the string buffer length (not the string length)
// followed by the data. The pointer to the BSTR points to the start of the
// characters, NOT the start of the BSTR.
WCHAR * chars = V_BSTR(oa);
int strLen = SysStringLen(V_BSTR(oa));
STRINGREF str = StringObject::NewString(chars, strLen);
var->SetObjRef((OBJECTREF)str);
break;
}
case CV_DATETIME:
var->SetDataAsInt64(COMDateTime::DoubleDateToTicks(V_DATE(oa)));
break;
case CV_BOOLEAN:
var->SetDataAsInt64(V_BOOL(oa)==VARIANT_FALSE ? 0 : 1);
break;
case CV_DECIMAL:
{
MethodTable * pDecimalMT = GetTypeHandleForCVType(CV_DECIMAL).GetMethodTable();
_ASSERTE(pDecimalMT);
OBJECTREF pDecimalRef = AllocateObject(pDecimalMT);
*(DECIMAL *) pDecimalRef->GetData() = V_DECIMAL(oa);
var->SetObjRef(pDecimalRef);
break;
}
// All types less than 4 bytes need an explicit cast from their original
// type to be sign extended to 8 bytes. This makes Variant's ToInt32
// function simpler for these types.
case CV_I1:
var->SetDataAsInt64(V_I1(oa));
break;
case CV_U1:
var->SetDataAsInt64(V_UI1(oa));
break;
case CV_I2:
var->SetDataAsInt64(V_I2(oa));
break;
case CV_U2:
var->SetDataAsInt64(V_UI2(oa));
break;
case CV_EMPTY:
case CV_NULL:
// Must set up the Variant's m_or to the appropriate classes.
// Note that OleAut doesn't have any VT_MISSING.
VariantData::NewVariant(var, type, NULL
DEBUG_ARG(TRUE));
break;
case CV_OBJECT:
{
// Convert the IUnknown pointer to an OBJECTREF.
OBJECTREF oref = NULL;
GCPROTECT_BEGIN(oref);
GetObjectRefFromComIP(&oref, V_UNKNOWN(oa));
var->SetObjRef(oref);
GCPROTECT_END();
break;
}
default:
// Copy all the bits there, and make sure we don't do any float to int conversions.
void * src = (void*) &(V_UI1(oa));
var->SetData(src);
}
}
void COMOAVariant::OAFailed(const HRESULT hr)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_ANY;
PRECONDITION(FAILED(hr));
}
CONTRACTL_END;
switch (hr)
{
case E_OUTOFMEMORY:
COMPlusThrowOM();
case DISP_E_BADVARTYPE:
COMPlusThrow(kNotSupportedException, W("NotSupported_OleAutBadVarType"));
case DISP_E_DIVBYZERO:
COMPlusThrow(kDivideByZeroException);
case DISP_E_OVERFLOW:
COMPlusThrow(kOverflowException);
case DISP_E_TYPEMISMATCH:
COMPlusThrow(kInvalidCastException, W("InvalidCast_OATypeMismatch"));
case E_INVALIDARG:
COMPlusThrow(kArgumentException);
break;
default:
_ASSERTE(!"Unrecognized HResult - OAVariantLib routine failed in an unexpected way!");
COMPlusThrowHR(hr);
}
}
FCIMPL6(void, COMOAVariant::ChangeTypeEx, VariantData *result, VariantData *op, LCID lcid, void *targetType, int cvType, INT16 flags)
{
CONTRACTL
{
FCALL_CHECK;
PRECONDITION(CheckPointer(result));
}
CONTRACTL_END;
HELPER_METHOD_FRAME_BEGIN_0();
GCPROTECT_BEGININTERIOR (result);
BOOL fConverted = FALSE;
TypeHandle thTarget = TypeHandle::FromPtr(targetType);
if (cvType == CV_OBJECT && IsTypeRefOrDef(g_ColorClassName, thTarget.GetModule(), thTarget.GetCl()))
{
if (op->GetType() == CV_I4 || op->GetType() == CV_U4)
{
// Int32/UInt32 can be converted to System.Drawing.Color
SYSTEMCOLOR SystemColor;
ConvertOleColorToSystemColor(op->GetDataAsUInt32(), &SystemColor);
result->SetObjRef(thTarget.AsMethodTable()->Box(&SystemColor));
result->SetType(CV_OBJECT);
fConverted = TRUE;
}
}
if (!fConverted)
{
VariantHolder ret;
VariantHolder vOp;
VARENUM vt = CVtoVT((CVTypes) cvType);
ToOAVariant(op, &vOp);
HRESULT hr = SafeVariantChangeTypeEx(&ret, &vOp, lcid, flags, static_cast<VARTYPE>(vt));
if (FAILED(hr))
OAFailed(hr);
if ((CVTypes) cvType == CV_CHAR)
{
result->SetType(CV_CHAR);
result->SetDataAsUInt16(V_UI2(&ret));
}
else
{
FromOAVariant(&ret, result);
}
}
GCPROTECT_END ();
HELPER_METHOD_FRAME_END();
}
FCIMPLEND
#endif // FEATURE_COMINTEROP
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// File: OAVariant.cpp
//
//
// Purpose: Wrapper for Ole Automation compatible math ops.
// Calls through to OleAut.dll
//
//
#include <common.h>
#ifdef FEATURE_COMINTEROP
#include <oleauto.h>
#include "excep.h"
#include "oavariant.h"
#include "comdatetime.h" // DateTime <-> OleAut date conversions
#include "interoputil.h"
#include "interopconverter.h"
#include "excep.h"
#include "string.h"
#include "comutilnative.h" // for COMDate
#define INVALID_MAPPING (BYTE)(-1)
static const BYTE CVtoVTTable [] =
{
VT_EMPTY, // CV_EMPTY
VT_VOID, // CV_VOID
VT_BOOL, // CV_BOOLEAN
VT_UI2, // CV_CHAR
VT_I1, // CV_I1
VT_UI1, // CV_U1
VT_I2, // CV_I2
VT_UI2, // CV_U2
VT_I4, // CV_I4
VT_UI4, // CV_U4
VT_I8, // CV_I8
VT_UI8, // CV_U8
VT_R4, // CV_R4
VT_R8, // CV_R8
VT_BSTR, // CV_STRING
INVALID_MAPPING, // CV_PTR
VT_DATE, // CV_DATETIME
INVALID_MAPPING, // CV_TIMESPAN
VT_UNKNOWN, // CV_OBJECT
VT_DECIMAL, // CV_DECIMAL
VT_CY, // CV_CURRENCY
INVALID_MAPPING, // CV_ENUM
INVALID_MAPPING, // CV_MISSING
VT_NULL, // CV_NULL
INVALID_MAPPING // CV_LAST
};
static const BYTE VTtoCVTable[] =
{
CV_EMPTY, // VT_EMPTY
CV_NULL, // VT_NULL
CV_I2, // VT_I2
CV_I4, // VT_I4
CV_R4, // VT_R4
CV_R8, // VT_R8
CV_CURRENCY,// VT_CY
CV_DATETIME,// VT_DATE
CV_STRING, // VT_BSTR
INVALID_MAPPING, // VT_DISPATCH
INVALID_MAPPING, // VT_ERROR
CV_BOOLEAN, // VT_BOOL
CV_OBJECT, // VT_VARIANT
CV_OBJECT, // VT_UNKNOWN
CV_DECIMAL, // VT_DECIMAL
INVALID_MAPPING, // An unused enum table entry
CV_I1, // VT_I1
CV_U1, // VT_UI1
CV_U2, // VT_UI2
CV_U4, // VT_UI4
CV_I8, // VT_I8
CV_U8, // VT_UI8
CV_I4, // VT_INT
CV_U4, // VT_UINT
CV_VOID // VT_VOID
};
// Need translations from CVType to VARENUM and vice versa. CVTypes
// is defined in COMVariant.h. VARENUM is defined in OleAut's variant.h
// Assumption here is we will only deal with VARIANTs and not other OLE
// constructs such as property sets or safe arrays.
VARENUM COMOAVariant::CVtoVT(const CVTypes cv)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_ANY;
PRECONDITION(cv >= 0 && cv < CV_LAST);
}
CONTRACTL_END;
if (CVtoVTTable[cv] == INVALID_MAPPING)
COMPlusThrow(kNotSupportedException, W("NotSupported_ChangeType"));
return (VARENUM) CVtoVTTable[cv];
}
// Need translations from CVType to VARENUM and vice versa. CVTypes
// is defined in COMVariant.h. VARENUM is defined in OleAut's variant.h
CVTypes COMOAVariant::VTtoCV(const VARENUM vt)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_ANY;
PRECONDITION(vt < VT_VOID);
}
CONTRACTL_END;
if (vt <0 || vt > VT_VOID || VTtoCVTable[vt]==INVALID_MAPPING)
COMPlusThrow(kNotSupportedException, W("NotSupported_ChangeType"));
return (CVTypes) VTtoCVTable[vt];
}
// Converts a COM+ Variant to an OleAut Variant. Returns true if
// there was a native object allocated by this method that must be freed,
// else false.
bool COMOAVariant::ToOAVariant(const VariantData * const var, VARIANT * oa)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
INJECT_FAULT(COMPlusThrowOM());
PRECONDITION(CheckPointer(var));
PRECONDITION(CheckPointer(oa));
}
CONTRACTL_END;
SafeVariantInit(oa);
UINT64 * dest = (UINT64*) &V_UI1(oa);
*dest = 0;
WCHAR * chars;
int strLen;
// Set the data field of the OA Variant to be either the object reference
// or the data (ie int) that it needs.
switch (var->GetType())
{
case CV_STRING:
if (var->GetObjRef() == NULL)
{
V_BSTR(oa) = NULL;
V_VT(oa) = static_cast<VARTYPE>(CVtoVT(var->GetType()));
// OA perf feature: VarClear calls SysFreeString(null), which access violates.
return false;
}
((STRINGREF) (var->GetObjRef()))->RefInterpretGetStringValuesDangerousForGC(&chars, &strLen);
V_BSTR(oa) = SysAllocStringLen(chars, strLen);
if (V_BSTR(oa) == NULL)
COMPlusThrowOM();
V_VT(oa) = static_cast<VARTYPE>(CVtoVT(var->GetType()));
return true;
case CV_CHAR:
chars = (WCHAR*) var->GetData();
V_BSTR(oa) = SysAllocStringLen(chars, 1);
if (V_BSTR(oa) == NULL)
COMPlusThrowOM();
        // We should override the CVtoVT default of VT_UI2 for this case.
V_VT(oa) = VT_BSTR;
return true;
case CV_DATETIME:
V_DATE(oa) = COMDateTime::TicksToDoubleDate(var->GetDataAsInt64());
V_VT(oa) = static_cast<VARTYPE>(CVtoVT(var->GetType()));
return false;
case CV_BOOLEAN:
V_BOOL(oa) = (var->GetDataAsInt64()==0 ? VARIANT_FALSE : VARIANT_TRUE);
V_VT(oa) = static_cast<VARTYPE>(CVtoVT(var->GetType()));
return false;
case CV_DECIMAL:
{
OBJECTREF obj = var->GetObjRef();
DECIMAL * d = (DECIMAL*) obj->GetData();
// DECIMALs and Variants are the same size. Variants are a union between
// all the normal Variant fields (vt, bval, etc) and a Decimal. Decimals
// also have the first 2 bytes reserved, for a VT field.
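            // (Sketch of why the two assignments below are ordered this way: copying the
            // whole 16-byte DECIMAL first fills those reserved leading bytes with whatever
            // the source held, and stamping VT_DECIMAL over them afterwards is what turns
            // the blob into a well-formed VARIANT.)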
V_DECIMAL(oa) = *d;
V_VT(oa) = VT_DECIMAL;
return false;
}
case CV_OBJECT:
{
OBJECTREF obj = var->GetObjRef();
GCPROTECT_BEGIN(obj)
{
IUnknown *pUnk = NULL;
// Convert the object to an IDispatch/IUnknown pointer.
ComIpType FetchedIpType = ComIpType_None;
pUnk = GetComIPFromObjectRef(&obj, ComIpType_Both, &FetchedIpType);
V_UNKNOWN(oa) = pUnk;
V_VT(oa) = static_cast<VARTYPE>(FetchedIpType == ComIpType_Dispatch ? VT_DISPATCH : VT_UNKNOWN);
}
GCPROTECT_END();
return true;
}
default:
*dest = var->GetDataAsInt64();
V_VT(oa) = static_cast<VARTYPE>(CVtoVT(var->GetType()));
return false;
}
}
// Converts an OleAut Variant into a COM+ Variant.
// Note that we pass the VariantData Byref so that if GC happens, 'var' gets updated
void COMOAVariant::FromOAVariant(const VARIANT * const oa, VariantData * const& var)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
INJECT_FAULT(COMPlusThrowOM());
PRECONDITION(CheckPointer(oa));
}
CONTRACTL_END;
// Clear the return variant value. It's allocated on
// the stack and we only want valid state data in there.
memset(var, 0, sizeof(VariantData));
CVTypes type = VTtoCV((VARENUM) V_VT(oa));
var->SetType(type);
switch (type)
{
case CV_STRING:
{
// BSTRs have an int with the string buffer length (not the string length)
// followed by the data. The pointer to the BSTR points to the start of the
// characters, NOT the start of the BSTR.
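            // For example, SysAllocStringLen(L"hi", 2) lays out roughly:
            //   [ 04 00 00 00 ][ 'h' 00 'i' 00 ][ 00 00 ]
            //     ^byte count    ^BSTR pointer     ^null terminator
            // SysStringLen below reads that prefix, so embedded nulls are preserved.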
WCHAR * chars = V_BSTR(oa);
int strLen = SysStringLen(V_BSTR(oa));
STRINGREF str = StringObject::NewString(chars, strLen);
var->SetObjRef((OBJECTREF)str);
break;
}
case CV_DATETIME:
var->SetDataAsInt64(COMDateTime::DoubleDateToTicks(V_DATE(oa)));
break;
case CV_BOOLEAN:
var->SetDataAsInt64(V_BOOL(oa)==VARIANT_FALSE ? 0 : 1);
break;
case CV_DECIMAL:
{
MethodTable * pDecimalMT = GetTypeHandleForCVType(CV_DECIMAL).GetMethodTable();
_ASSERTE(pDecimalMT);
OBJECTREF pDecimalRef = AllocateObject(pDecimalMT);
*(DECIMAL *) pDecimalRef->GetData() = V_DECIMAL(oa);
var->SetObjRef(pDecimalRef);
break;
}
// All types less than 4 bytes need an explicit cast from their original
// type to be sign extended to 8 bytes. This makes Variant's ToInt32
// function simpler for these types.
case CV_I1:
var->SetDataAsInt64(V_I1(oa));
break;
case CV_U1:
var->SetDataAsInt64(V_UI1(oa));
break;
case CV_I2:
var->SetDataAsInt64(V_I2(oa));
break;
case CV_U2:
var->SetDataAsInt64(V_UI2(oa));
break;
case CV_EMPTY:
case CV_NULL:
// Must set up the Variant's m_or to the appropriate classes.
// Note that OleAut doesn't have any VT_MISSING.
VariantData::NewVariant(var, type, NULL
DEBUG_ARG(TRUE));
break;
case CV_OBJECT:
{
// Convert the IUnknown pointer to an OBJECTREF.
OBJECTREF oref = NULL;
GCPROTECT_BEGIN(oref);
GetObjectRefFromComIP(&oref, V_UNKNOWN(oa));
var->SetObjRef(oref);
GCPROTECT_END();
break;
}
default:
// Copy all the bits there, and make sure we don't do any float to int conversions.
void * src = (void*) &(V_UI1(oa));
var->SetData(src);
}
}
void COMOAVariant::OAFailed(const HRESULT hr)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_ANY;
PRECONDITION(FAILED(hr));
}
CONTRACTL_END;
switch (hr)
{
case E_OUTOFMEMORY:
COMPlusThrowOM();
case DISP_E_BADVARTYPE:
COMPlusThrow(kNotSupportedException, W("NotSupported_OleAutBadVarType"));
case DISP_E_DIVBYZERO:
COMPlusThrow(kDivideByZeroException);
case DISP_E_OVERFLOW:
COMPlusThrow(kOverflowException);
case DISP_E_TYPEMISMATCH:
COMPlusThrow(kInvalidCastException, W("InvalidCast_OATypeMismatch"));
case E_INVALIDARG:
COMPlusThrow(kArgumentException);
break;
default:
_ASSERTE(!"Unrecognized HResult - OAVariantLib routine failed in an unexpected way!");
COMPlusThrowHR(hr);
}
}
FCIMPL6(void, COMOAVariant::ChangeTypeEx, VariantData *result, VariantData *op, LCID lcid, void *targetType, int cvType, INT16 flags)
{
CONTRACTL
{
FCALL_CHECK;
PRECONDITION(CheckPointer(result));
}
CONTRACTL_END;
HELPER_METHOD_FRAME_BEGIN_0();
GCPROTECT_BEGININTERIOR (result);
BOOL fConverted = FALSE;
TypeHandle thTarget = TypeHandle::FromPtr(targetType);
if (cvType == CV_OBJECT && IsTypeRefOrDef(g_ColorClassName, thTarget.GetModule(), thTarget.GetCl()))
{
if (op->GetType() == CV_I4 || op->GetType() == CV_U4)
{
// Int32/UInt32 can be converted to System.Drawing.Color
SYSTEMCOLOR SystemColor;
ConvertOleColorToSystemColor(op->GetDataAsUInt32(), &SystemColor);
result->SetObjRef(thTarget.AsMethodTable()->Box(&SystemColor));
result->SetType(CV_OBJECT);
fConverted = TRUE;
}
}
if (!fConverted)
{
VariantHolder ret;
VariantHolder vOp;
VARENUM vt = CVtoVT((CVTypes) cvType);
ToOAVariant(op, &vOp);
HRESULT hr = SafeVariantChangeTypeEx(&ret, &vOp, lcid, flags, static_cast<VARTYPE>(vt));
if (FAILED(hr))
OAFailed(hr);
if ((CVTypes) cvType == CV_CHAR)
{
result->SetType(CV_CHAR);
result->SetDataAsUInt16(V_UI2(&ret));
}
else
{
FromOAVariant(&ret, result);
}
}
GCPROTECT_END ();
HELPER_METHOD_FRAME_END();
}
FCIMPLEND
#endif // FEATURE_COMINTEROP
| -1 |
dotnet/runtime | 65,922 | Rely on PGO for isinst/castclass | Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
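Roughly, the guarded expansion the JIT can emit once it has that class-profile data looks like the hand-written sketch below (illustrative only, not the actual codegen; `Unsafe.As` stands in for the type-check-free reinterpretation the JIT performs after the guard):

```csharp
static ClassA CastToClassA(object o)
{
    // PGO says o is almost always ClassB, so test that exact type first
    if (o != null && o.GetType() == typeof(ClassB))
        return Unsafe.As<ClassA>(o);   // fast path: no cast helper call
    return (ClassA)o;                  // cold path: fall back to the cast helper
}
```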
Codegen diff: https://www.diffchecker.com/RFblv9RB | EgorBo | "2022-02-26T22:13:46Z" | "2022-04-01T21:25:56Z" | 063d08018602291f8dbd9d9b6055b8e1af874cb8 | f249a3d58b0b2bb55adca1a56c5a39cf446437b7 | Rely on PGO for isinst/castclass. Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | ./src/libraries/System.Security.Cryptography/src/System/Security/Cryptography/X509Certificates/X509Chain.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Diagnostics;
using System.Runtime.Versioning;
using SafeX509ChainHandle = Microsoft.Win32.SafeHandles.SafeX509ChainHandle;
namespace System.Security.Cryptography.X509Certificates
{
public class X509Chain : IDisposable
{
private X509ChainPolicy? _chainPolicy;
private volatile X509ChainStatus[]? _lazyChainStatus;
private X509ChainElementCollection? _chainElements;
private IChainPal? _pal;
private bool _useMachineContext;
private readonly object _syncRoot = new object();
public X509Chain() { }
public X509Chain(bool useMachineContext)
{
_useMachineContext = useMachineContext;
}
[SupportedOSPlatform("windows")]
public X509Chain(IntPtr chainContext)
{
_pal = ChainPal.FromHandle(chainContext);
Debug.Assert(_pal != null);
_chainElements = new X509ChainElementCollection(_pal.ChainElements!);
}
public static X509Chain Create()
{
return new X509Chain();
}
public X509ChainElementCollection ChainElements
{
get
{
if (_chainElements == null)
_chainElements = new X509ChainElementCollection();
return _chainElements;
}
}
public X509ChainPolicy ChainPolicy
{
get
{
if (_chainPolicy == null)
_chainPolicy = new X509ChainPolicy();
return _chainPolicy;
}
set
{
ArgumentNullException.ThrowIfNull(value);
_chainPolicy = value;
}
}
public X509ChainStatus[] ChainStatus
{
get
{
// We give the user a reference to the array since we'll never access it.
X509ChainStatus[]? chainStatus = _lazyChainStatus;
if (chainStatus == null)
chainStatus = _lazyChainStatus = (_pal == null ? Array.Empty<X509ChainStatus>() : _pal.ChainStatus!);
return chainStatus;
}
}
public IntPtr ChainContext
{
get
{
SafeX509ChainHandle? handle = SafeHandle;
if (handle == null)
{
// This case will only exist for Unix
return IntPtr.Zero;
}
// For .NET Framework compat, we may return an invalid handle here (IntPtr.Zero)
return handle.DangerousGetHandle();
}
}
public SafeX509ChainHandle? SafeHandle
{
get
{
if (_pal == null)
return SafeX509ChainHandle.InvalidHandle;
return _pal.SafeHandle;
}
}
[UnsupportedOSPlatform("browser")]
public bool Build(X509Certificate2 certificate)
{
return Build(certificate, true);
}
internal bool Build(X509Certificate2 certificate, bool throwOnException)
{
lock (_syncRoot)
{
if (certificate == null || certificate.Pal == null)
throw new ArgumentException(SR.Cryptography_InvalidContextHandle, nameof(certificate));
if (_chainPolicy != null && _chainPolicy.CustomTrustStore != null)
{
if (_chainPolicy.TrustMode == X509ChainTrustMode.System && _chainPolicy.CustomTrustStore.Count > 0)
throw new CryptographicException(SR.Cryptography_CustomTrustCertsInSystemMode);
foreach (X509Certificate2 customCertificate in _chainPolicy.CustomTrustStore)
{
if (customCertificate == null || customCertificate.Handle == IntPtr.Zero)
{
throw new CryptographicException(SR.Cryptography_InvalidTrustCertificate);
}
}
}
Reset();
X509ChainPolicy chainPolicy = ChainPolicy;
_pal = ChainPal.BuildChain(
_useMachineContext,
certificate.Pal,
chainPolicy._extraStore,
chainPolicy._applicationPolicy!,
chainPolicy._certificatePolicy!,
chainPolicy.RevocationMode,
chainPolicy.RevocationFlag,
chainPolicy._customTrustStore,
chainPolicy.TrustMode,
chainPolicy.VerificationTime,
chainPolicy.UrlRetrievalTimeout,
chainPolicy.DisableCertificateDownloads);
if (_pal == null)
return false;
_chainElements = new X509ChainElementCollection(_pal.ChainElements!);
Exception? verificationException;
bool? verified = _pal.Verify(chainPolicy.VerificationFlags, out verificationException);
if (!verified.HasValue)
{
if (throwOnException)
{
throw verificationException!;
}
else
{
verified = false;
}
}
return verified.Value;
}
}
public void Dispose()
{
Dispose(true);
GC.SuppressFinalize(this);
}
protected virtual void Dispose(bool disposing)
{
if (disposing)
{
Reset();
}
}
public void Reset()
{
// _chainPolicy is not reset for .NET Framework compat
_lazyChainStatus = null;
_chainElements = null;
_useMachineContext = false;
IChainPal? pal = _pal;
_pal = null;
if (pal != null)
pal.Dispose();
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Diagnostics;
using System.Runtime.Versioning;
using SafeX509ChainHandle = Microsoft.Win32.SafeHandles.SafeX509ChainHandle;
namespace System.Security.Cryptography.X509Certificates
{
public class X509Chain : IDisposable
{
private X509ChainPolicy? _chainPolicy;
private volatile X509ChainStatus[]? _lazyChainStatus;
private X509ChainElementCollection? _chainElements;
private IChainPal? _pal;
private bool _useMachineContext;
private readonly object _syncRoot = new object();
public X509Chain() { }
public X509Chain(bool useMachineContext)
{
_useMachineContext = useMachineContext;
}
[SupportedOSPlatform("windows")]
public X509Chain(IntPtr chainContext)
{
_pal = ChainPal.FromHandle(chainContext);
Debug.Assert(_pal != null);
_chainElements = new X509ChainElementCollection(_pal.ChainElements!);
}
public static X509Chain Create()
{
return new X509Chain();
}
public X509ChainElementCollection ChainElements
{
get
{
if (_chainElements == null)
_chainElements = new X509ChainElementCollection();
return _chainElements;
}
}
public X509ChainPolicy ChainPolicy
{
get
{
if (_chainPolicy == null)
_chainPolicy = new X509ChainPolicy();
return _chainPolicy;
}
set
{
ArgumentNullException.ThrowIfNull(value);
_chainPolicy = value;
}
}
public X509ChainStatus[] ChainStatus
{
get
{
// We give the user a reference to the array since we'll never access it.
X509ChainStatus[]? chainStatus = _lazyChainStatus;
if (chainStatus == null)
chainStatus = _lazyChainStatus = (_pal == null ? Array.Empty<X509ChainStatus>() : _pal.ChainStatus!);
return chainStatus;
}
}
public IntPtr ChainContext
{
get
{
SafeX509ChainHandle? handle = SafeHandle;
if (handle == null)
{
// This case will only exist for Unix
return IntPtr.Zero;
}
// For .NET Framework compat, we may return an invalid handle here (IntPtr.Zero)
return handle.DangerousGetHandle();
}
}
public SafeX509ChainHandle? SafeHandle
{
get
{
if (_pal == null)
return SafeX509ChainHandle.InvalidHandle;
return _pal.SafeHandle;
}
}
[UnsupportedOSPlatform("browser")]
public bool Build(X509Certificate2 certificate)
{
return Build(certificate, true);
}
internal bool Build(X509Certificate2 certificate, bool throwOnException)
{
lock (_syncRoot)
{
if (certificate == null || certificate.Pal == null)
throw new ArgumentException(SR.Cryptography_InvalidContextHandle, nameof(certificate));
if (_chainPolicy != null && _chainPolicy.CustomTrustStore != null)
{
if (_chainPolicy.TrustMode == X509ChainTrustMode.System && _chainPolicy.CustomTrustStore.Count > 0)
throw new CryptographicException(SR.Cryptography_CustomTrustCertsInSystemMode);
foreach (X509Certificate2 customCertificate in _chainPolicy.CustomTrustStore)
{
if (customCertificate == null || customCertificate.Handle == IntPtr.Zero)
{
throw new CryptographicException(SR.Cryptography_InvalidTrustCertificate);
}
}
}
Reset();
X509ChainPolicy chainPolicy = ChainPolicy;
_pal = ChainPal.BuildChain(
_useMachineContext,
certificate.Pal,
chainPolicy._extraStore,
chainPolicy._applicationPolicy!,
chainPolicy._certificatePolicy!,
chainPolicy.RevocationMode,
chainPolicy.RevocationFlag,
chainPolicy._customTrustStore,
chainPolicy.TrustMode,
chainPolicy.VerificationTime,
chainPolicy.UrlRetrievalTimeout,
chainPolicy.DisableCertificateDownloads);
if (_pal == null)
return false;
_chainElements = new X509ChainElementCollection(_pal.ChainElements!);
Exception? verificationException;
bool? verified = _pal.Verify(chainPolicy.VerificationFlags, out verificationException);
if (!verified.HasValue)
{
if (throwOnException)
{
throw verificationException!;
}
else
{
verified = false;
}
}
return verified.Value;
}
}
public void Dispose()
{
Dispose(true);
GC.SuppressFinalize(this);
}
protected virtual void Dispose(bool disposing)
{
if (disposing)
{
Reset();
}
}
public void Reset()
{
// _chainPolicy is not reset for .NET Framework compat
_lazyChainStatus = null;
_chainElements = null;
_useMachineContext = false;
IChainPal? pal = _pal;
_pal = null;
if (pal != null)
pal.Dispose();
}
}
}
| -1 |
dotnet/runtime | 65,922 | Rely on PGO for isinst/castclass | Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | EgorBo | "2022-02-26T22:13:46Z" | "2022-04-01T21:25:56Z" | 063d08018602291f8dbd9d9b6055b8e1af874cb8 | f249a3d58b0b2bb55adca1a56c5a39cf446437b7 | Rely on PGO for isinst/castclass. Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | ./src/libraries/Common/tests/System/Xml/BaseLibManaged/Globalization.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
namespace WebData.BaseLib
{
public class WebDataBaseLibException : System.Exception
{
public WebDataBaseLibException(string sExceptionMsg) : base(sExceptionMsg)
{
}
}
public class StringGen
{
public int seed;
private string _fileLocation = string.Empty;
protected static int[] cWhitespaceMap = {
0x20, 0x9, 0xD, 0xA
};
protected static int[] cBaseCharMap = {
//Base Chars::
0x0041, 0x005A, 0x0061, 0x007A, 0x00C0, 0x00D6, 0x00D8, 0x00F6, 0x00F8, 0x00FF,
0x0100, 0x0131, 0x0134, 0x013E, 0x0141, 0x0148, 0x014A, 0x017E, 0x0180, 0x01C3,
0x01CD, 0x01F0, 0x01F4, 0x01F5, 0x01FA, 0x0217, 0x0250, 0x02A8, 0x02BB, 0x02C1,
0x0386, 0x0386, 0x0388, 0x038A, 0x038C, 0x038C, 0x038E, 0x03A1, 0x03A3, 0x03CE, 0x03D0, 0x03D6,
0x03DA, 0x03DA, 0x03DC, 0x03DC, 0x03DE, 0x03DE, 0x03E0, 0x03E0, 0x03E2, 0x03F3, 0x0401, 0x040C, 0x040E, 0x044F,
0x0451, 0x045C, 0x045E, 0x0481, 0x0490, 0x04C4, 0x04C7, 0x04C8, 0x04CB, 0x04CC,
0x04D0, 0x04EB, 0x04EE, 0x04F5, 0x04F8, 0x04F9, 0x0531, 0x0556, 0x0559, 0x0559,
0x0561, 0x0586, 0x05D0, 0x05EA, 0x05F0, 0x05F2, 0x0621, 0x063A, 0x0641, 0x064A,
0x0671, 0x06B7, 0x06BA, 0x06BE, 0x06C0, 0x06CE, 0x06D0, 0x06D3, 0x06D5, 0x06D5,
0x06E5, 0x06E6, 0x0905, 0x0939, 0x093D, 0x093D, 0x0958, 0x0961, 0x0985, 0x098C,
0x098F, 0x0990, 0x0993, 0x09A8, 0x09AA, 0x09B0, 0x09B2, 0x09B2, 0x09B6, 0x09B9,
0x09DC, 0x09DD, 0x09DF, 0x09E1, 0x09F0, 0x09F1, 0x0A05, 0x0A0A, 0x0A0F, 0x0A10,
0x0A13, 0x0A28, 0x0A2A, 0x0A30, 0x0A32, 0x0A33, 0x0A35, 0x0A36, 0x0A38, 0x0A39,
0x0A59, 0x0A5C, 0x0A5E, 0x0A5E, 0x0A72, 0x0A74, 0x0A85, 0x0A8B, 0x0A8D, 0x0A8D, 0x0A8F, 0x0A91,
0x0A93, 0x0AA8, 0x0AAA, 0x0AB0, 0x0AB2, 0x0AB3, 0x0AB5, 0x0AB9, 0x0ABD, 0x0ABD, 0x0AE0, 0x0AE0,
0x0B05, 0x0B0C, 0x0B0F, 0x0B10, 0x0B13, 0x0B28, 0x0B2A, 0x0B30, 0x0B32, 0x0B33,
0x0B36, 0x0B39, 0x0B3D, 0x0B3D, 0x0B5C, 0x0B5D, 0x0B5F, 0x0B61, 0x0B85, 0x0B8A,
0x0B8E, 0x0B90, 0x0B92, 0x0B95, 0x0B99, 0x0B9A, 0x0B9C, 0x0B9C, 0x0B9E, 0x0B9F,
0x0BA3, 0x0BA4, 0x0BA8, 0x0BAA, 0x0BAE, 0x0BB5, 0x0BB7, 0x0BB9, 0x0C05, 0x0C0C,
0x0C0E, 0x0C10, 0x0C12, 0x0C28, 0x0C2A, 0x0C33, 0x0C35, 0x0C39, 0x0C60, 0x0C61,
0x0C85, 0x0C8C, 0x0C8E, 0x0C90, 0x0C92, 0x0CA8, 0x0CAA, 0x0CB3, 0x0CB5, 0x0CB9,
0x0CDE, 0x0CDE, 0x0CE0, 0x0CE1, 0x0D05, 0x0D0C, 0x0D0E, 0x0D10, 0x0D12, 0x0D28,
0x0D2A, 0x0D39, 0x0D60, 0x0D61, 0x0E01, 0x0E2E, 0x0E30, 0x0E30, 0x0E32, 0x0E33,
0x0E40, 0x0E45, 0x0E81, 0x0E82, 0x0E84, 0x0E84, 0x0E87, 0x0E88, 0x0E8A, 0x0E8A, 0x0E8D, 0x0E8D,
0x0E94, 0x0E97, 0x0E99, 0x0E9F, 0x0EA1, 0x0EA3, 0x0EA5, 0x0EA5, 0x0EA7, 0x0EA7, 0x0EAA, 0x0EAB,
0x0EAD, 0x0EAE, 0x0EB0, 0x0EB0, 0x0EB2, 0x0EB3, 0x0EBD, 0x0EBD, 0x0EC0, 0x0EC4, 0x0F40, 0x0F47,
0x0F49, 0x0F69, 0x10A0, 0x10C5, 0x10D0, 0x10F6, 0x1100, 0x1100, 0x1102, 0x1103,
0x1105, 0x1107, 0x1109, 0x1109, 0x110B, 0x110C, 0x110E, 0x1112, 0x113C, 0x113C, 0x113E, 0x113E, 0x1140, 0x1140,
0x114C, 0x114C, 0x114E, 0x114E, 0x1150, 0x1150, 0x1154, 0x1155, 0x1159, 0x1159, 0x115F, 0x1161, 0x1163, 0x1163, 0x1165, 0x1165,
0x1167, 0x1167, 0x1169, 0x1169, 0x116D, 0x116E, 0x1172, 0x1173, 0x1175, 0x1175, 0x119E, 0x119E, 0x11A8, 0x11A8, 0x11AB, 0x11AB,
0x11AE, 0x11AF, 0x11B7, 0x11B8, 0x11BA, 0x11BA, 0x11BC, 0x11C2, 0x11EB, 0x11EB, 0x11F0, 0x11F0, 0x11F9, 0x11F9,
0x1E00, 0x1E9B, 0x1EA0, 0x1EF9, 0x1F00, 0x1F15, 0x1F18, 0x1F1D, 0x1F20, 0x1F45,
0x1F48, 0x1F4D, 0x1F50, 0x1F57, 0x1F59, 0x1F59, 0x1F5B, 0x1F5B, 0x1F5D, 0x1F5D, 0x1F5F, 0x1F7D,
0x1F80, 0x1FB4, 0x1FB6, 0x1FBC, 0x1FBE, 0x1FBE, 0x1FC2, 0x1FC4, 0x1FC6, 0x1FCC,
0x1FD0, 0x1FD3, 0x1FD6, 0x1FDB, 0x1FE0, 0x1FEC, 0x1FF2, 0x1FF4, 0x1FF6, 0x1FFC,
0x2126, 0x2126, 0x212A, 0x212B, 0x212E, 0x212E, 0x2180, 0x2182, 0x3041, 0x3094, 0x30A1, 0x30FA,
0x3105, 0x312C, 0xAC00, 0xD7A3,
//Ideographic::
0x4E00, 0x9FA5, 0x3007, 0x3007, 0x3021, 0x3029,
//Combining Chars::
0x0300, 0x0345, 0x0360, 0x0361, 0x0483, 0x0486, 0x0591, 0x05A1, 0x05A3, 0x05B9,
0x05BB, 0x05BD, 0x05BF, 0x05BF, 0x05C1, 0x05C2, 0x05C4, 0x05C4, 0x064B, 0x0652, 0x0670, 0x0670,
0x06D6, 0x06DC, 0x06DD, 0x06DF, 0x06E0, 0x06E4, 0x06E7, 0x06E8, 0x06EA, 0x06ED,
0x0901, 0x0903, 0x093C, 0x093C, 0x093E, 0x094C, 0x094D, 0x094D, 0x0951, 0x0954, 0x0962, 0x0963,
0x0981, 0x0983, 0x09BC, 0x09BC, 0x09BE, 0x09BE, 0x09BF, 0x09BF, 0x09C0, 0x09C4, 0x09C7, 0x09C8,
0x09CB, 0x09CD, 0x09D7, 0x09D7, 0x09E2, 0x09E3, 0x0A02, 0x0A02, 0x0A3C, 0x0A3C, 0x0A3E, 0x0A3E, 0x0A3F, 0x0A3F,
0x0A40, 0x0A42, 0x0A47, 0x0A48, 0x0A4B, 0x0A4D, 0x0A70, 0x0A71, 0x0A81, 0x0A83,
0x0ABC, 0x0ABC, 0x0ABE, 0x0AC5, 0x0AC7, 0x0AC9, 0x0ACB, 0x0ACD, 0x0B01, 0x0B03, 0x0B3C, 0x0B3C,
0x0B3E, 0x0B43, 0x0B47, 0x0B48, 0x0B4B, 0x0B4D, 0x0B56, 0x0B57, 0x0B82, 0x0B83,
0x0BBE, 0x0BC2, 0x0BC6, 0x0BC8, 0x0BCA, 0x0BCD, 0x0BD7, 0x0BD7, 0x0C01, 0x0C03,
0x0C3E, 0x0C44, 0x0C46, 0x0C48, 0x0C4A, 0x0C4D, 0x0C55, 0x0C56, 0x0C82, 0x0C83,
0x0CBE, 0x0CC4, 0x0CC6, 0x0CC8, 0x0CCA, 0x0CCD, 0x0CD5, 0x0CD6, 0x0D02, 0x0D03,
0x0D3E, 0x0D43, 0x0D46, 0x0D48, 0x0D4A, 0x0D4D, 0x0D57, 0x0D57, 0x0E31, 0x0E31, 0x0E34, 0x0E3A,
0x0E47, 0x0E4E, 0x0EB1, 0x0EB1, 0x0EB4, 0x0EB9, 0x0EBB, 0x0EBC, 0x0EC8, 0x0ECD,
0x0F18, 0x0F19, 0x0F35, 0x0F35, 0x0F37, 0x0F37, 0x0F39, 0x0F39, 0x0F3E, 0x0F3E, 0x0F3F, 0x0F3F, 0x0F71, 0x0F84,
0x0F86, 0x0F8B, 0x0F90, 0x0F95, 0x0F97, 0x0F97, 0x0F99, 0x0FAD, 0x0FB1, 0x0FB7, 0x0FB9, 0x0FB9,
0x20D0, 0x20DC, 0x20E1, 0x20E1, 0x302A, 0x302F, 0x3099, 0x3099, 0x309A, 0x309A,
//Digit::
0x0030, 0x0039, 0x0660, 0x0669, 0x06F0, 0x06F9, 0x0966, 0x096F, 0x09E6, 0x09EF,
0x0A66, 0x0A6F, 0x0AE6, 0x0AEF, 0x0B66, 0x0B6F, 0x0BE7, 0x0BEF, 0x0C66, 0x0C6F,
0x0CE6, 0x0CEF, 0x0D66, 0x0D6F, 0x0E50, 0x0E59, 0x0ED0, 0x0ED9, 0x0F20, 0x0F29,
//Extender::
0x00B7, 0x00B7, 0x02D0, 0x02D0, 0x02D1, 0x02D1, 0x0387, 0x0387, 0x0640, 0x0640, 0x0E46, 0x0E46, 0x0EC6, 0x0EC6, 0x3005, 0x3005, 0x3031, 0x3035,
0x309D, 0x309E, 0x30FC, 0x30FE
};
public static string GetIllegalXmlString(int iMaxChar, bool bAbsolute)
{
return WebData.BaseLib.StringGen.GetIllegalXmlStringWithSeed(iMaxChar, bAbsolute, 0);
}
public static string GetIllegalXmlStringWithSeed(int iMaxChar, bool bAbsolute, int iSeed)
{
int i = 0;
Random cRandom;
if (iSeed != 0)
{
cRandom = new Random(iSeed);
}
else
{
cRandom = new Random();
}
string sResult = string.Empty;
int iStrLen = bAbsolute ? iMaxChar : cRandom.Next(1, iMaxChar);
            //count how many illegal characters fall in the gaps between the legal ranges in the table.
int iSum = 0;
for (i = 0; i < cBaseCharMap.Length; i += 2)
{
//special processing for 0
if (i == 0)
{
iSum += cBaseCharMap[i] - 1;
}
else
{
                    //as the data in the table is not sequential,
                    //we need to take an educated guess whether this is a real gap:
                    //we assume the range shouldn't be more than 100 chars
                    //as well as that the range shouldn't be negative
if ((cBaseCharMap[i] - cBaseCharMap[i - 1] - 1 > 0) &&
(cBaseCharMap[i] - cBaseCharMap[i - 1] - 1 < 100))
{
iSum += cBaseCharMap[i] - cBaseCharMap[i - 1] - 1;
}
}
}
for (int iChar = 0; iChar < iStrLen; iChar++)
{
//get a rand number out of that
int iRandNum = cRandom.Next(iSum);
//get the right char under the number we just got
i = 0;
int iVal = 0;
while (true)
{
iVal = iRandNum;
//special case for 0
if (i == 0)
{
iRandNum -= cBaseCharMap[i] - 1;
}
else
{
if (cBaseCharMap[i] - cBaseCharMap[i - 1] - 1 > 0)
{
iRandNum -= cBaseCharMap[i] - cBaseCharMap[i - 1] - 1;
}
}
if (iRandNum >= 0)
{
i += 2;
}
else
{
break;
}
}
//special case for i=0
if (i == 0)
{
sResult += (char)iVal;
}
else
{
sResult += (char)(cBaseCharMap[i - 1] + iVal);
}
} //for string
return sResult;
}
} //StringGen
}//namespace
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
namespace WebData.BaseLib
{
public class WebDataBaseLibException : System.Exception
{
public WebDataBaseLibException(string sExceptionMsg) : base(sExceptionMsg)
{
}
}
public class StringGen
{
public int seed;
private string _fileLocation = string.Empty;
protected static int[] cWhitespaceMap = {
0x20, 0x9, 0xD, 0xA
};
protected static int[] cBaseCharMap = {
//Base Chars::
0x0041, 0x005A, 0x0061, 0x007A, 0x00C0, 0x00D6, 0x00D8, 0x00F6, 0x00F8, 0x00FF,
0x0100, 0x0131, 0x0134, 0x013E, 0x0141, 0x0148, 0x014A, 0x017E, 0x0180, 0x01C3,
0x01CD, 0x01F0, 0x01F4, 0x01F5, 0x01FA, 0x0217, 0x0250, 0x02A8, 0x02BB, 0x02C1,
0x0386, 0x0386, 0x0388, 0x038A, 0x038C, 0x038C, 0x038E, 0x03A1, 0x03A3, 0x03CE, 0x03D0, 0x03D6,
0x03DA, 0x03DA, 0x03DC, 0x03DC, 0x03DE, 0x03DE, 0x03E0, 0x03E0, 0x03E2, 0x03F3, 0x0401, 0x040C, 0x040E, 0x044F,
0x0451, 0x045C, 0x045E, 0x0481, 0x0490, 0x04C4, 0x04C7, 0x04C8, 0x04CB, 0x04CC,
0x04D0, 0x04EB, 0x04EE, 0x04F5, 0x04F8, 0x04F9, 0x0531, 0x0556, 0x0559, 0x0559,
0x0561, 0x0586, 0x05D0, 0x05EA, 0x05F0, 0x05F2, 0x0621, 0x063A, 0x0641, 0x064A,
0x0671, 0x06B7, 0x06BA, 0x06BE, 0x06C0, 0x06CE, 0x06D0, 0x06D3, 0x06D5, 0x06D5,
0x06E5, 0x06E6, 0x0905, 0x0939, 0x093D, 0x093D, 0x0958, 0x0961, 0x0985, 0x098C,
0x098F, 0x0990, 0x0993, 0x09A8, 0x09AA, 0x09B0, 0x09B2, 0x09B2, 0x09B6, 0x09B9,
0x09DC, 0x09DD, 0x09DF, 0x09E1, 0x09F0, 0x09F1, 0x0A05, 0x0A0A, 0x0A0F, 0x0A10,
0x0A13, 0x0A28, 0x0A2A, 0x0A30, 0x0A32, 0x0A33, 0x0A35, 0x0A36, 0x0A38, 0x0A39,
0x0A59, 0x0A5C, 0x0A5E, 0x0A5E, 0x0A72, 0x0A74, 0x0A85, 0x0A8B, 0x0A8D, 0x0A8D, 0x0A8F, 0x0A91,
0x0A93, 0x0AA8, 0x0AAA, 0x0AB0, 0x0AB2, 0x0AB3, 0x0AB5, 0x0AB9, 0x0ABD, 0x0ABD, 0x0AE0, 0x0AE0,
0x0B05, 0x0B0C, 0x0B0F, 0x0B10, 0x0B13, 0x0B28, 0x0B2A, 0x0B30, 0x0B32, 0x0B33,
0x0B36, 0x0B39, 0x0B3D, 0x0B3D, 0x0B5C, 0x0B5D, 0x0B5F, 0x0B61, 0x0B85, 0x0B8A,
0x0B8E, 0x0B90, 0x0B92, 0x0B95, 0x0B99, 0x0B9A, 0x0B9C, 0x0B9C, 0x0B9E, 0x0B9F,
0x0BA3, 0x0BA4, 0x0BA8, 0x0BAA, 0x0BAE, 0x0BB5, 0x0BB7, 0x0BB9, 0x0C05, 0x0C0C,
0x0C0E, 0x0C10, 0x0C12, 0x0C28, 0x0C2A, 0x0C33, 0x0C35, 0x0C39, 0x0C60, 0x0C61,
0x0C85, 0x0C8C, 0x0C8E, 0x0C90, 0x0C92, 0x0CA8, 0x0CAA, 0x0CB3, 0x0CB5, 0x0CB9,
0x0CDE, 0x0CDE, 0x0CE0, 0x0CE1, 0x0D05, 0x0D0C, 0x0D0E, 0x0D10, 0x0D12, 0x0D28,
0x0D2A, 0x0D39, 0x0D60, 0x0D61, 0x0E01, 0x0E2E, 0x0E30, 0x0E30, 0x0E32, 0x0E33,
0x0E40, 0x0E45, 0x0E81, 0x0E82, 0x0E84, 0x0E84, 0x0E87, 0x0E88, 0x0E8A, 0x0E8A, 0x0E8D, 0x0E8D,
0x0E94, 0x0E97, 0x0E99, 0x0E9F, 0x0EA1, 0x0EA3, 0x0EA5, 0x0EA5, 0x0EA7, 0x0EA7, 0x0EAA, 0x0EAB,
0x0EAD, 0x0EAE, 0x0EB0, 0x0EB0, 0x0EB2, 0x0EB3, 0x0EBD, 0x0EBD, 0x0EC0, 0x0EC4, 0x0F40, 0x0F47,
0x0F49, 0x0F69, 0x10A0, 0x10C5, 0x10D0, 0x10F6, 0x1100, 0x1100, 0x1102, 0x1103,
0x1105, 0x1107, 0x1109, 0x1109, 0x110B, 0x110C, 0x110E, 0x1112, 0x113C, 0x113C, 0x113E, 0x113E, 0x1140, 0x1140,
0x114C, 0x114C, 0x114E, 0x114E, 0x1150, 0x1150, 0x1154, 0x1155, 0x1159, 0x1159, 0x115F, 0x1161, 0x1163, 0x1163, 0x1165, 0x1165,
0x1167, 0x1167, 0x1169, 0x1169, 0x116D, 0x116E, 0x1172, 0x1173, 0x1175, 0x1175, 0x119E, 0x119E, 0x11A8, 0x11A8, 0x11AB, 0x11AB,
0x11AE, 0x11AF, 0x11B7, 0x11B8, 0x11BA, 0x11BA, 0x11BC, 0x11C2, 0x11EB, 0x11EB, 0x11F0, 0x11F0, 0x11F9, 0x11F9,
0x1E00, 0x1E9B, 0x1EA0, 0x1EF9, 0x1F00, 0x1F15, 0x1F18, 0x1F1D, 0x1F20, 0x1F45,
0x1F48, 0x1F4D, 0x1F50, 0x1F57, 0x1F59, 0x1F59, 0x1F5B, 0x1F5B, 0x1F5D, 0x1F5D, 0x1F5F, 0x1F7D,
0x1F80, 0x1FB4, 0x1FB6, 0x1FBC, 0x1FBE, 0x1FBE, 0x1FC2, 0x1FC4, 0x1FC6, 0x1FCC,
0x1FD0, 0x1FD3, 0x1FD6, 0x1FDB, 0x1FE0, 0x1FEC, 0x1FF2, 0x1FF4, 0x1FF6, 0x1FFC,
0x2126, 0x2126, 0x212A, 0x212B, 0x212E, 0x212E, 0x2180, 0x2182, 0x3041, 0x3094, 0x30A1, 0x30FA,
0x3105, 0x312C, 0xAC00, 0xD7A3,
//Ideographic::
0x4E00, 0x9FA5, 0x3007, 0x3007, 0x3021, 0x3029,
//Combining Chars::
0x0300, 0x0345, 0x0360, 0x0361, 0x0483, 0x0486, 0x0591, 0x05A1, 0x05A3, 0x05B9,
0x05BB, 0x05BD, 0x05BF, 0x05BF, 0x05C1, 0x05C2, 0x05C4, 0x05C4, 0x064B, 0x0652, 0x0670, 0x0670,
0x06D6, 0x06DC, 0x06DD, 0x06DF, 0x06E0, 0x06E4, 0x06E7, 0x06E8, 0x06EA, 0x06ED,
0x0901, 0x0903, 0x093C, 0x093C, 0x093E, 0x094C, 0x094D, 0x094D, 0x0951, 0x0954, 0x0962, 0x0963,
0x0981, 0x0983, 0x09BC, 0x09BC, 0x09BE, 0x09BE, 0x09BF, 0x09BF, 0x09C0, 0x09C4, 0x09C7, 0x09C8,
0x09CB, 0x09CD, 0x09D7, 0x09D7, 0x09E2, 0x09E3, 0x0A02, 0x0A02, 0x0A3C, 0x0A3C, 0x0A3E, 0x0A3E, 0x0A3F, 0x0A3F,
0x0A40, 0x0A42, 0x0A47, 0x0A48, 0x0A4B, 0x0A4D, 0x0A70, 0x0A71, 0x0A81, 0x0A83,
0x0ABC, 0x0ABC, 0x0ABE, 0x0AC5, 0x0AC7, 0x0AC9, 0x0ACB, 0x0ACD, 0x0B01, 0x0B03, 0x0B3C, 0x0B3C,
0x0B3E, 0x0B43, 0x0B47, 0x0B48, 0x0B4B, 0x0B4D, 0x0B56, 0x0B57, 0x0B82, 0x0B83,
0x0BBE, 0x0BC2, 0x0BC6, 0x0BC8, 0x0BCA, 0x0BCD, 0x0BD7, 0x0BD7, 0x0C01, 0x0C03,
0x0C3E, 0x0C44, 0x0C46, 0x0C48, 0x0C4A, 0x0C4D, 0x0C55, 0x0C56, 0x0C82, 0x0C83,
0x0CBE, 0x0CC4, 0x0CC6, 0x0CC8, 0x0CCA, 0x0CCD, 0x0CD5, 0x0CD6, 0x0D02, 0x0D03,
0x0D3E, 0x0D43, 0x0D46, 0x0D48, 0x0D4A, 0x0D4D, 0x0D57, 0x0D57, 0x0E31, 0x0E31, 0x0E34, 0x0E3A,
0x0E47, 0x0E4E, 0x0EB1, 0x0EB1, 0x0EB4, 0x0EB9, 0x0EBB, 0x0EBC, 0x0EC8, 0x0ECD,
0x0F18, 0x0F19, 0x0F35, 0x0F35, 0x0F37, 0x0F37, 0x0F39, 0x0F39, 0x0F3E, 0x0F3E, 0x0F3F, 0x0F3F, 0x0F71, 0x0F84,
0x0F86, 0x0F8B, 0x0F90, 0x0F95, 0x0F97, 0x0F97, 0x0F99, 0x0FAD, 0x0FB1, 0x0FB7, 0x0FB9, 0x0FB9,
0x20D0, 0x20DC, 0x20E1, 0x20E1, 0x302A, 0x302F, 0x3099, 0x3099, 0x309A, 0x309A,
//Digit::
0x0030, 0x0039, 0x0660, 0x0669, 0x06F0, 0x06F9, 0x0966, 0x096F, 0x09E6, 0x09EF,
0x0A66, 0x0A6F, 0x0AE6, 0x0AEF, 0x0B66, 0x0B6F, 0x0BE7, 0x0BEF, 0x0C66, 0x0C6F,
0x0CE6, 0x0CEF, 0x0D66, 0x0D6F, 0x0E50, 0x0E59, 0x0ED0, 0x0ED9, 0x0F20, 0x0F29,
//Extender::
0x00B7, 0x00B7, 0x02D0, 0x02D0, 0x02D1, 0x02D1, 0x0387, 0x0387, 0x0640, 0x0640, 0x0E46, 0x0E46, 0x0EC6, 0x0EC6, 0x3005, 0x3005, 0x3031, 0x3035,
0x309D, 0x309E, 0x30FC, 0x30FE
};
public static string GetIllegalXmlString(int iMaxChar, bool bAbsolute)
{
return WebData.BaseLib.StringGen.GetIllegalXmlStringWithSeed(iMaxChar, bAbsolute, 0);
}
public static string GetIllegalXmlStringWithSeed(int iMaxChar, bool bAbsolute, int iSeed)
{
int i = 0;
Random cRandom;
if (iSeed != 0)
{
cRandom = new Random(iSeed);
}
else
{
cRandom = new Random();
}
string sResult = string.Empty;
int iStrLen = bAbsolute ? iMaxChar : cRandom.Next(1, iMaxChar);
            //count how many illegal characters fall in the gaps between the legal ranges in the table.
int iSum = 0;
for (i = 0; i < cBaseCharMap.Length; i += 2)
{
//special processing for 0
if (i == 0)
{
iSum += cBaseCharMap[i] - 1;
}
else
{
                    //as the data in the table is not sequential,
                    //we need to take an educated guess whether this is a real gap:
                    //we assume the range shouldn't be more than 100 chars
                    //as well as that the range shouldn't be negative
if ((cBaseCharMap[i] - cBaseCharMap[i - 1] - 1 > 0) &&
(cBaseCharMap[i] - cBaseCharMap[i - 1] - 1 < 100))
{
iSum += cBaseCharMap[i] - cBaseCharMap[i - 1] - 1;
}
}
}
for (int iChar = 0; iChar < iStrLen; iChar++)
{
//get a rand number out of that
int iRandNum = cRandom.Next(iSum);
//get the right char under the number we just got
i = 0;
int iVal = 0;
while (true)
{
iVal = iRandNum;
//special case for 0
if (i == 0)
{
iRandNum -= cBaseCharMap[i] - 1;
}
else
{
if (cBaseCharMap[i] - cBaseCharMap[i - 1] - 1 > 0)
{
iRandNum -= cBaseCharMap[i] - cBaseCharMap[i - 1] - 1;
}
}
if (iRandNum >= 0)
{
i += 2;
}
else
{
break;
}
}
//special case for i=0
if (i == 0)
{
sResult += (char)iVal;
}
else
{
sResult += (char)(cBaseCharMap[i - 1] + iVal);
}
} //for string
return sResult;
}
} //StringGen
}//namespace
| -1 |
dotnet/runtime | 65,922 | Rely on PGO for isinst/castclass | Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | EgorBo | "2022-02-26T22:13:46Z" | "2022-04-01T21:25:56Z" | 063d08018602291f8dbd9d9b6055b8e1af874cb8 | f249a3d58b0b2bb55adca1a56c5a39cf446437b7 | Rely on PGO for isinst/castclass. Follow up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | ./src/tests/JIT/HardwareIntrinsics/Arm/AdvSimd/ShiftLogicalSaturate.Vector128.SByte.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;
namespace JIT.HardwareIntrinsics.Arm
{
public static partial class Program
{
private static void ShiftLogicalSaturate_Vector128_SByte()
{
var test = new SimpleBinaryOpTest__ShiftLogicalSaturate_Vector128_SByte();
if (test.IsSupported)
{
// Validates basic functionality works, using Unsafe.Read
test.RunBasicScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates basic functionality works, using Load
test.RunBasicScenario_Load();
}
// Validates calling via reflection works, using Unsafe.Read
test.RunReflectionScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates calling via reflection works, using Load
test.RunReflectionScenario_Load();
}
// Validates passing a static member works
test.RunClsVarScenario();
if (AdvSimd.IsSupported)
{
// Validates passing a static member works, using pinning and Load
test.RunClsVarScenario_Load();
}
// Validates passing a local works, using Unsafe.Read
test.RunLclVarScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates passing a local works, using Load
test.RunLclVarScenario_Load();
}
// Validates passing the field of a local class works
test.RunClassLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local class works, using pinning and Load
test.RunClassLclFldScenario_Load();
}
// Validates passing an instance member of a class works
test.RunClassFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a class works, using pinning and Load
test.RunClassFldScenario_Load();
}
// Validates passing the field of a local struct works
test.RunStructLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local struct works, using pinning and Load
test.RunStructLclFldScenario_Load();
}
// Validates passing an instance member of a struct works
test.RunStructFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a struct works, using pinning and Load
test.RunStructFldScenario_Load();
}
}
else
{
// Validates we throw on unsupported hardware
test.RunUnsupportedScenario();
}
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class SimpleBinaryOpTest__ShiftLogicalSaturate_Vector128_SByte
{
private struct DataTable
{
private byte[] inArray1;
private byte[] inArray2;
private byte[] outArray;
private GCHandle inHandle1;
private GCHandle inHandle2;
private GCHandle outHandle;
private ulong alignment;
public DataTable(SByte[] inArray1, SByte[] inArray2, SByte[] outArray, int alignment)
{
int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<SByte>();
int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<SByte>();
int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<SByte>();
if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfoutArray)
{
throw new ArgumentException("Invalid value of alignment");
}
this.inArray1 = new byte[alignment * 2];
this.inArray2 = new byte[alignment * 2];
this.outArray = new byte[alignment * 2];
this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned);
this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned);
this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned);
this.alignment = (ulong)alignment;
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<SByte, byte>(ref inArray1[0]), (uint)sizeOfinArray1);
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<SByte, byte>(ref inArray2[0]), (uint)sizeOfinArray2);
}
public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment);
public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment);
public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment);
public void Dispose()
{
inHandle1.Free();
inHandle2.Free();
outHandle.Free();
}
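            // Rounds the buffer pointer up to the next multiple of expectedAlignment;
            // the mask trick below assumes expectedAlignment is a power of two (8 or 16 here).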
private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
{
return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
}
}
private struct TestStruct
{
public Vector128<SByte> _fld1;
public Vector128<SByte> _fld2;
public static TestStruct Create()
{
var testStruct = new TestStruct();
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSByte(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref testStruct._fld1), ref Unsafe.As<SByte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref testStruct._fld2), ref Unsafe.As<SByte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>());
return testStruct;
}
public void RunStructFldScenario(SimpleBinaryOpTest__ShiftLogicalSaturate_Vector128_SByte testClass)
{
var result = AdvSimd.ShiftLogicalSaturate(_fld1, _fld2);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr);
}
public void RunStructFldScenario_Load(SimpleBinaryOpTest__ShiftLogicalSaturate_Vector128_SByte testClass)
{
fixed (Vector128<SByte>* pFld1 = &_fld1)
fixed (Vector128<SByte>* pFld2 = &_fld2)
{
var result = AdvSimd.ShiftLogicalSaturate(
AdvSimd.LoadVector128((SByte*)(pFld1)),
AdvSimd.LoadVector128((SByte*)(pFld2))
);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr);
}
}
}
private static readonly int LargestVectorSize = 16;
private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<SByte>>() / sizeof(SByte);
private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<SByte>>() / sizeof(SByte);
private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<SByte>>() / sizeof(SByte);
private static SByte[] _data1 = new SByte[Op1ElementCount];
private static SByte[] _data2 = new SByte[Op2ElementCount];
private static Vector128<SByte> _clsVar1;
private static Vector128<SByte> _clsVar2;
private Vector128<SByte> _fld1;
private Vector128<SByte> _fld2;
private DataTable _dataTable;
static SimpleBinaryOpTest__ShiftLogicalSaturate_Vector128_SByte()
{
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSByte(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref _clsVar1), ref Unsafe.As<SByte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref _clsVar2), ref Unsafe.As<SByte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>());
}
public SimpleBinaryOpTest__ShiftLogicalSaturate_Vector128_SByte()
{
Succeeded = true;
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSByte(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref _fld1), ref Unsafe.As<SByte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref _fld2), ref Unsafe.As<SByte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>());
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSByte(); }
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); }
_dataTable = new DataTable(_data1, _data2, new SByte[RetElementCount], LargestVectorSize);
}
public bool IsSupported => AdvSimd.IsSupported;
public bool Succeeded { get; set; }
public void RunBasicScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
var result = AdvSimd.ShiftLogicalSaturate(
Unsafe.Read<Vector128<SByte>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector128<SByte>>(_dataTable.inArray2Ptr)
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunBasicScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load));
var result = AdvSimd.ShiftLogicalSaturate(
AdvSimd.LoadVector128((SByte*)(_dataTable.inArray1Ptr)),
AdvSimd.LoadVector128((SByte*)(_dataTable.inArray2Ptr))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.ShiftLogicalSaturate), new Type[] { typeof(Vector128<SByte>), typeof(Vector128<SByte>) })
.Invoke(null, new object[] {
Unsafe.Read<Vector128<SByte>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector128<SByte>>(_dataTable.inArray2Ptr)
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector128<SByte>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load));
var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.ShiftLogicalSaturate), new Type[] { typeof(Vector128<SByte>), typeof(Vector128<SByte>) })
.Invoke(null, new object[] {
AdvSimd.LoadVector128((SByte*)(_dataTable.inArray1Ptr)),
AdvSimd.LoadVector128((SByte*)(_dataTable.inArray2Ptr))
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector128<SByte>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunClsVarScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
var result = AdvSimd.ShiftLogicalSaturate(
_clsVar1,
_clsVar2
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr);
}
public void RunClsVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load));
fixed (Vector128<SByte>* pClsVar1 = &_clsVar1)
fixed (Vector128<SByte>* pClsVar2 = &_clsVar2)
{
var result = AdvSimd.ShiftLogicalSaturate(
AdvSimd.LoadVector128((SByte*)(pClsVar1)),
AdvSimd.LoadVector128((SByte*)(pClsVar2))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr);
}
}
public void RunLclVarScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
var op1 = Unsafe.Read<Vector128<SByte>>(_dataTable.inArray1Ptr);
var op2 = Unsafe.Read<Vector128<SByte>>(_dataTable.inArray2Ptr);
var result = AdvSimd.ShiftLogicalSaturate(op1, op2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, op2, _dataTable.outArrayPtr);
}
public void RunLclVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load));
var op1 = AdvSimd.LoadVector128((SByte*)(_dataTable.inArray1Ptr));
var op2 = AdvSimd.LoadVector128((SByte*)(_dataTable.inArray2Ptr));
var result = AdvSimd.ShiftLogicalSaturate(op1, op2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, op2, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
var test = new SimpleBinaryOpTest__ShiftLogicalSaturate_Vector128_SByte();
var result = AdvSimd.ShiftLogicalSaturate(test._fld1, test._fld2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load));
var test = new SimpleBinaryOpTest__ShiftLogicalSaturate_Vector128_SByte();
fixed (Vector128<SByte>* pFld1 = &test._fld1)
fixed (Vector128<SByte>* pFld2 = &test._fld2)
{
var result = AdvSimd.ShiftLogicalSaturate(
AdvSimd.LoadVector128((SByte*)(pFld1)),
AdvSimd.LoadVector128((SByte*)(pFld2))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
}
public void RunClassFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
var result = AdvSimd.ShiftLogicalSaturate(_fld1, _fld2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr);
}
public void RunClassFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load));
fixed (Vector128<SByte>* pFld1 = &_fld1)
fixed (Vector128<SByte>* pFld2 = &_fld2)
{
var result = AdvSimd.ShiftLogicalSaturate(
AdvSimd.LoadVector128((SByte*)(pFld1)),
AdvSimd.LoadVector128((SByte*)(pFld2))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr);
}
}
public void RunStructLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
var test = TestStruct.Create();
var result = AdvSimd.ShiftLogicalSaturate(test._fld1, test._fld2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunStructLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load));
var test = TestStruct.Create();
var result = AdvSimd.ShiftLogicalSaturate(
AdvSimd.LoadVector128((SByte*)(&test._fld1)),
AdvSimd.LoadVector128((SByte*)(&test._fld2))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunStructFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
var test = TestStruct.Create();
test.RunStructFldScenario(this);
}
public void RunStructFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load));
var test = TestStruct.Create();
test.RunStructFldScenario_Load(this);
}
public void RunUnsupportedScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario));
bool succeeded = false;
try
{
RunBasicScenario_UnsafeRead();
}
catch (PlatformNotSupportedException)
{
succeeded = true;
}
if (!succeeded)
{
Succeeded = false;
}
}
private void ValidateResult(Vector128<SByte> op1, Vector128<SByte> op2, void* result, [CallerMemberName] string method = "")
{
SByte[] inArray1 = new SByte[Op1ElementCount];
SByte[] inArray2 = new SByte[Op2ElementCount];
SByte[] outArray = new SByte[RetElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<SByte, byte>(ref inArray1[0]), op1);
Unsafe.WriteUnaligned(ref Unsafe.As<SByte, byte>(ref inArray2[0]), op2);
Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<SByte>>());
ValidateResult(inArray1, inArray2, outArray, method);
}
private void ValidateResult(void* op1, void* op2, void* result, [CallerMemberName] string method = "")
{
SByte[] inArray1 = new SByte[Op1ElementCount];
SByte[] inArray2 = new SByte[Op2ElementCount];
SByte[] outArray = new SByte[RetElementCount];
Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<SByte>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector128<SByte>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<SByte>>());
ValidateResult(inArray1, inArray2, outArray, method);
}
private void ValidateResult(SByte[] left, SByte[] right, SByte[] result, [CallerMemberName] string method = "")
{
bool succeeded = true;
for (var i = 0; i < RetElementCount; i++)
{
if (Helpers.ShiftLogicalSaturate(left[i], right[i]) != result[i])
{
succeeded = false;
break;
}
}
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.ShiftLogicalSaturate)}<SByte>(Vector128<SByte>, Vector128<SByte>): {method} failed:");
TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})");
TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})");
TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;
namespace JIT.HardwareIntrinsics.Arm
{
public static partial class Program
{
private static void ShiftLogicalSaturate_Vector128_SByte()
{
var test = new SimpleBinaryOpTest__ShiftLogicalSaturate_Vector128_SByte();
if (test.IsSupported)
{
// Validates basic functionality works, using Unsafe.Read
test.RunBasicScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates basic functionality works, using Load
test.RunBasicScenario_Load();
}
// Validates calling via reflection works, using Unsafe.Read
test.RunReflectionScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates calling via reflection works, using Load
test.RunReflectionScenario_Load();
}
// Validates passing a static member works
test.RunClsVarScenario();
if (AdvSimd.IsSupported)
{
// Validates passing a static member works, using pinning and Load
test.RunClsVarScenario_Load();
}
// Validates passing a local works, using Unsafe.Read
test.RunLclVarScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates passing a local works, using Load
test.RunLclVarScenario_Load();
}
// Validates passing the field of a local class works
test.RunClassLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local class works, using pinning and Load
test.RunClassLclFldScenario_Load();
}
// Validates passing an instance member of a class works
test.RunClassFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a class works, using pinning and Load
test.RunClassFldScenario_Load();
}
// Validates passing the field of a local struct works
test.RunStructLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local struct works, using pinning and Load
test.RunStructLclFldScenario_Load();
}
// Validates passing an instance member of a struct works
test.RunStructFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a struct works, using pinning and Load
test.RunStructFldScenario_Load();
}
}
else
{
// Validates we throw on unsupported hardware
test.RunUnsupportedScenario();
}
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class SimpleBinaryOpTest__ShiftLogicalSaturate_Vector128_SByte
{
private struct DataTable
{
private byte[] inArray1;
private byte[] inArray2;
private byte[] outArray;
private GCHandle inHandle1;
private GCHandle inHandle2;
private GCHandle outHandle;
private ulong alignment;
public DataTable(SByte[] inArray1, SByte[] inArray2, SByte[] outArray, int alignment)
{
int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<SByte>();
int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<SByte>();
int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<SByte>();
if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfoutArray)
{
throw new ArgumentException("Invalid value of alignment");
}
this.inArray1 = new byte[alignment * 2];
this.inArray2 = new byte[alignment * 2];
this.outArray = new byte[alignment * 2];
this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned);
this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned);
this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned);
this.alignment = (ulong)alignment;
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<SByte, byte>(ref inArray1[0]), (uint)sizeOfinArray1);
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<SByte, byte>(ref inArray2[0]), (uint)sizeOfinArray2);
}
public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment);
public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment);
public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment);
public void Dispose()
{
inHandle1.Free();
inHandle2.Free();
outHandle.Free();
}
private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
{
return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
}
}
private struct TestStruct
{
public Vector128<SByte> _fld1;
public Vector128<SByte> _fld2;
public static TestStruct Create()
{
var testStruct = new TestStruct();
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSByte(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref testStruct._fld1), ref Unsafe.As<SByte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref testStruct._fld2), ref Unsafe.As<SByte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>());
return testStruct;
}
public void RunStructFldScenario(SimpleBinaryOpTest__ShiftLogicalSaturate_Vector128_SByte testClass)
{
var result = AdvSimd.ShiftLogicalSaturate(_fld1, _fld2);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr);
}
public void RunStructFldScenario_Load(SimpleBinaryOpTest__ShiftLogicalSaturate_Vector128_SByte testClass)
{
fixed (Vector128<SByte>* pFld1 = &_fld1)
fixed (Vector128<SByte>* pFld2 = &_fld2)
{
var result = AdvSimd.ShiftLogicalSaturate(
AdvSimd.LoadVector128((SByte*)(pFld1)),
AdvSimd.LoadVector128((SByte*)(pFld2))
);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr);
}
}
}
private static readonly int LargestVectorSize = 16;
private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<SByte>>() / sizeof(SByte);
private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<SByte>>() / sizeof(SByte);
private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<SByte>>() / sizeof(SByte);
private static SByte[] _data1 = new SByte[Op1ElementCount];
private static SByte[] _data2 = new SByte[Op2ElementCount];
private static Vector128<SByte> _clsVar1;
private static Vector128<SByte> _clsVar2;
private Vector128<SByte> _fld1;
private Vector128<SByte> _fld2;
private DataTable _dataTable;
static SimpleBinaryOpTest__ShiftLogicalSaturate_Vector128_SByte()
{
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSByte(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref _clsVar1), ref Unsafe.As<SByte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref _clsVar2), ref Unsafe.As<SByte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>());
}
public SimpleBinaryOpTest__ShiftLogicalSaturate_Vector128_SByte()
{
Succeeded = true;
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSByte(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref _fld1), ref Unsafe.As<SByte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref _fld2), ref Unsafe.As<SByte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>());
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSByte(); }
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); }
_dataTable = new DataTable(_data1, _data2, new SByte[RetElementCount], LargestVectorSize);
}
public bool IsSupported => AdvSimd.IsSupported;
public bool Succeeded { get; set; }
public void RunBasicScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
var result = AdvSimd.ShiftLogicalSaturate(
Unsafe.Read<Vector128<SByte>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector128<SByte>>(_dataTable.inArray2Ptr)
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunBasicScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load));
var result = AdvSimd.ShiftLogicalSaturate(
AdvSimd.LoadVector128((SByte*)(_dataTable.inArray1Ptr)),
AdvSimd.LoadVector128((SByte*)(_dataTable.inArray2Ptr))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.ShiftLogicalSaturate), new Type[] { typeof(Vector128<SByte>), typeof(Vector128<SByte>) })
.Invoke(null, new object[] {
Unsafe.Read<Vector128<SByte>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector128<SByte>>(_dataTable.inArray2Ptr)
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector128<SByte>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load));
var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.ShiftLogicalSaturate), new Type[] { typeof(Vector128<SByte>), typeof(Vector128<SByte>) })
.Invoke(null, new object[] {
AdvSimd.LoadVector128((SByte*)(_dataTable.inArray1Ptr)),
AdvSimd.LoadVector128((SByte*)(_dataTable.inArray2Ptr))
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector128<SByte>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunClsVarScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
var result = AdvSimd.ShiftLogicalSaturate(
_clsVar1,
_clsVar2
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr);
}
public void RunClsVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load));
fixed (Vector128<SByte>* pClsVar1 = &_clsVar1)
fixed (Vector128<SByte>* pClsVar2 = &_clsVar2)
{
var result = AdvSimd.ShiftLogicalSaturate(
AdvSimd.LoadVector128((SByte*)(pClsVar1)),
AdvSimd.LoadVector128((SByte*)(pClsVar2))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr);
}
}
public void RunLclVarScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
var op1 = Unsafe.Read<Vector128<SByte>>(_dataTable.inArray1Ptr);
var op2 = Unsafe.Read<Vector128<SByte>>(_dataTable.inArray2Ptr);
var result = AdvSimd.ShiftLogicalSaturate(op1, op2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, op2, _dataTable.outArrayPtr);
}
public void RunLclVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load));
var op1 = AdvSimd.LoadVector128((SByte*)(_dataTable.inArray1Ptr));
var op2 = AdvSimd.LoadVector128((SByte*)(_dataTable.inArray2Ptr));
var result = AdvSimd.ShiftLogicalSaturate(op1, op2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, op2, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
var test = new SimpleBinaryOpTest__ShiftLogicalSaturate_Vector128_SByte();
var result = AdvSimd.ShiftLogicalSaturate(test._fld1, test._fld2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load));
var test = new SimpleBinaryOpTest__ShiftLogicalSaturate_Vector128_SByte();
fixed (Vector128<SByte>* pFld1 = &test._fld1)
fixed (Vector128<SByte>* pFld2 = &test._fld2)
{
var result = AdvSimd.ShiftLogicalSaturate(
AdvSimd.LoadVector128((SByte*)(pFld1)),
AdvSimd.LoadVector128((SByte*)(pFld2))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
}
public void RunClassFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
var result = AdvSimd.ShiftLogicalSaturate(_fld1, _fld2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr);
}
public void RunClassFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load));
fixed (Vector128<SByte>* pFld1 = &_fld1)
fixed (Vector128<SByte>* pFld2 = &_fld2)
{
var result = AdvSimd.ShiftLogicalSaturate(
AdvSimd.LoadVector128((SByte*)(pFld1)),
AdvSimd.LoadVector128((SByte*)(pFld2))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr);
}
}
public void RunStructLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
var test = TestStruct.Create();
var result = AdvSimd.ShiftLogicalSaturate(test._fld1, test._fld2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunStructLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load));
var test = TestStruct.Create();
var result = AdvSimd.ShiftLogicalSaturate(
AdvSimd.LoadVector128((SByte*)(&test._fld1)),
AdvSimd.LoadVector128((SByte*)(&test._fld2))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunStructFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
var test = TestStruct.Create();
test.RunStructFldScenario(this);
}
public void RunStructFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load));
var test = TestStruct.Create();
test.RunStructFldScenario_Load(this);
}
public void RunUnsupportedScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario));
bool succeeded = false;
try
{
RunBasicScenario_UnsafeRead();
}
catch (PlatformNotSupportedException)
{
succeeded = true;
}
if (!succeeded)
{
Succeeded = false;
}
}
private void ValidateResult(Vector128<SByte> op1, Vector128<SByte> op2, void* result, [CallerMemberName] string method = "")
{
SByte[] inArray1 = new SByte[Op1ElementCount];
SByte[] inArray2 = new SByte[Op2ElementCount];
SByte[] outArray = new SByte[RetElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<SByte, byte>(ref inArray1[0]), op1);
Unsafe.WriteUnaligned(ref Unsafe.As<SByte, byte>(ref inArray2[0]), op2);
Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<SByte>>());
ValidateResult(inArray1, inArray2, outArray, method);
}
private void ValidateResult(void* op1, void* op2, void* result, [CallerMemberName] string method = "")
{
SByte[] inArray1 = new SByte[Op1ElementCount];
SByte[] inArray2 = new SByte[Op2ElementCount];
SByte[] outArray = new SByte[RetElementCount];
Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<SByte>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector128<SByte>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<SByte>>());
ValidateResult(inArray1, inArray2, outArray, method);
}
private void ValidateResult(SByte[] left, SByte[] right, SByte[] result, [CallerMemberName] string method = "")
{
bool succeeded = true;
for (var i = 0; i < RetElementCount; i++)
{
if (Helpers.ShiftLogicalSaturate(left[i], right[i]) != result[i])
{
succeeded = false;
break;
}
}
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.ShiftLogicalSaturate)}<SByte>(Vector128<SByte>, Vector128<SByte>): {method} failed:");
TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})");
TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})");
TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
}
}
| -1 |
dotnet/runtime | 65,922 | Rely on PGO for isinst/castclass | Follow-up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
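Conceptually, the guarded expansion that the cast profile enables looks roughly like the hand-written C# below. This is an illustrative sketch only, reusing the types from the example above: `CastToClassA_Guarded` is a made-up name, `Unsafe.As` stands in for a no-check reinterpretation, and the real JIT guards on the method-table pointer of the likely class rather than calling `GetType`, keeping the regular cast helper on the cold path.
```csharp
// Rough sketch of a profile-guided expansion of CastToClassA (illustration only).
// The profile says 'o' is almost always ClassB, so guard on that exact type first.
static ClassA CastToClassA_Guarded(object o)
{
    if (o is null || o.GetType() == typeof(ClassB))
        return Unsafe.As<ClassA>(o);   // fast path: no runtime cast helper call
    return (ClassA)o;                  // cold path: ordinary castclass (may throw)
}
```
Because the cold path falls back to the ordinary cast, null handling and `InvalidCastException` behavior stay the same whenever the profiled guess misses.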
Codegen diff: https://www.diffchecker.com/RFblv9RB | EgorBo | "2022-02-26T22:13:46Z" | "2022-04-01T21:25:56Z" | 063d08018602291f8dbd9d9b6055b8e1af874cb8 | f249a3d58b0b2bb55adca1a56c5a39cf446437b7 | Rely on PGO for isinst/castclass. Follow-up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | ./src/tests/JIT/SIMD/VectorHWAccel2_r.csproj | <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
</PropertyGroup>
<PropertyGroup>
<DebugType>None</DebugType>
<Optimize />
</PropertyGroup>
<ItemGroup>
<Compile Include="VectorHWAccel2.cs" />
<Compile Include="VectorUtil.cs" />
</ItemGroup>
</Project>
| <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
</PropertyGroup>
<PropertyGroup>
<DebugType>None</DebugType>
<Optimize />
</PropertyGroup>
<ItemGroup>
<Compile Include="VectorHWAccel2.cs" />
<Compile Include="VectorUtil.cs" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime | 65,922 | Rely on PGO for isinst/castclass | Follow-up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
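The `isinst` and interface cases benefit the same way once a likely class is known. The sketch below is a rough hand-written illustration only, reusing the types from the example above: `IsIClass_Guarded` is a made-up name, and the actual guard is a method-table comparison rather than a `GetType` call.
```csharp
// Rough sketch of a profile-guided expansion of IsIClass (illustration only).
// ClassB is the profiled type and is known to implement IClass, so a guard hit
// answers the interface check without calling the interface-cast helper.
static bool IsIClass_Guarded(object o)
{
    if (o != null && o.GetType() == typeof(ClassB))
        return true;
    return o is IClass;   // miss or null: fall back to the ordinary isinst path
}
```
The fallback keeps the result identical whenever the profiled guess is wrong.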
Codegen diff: https://www.diffchecker.com/RFblv9RB | EgorBo | "2022-02-26T22:13:46Z" | "2022-04-01T21:25:56Z" | 063d08018602291f8dbd9d9b6055b8e1af874cb8 | f249a3d58b0b2bb55adca1a56c5a39cf446437b7 | Rely on PGO for isinst/castclass. Follow-up to https://github.com/dotnet/runtime/pull/65460 (and [d-o](https://dev.azure.com/dnceng/internal/_git/dotnet-optimization/pullrequest/21334?_a=files))
Example (with `DOTNET_JitCastProfiling=1`):
```csharp
using System.Runtime.CompilerServices;
using System.Threading;
public interface IClass {}
public class ClassA : IClass {}
public class ClassB : ClassA {}
public class Program
{
[MethodImpl(MethodImplOptions.NoInlining)]
static ClassA CastToClassA(object o) => (ClassA)o; // currently we always emit a fast path for ClassA here
// but in my case o is always ClassB (PGO helps)
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsClassA(object o) => o is ClassA; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static IClass CastToIClass(object o) => (IClass)o; // we don't expand it without PGO
[MethodImpl(MethodImplOptions.NoInlining)]
static bool IsIClass(object o) => o is IClass; // we don't expand it without PGO
public static void Main()
{
// promote methods to tier1
var b = new ClassB();
for (int i = 0; i < 100; i++)
{
CastToClassA(b);
IsClassA(b);
CastToIClass(b);
IsIClass(b);
Thread.Sleep(16);
}
}
}
```
Codegen diff: https://www.diffchecker.com/RFblv9RB | ./src/tests/JIT/HardwareIntrinsics/General/Vector256_1/op_BitwiseOr.UInt32.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
namespace JIT.HardwareIntrinsics.General
{
public static partial class Program
{
private static void op_BitwiseOrUInt32()
{
var test = new VectorBinaryOpTest__op_BitwiseOrUInt32();
// Validates basic functionality works, using Unsafe.Read
test.RunBasicScenario_UnsafeRead();
// Validates calling via reflection works, using Unsafe.Read
test.RunReflectionScenario_UnsafeRead();
// Validates passing a static member works
test.RunClsVarScenario();
// Validates passing a local works, using Unsafe.Read
test.RunLclVarScenario_UnsafeRead();
// Validates passing the field of a local class works
test.RunClassLclFldScenario();
// Validates passing an instance member of a class works
test.RunClassFldScenario();
// Validates passing the field of a local struct works
test.RunStructLclFldScenario();
// Validates passing an instance member of a struct works
test.RunStructFldScenario();
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class VectorBinaryOpTest__op_BitwiseOrUInt32
{
private struct DataTable
{
private byte[] inArray1;
private byte[] inArray2;
private byte[] outArray;
private GCHandle inHandle1;
private GCHandle inHandle2;
private GCHandle outHandle;
private ulong alignment;
public DataTable(UInt32[] inArray1, UInt32[] inArray2, UInt32[] outArray, int alignment)
{
int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<UInt32>();
int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<UInt32>();
int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<UInt32>();
if ((alignment != 32 && alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfoutArray)
{
throw new ArgumentException("Invalid value of alignment");
}
this.inArray1 = new byte[alignment * 2];
this.inArray2 = new byte[alignment * 2];
this.outArray = new byte[alignment * 2];
this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned);
this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned);
this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned);
this.alignment = (ulong)alignment;
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<UInt32, byte>(ref inArray1[0]), (uint)sizeOfinArray1);
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<UInt32, byte>(ref inArray2[0]), (uint)sizeOfinArray2);
}
public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment);
public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment);
public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment);
public void Dispose()
{
inHandle1.Free();
inHandle2.Free();
outHandle.Free();
}
private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
{
return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
}
}
private struct TestStruct
{
public Vector256<UInt32> _fld1;
public Vector256<UInt32> _fld2;
public static TestStruct Create()
{
var testStruct = new TestStruct();
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<UInt32>, byte>(ref testStruct._fld1), ref Unsafe.As<UInt32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<UInt32>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<UInt32>, byte>(ref testStruct._fld2), ref Unsafe.As<UInt32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector256<UInt32>>());
return testStruct;
}
public void RunStructFldScenario(VectorBinaryOpTest__op_BitwiseOrUInt32 testClass)
{
var result = _fld1 | _fld2;
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr);
}
}
private static readonly int LargestVectorSize = 32;
private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector256<UInt32>>() / sizeof(UInt32);
private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector256<UInt32>>() / sizeof(UInt32);
private static readonly int RetElementCount = Unsafe.SizeOf<Vector256<UInt32>>() / sizeof(UInt32);
private static UInt32[] _data1 = new UInt32[Op1ElementCount];
private static UInt32[] _data2 = new UInt32[Op2ElementCount];
private static Vector256<UInt32> _clsVar1;
private static Vector256<UInt32> _clsVar2;
private Vector256<UInt32> _fld1;
private Vector256<UInt32> _fld2;
private DataTable _dataTable;
static VectorBinaryOpTest__op_BitwiseOrUInt32()
{
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<UInt32>, byte>(ref _clsVar1), ref Unsafe.As<UInt32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<UInt32>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<UInt32>, byte>(ref _clsVar2), ref Unsafe.As<UInt32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector256<UInt32>>());
}
public VectorBinaryOpTest__op_BitwiseOrUInt32()
{
Succeeded = true;
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<UInt32>, byte>(ref _fld1), ref Unsafe.As<UInt32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<UInt32>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<UInt32>, byte>(ref _fld2), ref Unsafe.As<UInt32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector256<UInt32>>());
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); }
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt32(); }
_dataTable = new DataTable(_data1, _data2, new UInt32[RetElementCount], LargestVectorSize);
}
public bool Succeeded { get; set; }
public void RunBasicScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
var result = Unsafe.Read<Vector256<UInt32>>(_dataTable.inArray1Ptr) | Unsafe.Read<Vector256<UInt32>>(_dataTable.inArray2Ptr);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
var result = typeof(Vector256<UInt32>).GetMethod("op_BitwiseOr", new Type[] { typeof(Vector256<UInt32>), typeof(Vector256<UInt32>) })
.Invoke(null, new object[] {
Unsafe.Read<Vector256<UInt32>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector256<UInt32>>(_dataTable.inArray2Ptr)
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector256<UInt32>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunClsVarScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
var result = _clsVar1 | _clsVar2;
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr);
}
public void RunLclVarScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
var op1 = Unsafe.Read<Vector256<UInt32>>(_dataTable.inArray1Ptr);
var op2 = Unsafe.Read<Vector256<UInt32>>(_dataTable.inArray2Ptr);
var result = op1 | op2;
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, op2, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
var test = new VectorBinaryOpTest__op_BitwiseOrUInt32();
var result = test._fld1 | test._fld2;
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunClassFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
var result = _fld1 | _fld2;
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr);
}
public void RunStructLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
var test = TestStruct.Create();
var result = test._fld1 | test._fld2;
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunStructFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
var test = TestStruct.Create();
test.RunStructFldScenario(this);
}
private void ValidateResult(Vector256<UInt32> op1, Vector256<UInt32> op2, void* result, [CallerMemberName] string method = "")
{
UInt32[] inArray1 = new UInt32[Op1ElementCount];
UInt32[] inArray2 = new UInt32[Op2ElementCount];
UInt32[] outArray = new UInt32[RetElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray1[0]), op1);
Unsafe.WriteUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray2[0]), op2);
Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector256<UInt32>>());
ValidateResult(inArray1, inArray2, outArray, method);
}
private void ValidateResult(void* op1, void* op2, void* result, [CallerMemberName] string method = "")
{
UInt32[] inArray1 = new UInt32[Op1ElementCount];
UInt32[] inArray2 = new UInt32[Op2ElementCount];
UInt32[] outArray = new UInt32[RetElementCount];
Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector256<UInt32>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector256<UInt32>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector256<UInt32>>());
ValidateResult(inArray1, inArray2, outArray, method);
}
private void ValidateResult(UInt32[] left, UInt32[] right, UInt32[] result, [CallerMemberName] string method = "")
{
bool succeeded = true;
if (result[0] != (uint)(left[0] | right[0]))
{
succeeded = false;
}
else
{
for (var i = 1; i < RetElementCount; i++)
{
if (result[i] != (uint)(left[i] | right[i]))
{
succeeded = false;
break;
}
}
}
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"{nameof(Vector256)}.op_BitwiseOr<UInt32>(Vector256<UInt32>, Vector256<UInt32>): {method} failed:");
TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})");
TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})");
TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
namespace JIT.HardwareIntrinsics.General
{
public static partial class Program
{
private static void op_BitwiseOrUInt32()
{
var test = new VectorBinaryOpTest__op_BitwiseOrUInt32();
// Validates basic functionality works, using Unsafe.Read
test.RunBasicScenario_UnsafeRead();
// Validates calling via reflection works, using Unsafe.Read
test.RunReflectionScenario_UnsafeRead();
// Validates passing a static member works
test.RunClsVarScenario();
// Validates passing a local works, using Unsafe.Read
test.RunLclVarScenario_UnsafeRead();
// Validates passing the field of a local class works
test.RunClassLclFldScenario();
// Validates passing an instance member of a class works
test.RunClassFldScenario();
// Validates passing the field of a local struct works
test.RunStructLclFldScenario();
// Validates passing an instance member of a struct works
test.RunStructFldScenario();
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class VectorBinaryOpTest__op_BitwiseOrUInt32
{
private struct DataTable
{
private byte[] inArray1;
private byte[] inArray2;
private byte[] outArray;
private GCHandle inHandle1;
private GCHandle inHandle2;
private GCHandle outHandle;
private ulong alignment;
public DataTable(UInt32[] inArray1, UInt32[] inArray2, UInt32[] outArray, int alignment)
{
int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<UInt32>();
int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<UInt32>();
int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<UInt32>();
if ((alignment != 32 && alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfoutArray)
{
throw new ArgumentException("Invalid value of alignment");
}
this.inArray1 = new byte[alignment * 2];
this.inArray2 = new byte[alignment * 2];
this.outArray = new byte[alignment * 2];
this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned);
this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned);
this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned);
this.alignment = (ulong)alignment;
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<UInt32, byte>(ref inArray1[0]), (uint)sizeOfinArray1);
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<UInt32, byte>(ref inArray2[0]), (uint)sizeOfinArray2);
}
public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment);
public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment);
public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment);
public void Dispose()
{
inHandle1.Free();
inHandle2.Free();
outHandle.Free();
}
private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
{
return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
}
}
private struct TestStruct
{
public Vector256<UInt32> _fld1;
public Vector256<UInt32> _fld2;
public static TestStruct Create()
{
var testStruct = new TestStruct();
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<UInt32>, byte>(ref testStruct._fld1), ref Unsafe.As<UInt32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<UInt32>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<UInt32>, byte>(ref testStruct._fld2), ref Unsafe.As<UInt32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector256<UInt32>>());
return testStruct;
}
public void RunStructFldScenario(VectorBinaryOpTest__op_BitwiseOrUInt32 testClass)
{
var result = _fld1 | _fld2;
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr);
}
}
private static readonly int LargestVectorSize = 32;
private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector256<UInt32>>() / sizeof(UInt32);
private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector256<UInt32>>() / sizeof(UInt32);
private static readonly int RetElementCount = Unsafe.SizeOf<Vector256<UInt32>>() / sizeof(UInt32);
private static UInt32[] _data1 = new UInt32[Op1ElementCount];
private static UInt32[] _data2 = new UInt32[Op2ElementCount];
private static Vector256<UInt32> _clsVar1;
private static Vector256<UInt32> _clsVar2;
private Vector256<UInt32> _fld1;
private Vector256<UInt32> _fld2;
private DataTable _dataTable;
static VectorBinaryOpTest__op_BitwiseOrUInt32()
{
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<UInt32>, byte>(ref _clsVar1), ref Unsafe.As<UInt32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<UInt32>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<UInt32>, byte>(ref _clsVar2), ref Unsafe.As<UInt32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector256<UInt32>>());
}
public VectorBinaryOpTest__op_BitwiseOrUInt32()
{
Succeeded = true;
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<UInt32>, byte>(ref _fld1), ref Unsafe.As<UInt32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<UInt32>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<UInt32>, byte>(ref _fld2), ref Unsafe.As<UInt32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector256<UInt32>>());
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); }
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt32(); }
_dataTable = new DataTable(_data1, _data2, new UInt32[RetElementCount], LargestVectorSize);
}
public bool Succeeded { get; set; }
public void RunBasicScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
var result = Unsafe.Read<Vector256<UInt32>>(_dataTable.inArray1Ptr) | Unsafe.Read<Vector256<UInt32>>(_dataTable.inArray2Ptr);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
var result = typeof(Vector256<UInt32>).GetMethod("op_BitwiseOr", new Type[] { typeof(Vector256<UInt32>), typeof(Vector256<UInt32>) })
.Invoke(null, new object[] {
Unsafe.Read<Vector256<UInt32>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector256<UInt32>>(_dataTable.inArray2Ptr)
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector256<UInt32>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunClsVarScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
var result = _clsVar1 | _clsVar2;
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr);
}
public void RunLclVarScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
var op1 = Unsafe.Read<Vector256<UInt32>>(_dataTable.inArray1Ptr);
var op2 = Unsafe.Read<Vector256<UInt32>>(_dataTable.inArray2Ptr);
var result = op1 | op2;
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, op2, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
var test = new VectorBinaryOpTest__op_BitwiseOrUInt32();
var result = test._fld1 | test._fld2;
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunClassFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
var result = _fld1 | _fld2;
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr);
}
public void RunStructLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
var test = TestStruct.Create();
var result = test._fld1 | test._fld2;
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunStructFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
var test = TestStruct.Create();
test.RunStructFldScenario(this);
}
private void ValidateResult(Vector256<UInt32> op1, Vector256<UInt32> op2, void* result, [CallerMemberName] string method = "")
{
UInt32[] inArray1 = new UInt32[Op1ElementCount];
UInt32[] inArray2 = new UInt32[Op2ElementCount];
UInt32[] outArray = new UInt32[RetElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray1[0]), op1);
Unsafe.WriteUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray2[0]), op2);
Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector256<UInt32>>());
ValidateResult(inArray1, inArray2, outArray, method);
}
private void ValidateResult(void* op1, void* op2, void* result, [CallerMemberName] string method = "")
{
UInt32[] inArray1 = new UInt32[Op1ElementCount];
UInt32[] inArray2 = new UInt32[Op2ElementCount];
UInt32[] outArray = new UInt32[RetElementCount];
Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector256<UInt32>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector256<UInt32>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector256<UInt32>>());
ValidateResult(inArray1, inArray2, outArray, method);
}
private void ValidateResult(UInt32[] left, UInt32[] right, UInt32[] result, [CallerMemberName] string method = "")
{
bool succeeded = true;
if (result[0] != (uint)(left[0] | right[0]))
{
succeeded = false;
}
else
{
for (var i = 1; i < RetElementCount; i++)
{
if (result[i] != (uint)(left[i] | right[i]))
{
succeeded = false;
break;
}
}
}
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"{nameof(Vector256)}.op_BitwiseOr<UInt32>(Vector256<UInt32>, Vector256<UInt32>): {method} failed:");
TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})");
TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})");
TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
}
}
| -1 |